Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2018-10-02 09:50:34 +02:00
commit 97e831e130
236 changed files with 1757 additions and 1292 deletions

View File

@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver

 Required properties:
 - compatible = "gpio-keys";

View File

@@ -33,4 +33,3 @@ Video Function Calls
 video-clear-buffer
 video-set-streamtype
 video-set-format
-video-set-attributes

View File

@@ -9716,13 +9716,6 @@ Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 S:	Maintained
 F:	drivers/media/dvb-frontends/mn88473*

-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:	linux-pci@vger.kernel.org
-S:	Supported
-F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:	drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:	Jessica Yu <jeyu@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -11137,6 +11130,13 @@ F:	include/uapi/linux/switchtec_ioctl.h
 F:	include/linux/switchtec.h
 F:	drivers/ntb/hw/mscc/

+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:	linux-pci@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:	drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:	Jason Cooper <jason@lakedaemon.net>
@@ -11203,8 +11203,14 @@ F:	tools/pci/

 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:	Russell Currey <ruscur@russell.cc>
+M:	Sam Bobroff <sbobroff@linux.ibm.com>
+M:	Oliver O'Halloran <oohall@gmail.com>
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
+F:	Documentation/PCI/pci-error-recovery.txt
+F:	drivers/pci/pcie/aer.c
+F:	drivers/pci/pcie/dpc.c
+F:	drivers/pci/pcie/err.c
 F:	Documentation/powerpc/eeh-pci-error-recovery.txt
 F:	arch/powerpc/kernel/eeh*.c
 F:	arch/powerpc/platforms/*/eeh*.c

View File

@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

View File

@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 #ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
-	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
-	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
 #endif
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 */
	XVCPSGNDP32(32)
 denorm_done:
+	mfspr	r11,SPRN_HSRR0
+	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)

View File

@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
	std	r1, PACATMSCRATCH(r13)
	ld	r1, PACAR1(r13)
-	/* Store the PPR in r11 and reset to decent value */
	std	r11, GPR11(r1)	/* Temporary stash */
+
+	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld	r11, PACATMSCRATCH(r13)
+	std	r11, GPR1(r1)
+
+	/*
+	 * Store r13 away so we can free up the scratch SPR for the SLB fault
+	 * handler (needed once we start accessing the thread_struct).
+	 */
+	GET_SCRATCH0(r11)
+	std	r11, GPR13(r1)
+
	/* Reset MSR RI so we can take SLB faults again */
	li	r11, MSR_RI
	mtmsrd	r11, 1
+	/* Store the PPR in r11 and reset to decent value */
	mfspr	r11, SPRN_PPR
	HMT_MEDIUM
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
	SAVE_GPR(8, r7)	/* user r8 */
	SAVE_GPR(9, r7)	/* user r9 */
	SAVE_GPR(10, r7)	/* user r10 */
-	ld	r3, PACATMSCRATCH(r13)	/* user r1 */
+	ld	r3, GPR1(r1)	/* user r1 */
	ld	r4, GPR7(r1)	/* user r7 */
	ld	r5, GPR11(r1)	/* user r11 */
	ld	r6, GPR12(r1)	/* user r12 */
-	GET_SCRATCH0(8)	/* user r13 */
+	ld	r8, GPR13(r1)	/* user r13 */
	std	r3, GPR1(r7)
	std	r4, GPR7(r7)
	std	r5, GPR11(r7)

View File

@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
	addc	r0, r8, r9
	ld	r10, 0(r4)
	ld	r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	rotldi	r5, r5, 8
+#endif
	adde	r0, r0, r10
	add	r5, r5, r7
	adde	r0, r0, r11

View File

@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
{ {
int err; int err;
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
return 0;
}
__put_user_size(instr, patch_addr, 4, err); __put_user_size(instr, patch_addr, 4, err);
if (err) if (err)
return err; return err;

View File

@@ -63,6 +63,7 @@
 #endif

 unsigned long long memory_limit;
+bool init_mem_is_free;

 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
+	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
 }

View File

@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
	int new_nid;

	/* Use associativity from first thread for all siblings */
-	vphn_get_associativity(cpu, associativity);
+	if (vphn_get_associativity(cpu, associativity))
+		return cpu_to_node(cpu);
+
	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;

 static void reset_topology_timer(void)
 {
-	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+	if (vphn_enabled)
+		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }

 #ifdef CONFIG_SMP

View File

@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
	 * Since any pkey can be used for data or execute, we will just treat
	 * all keys as equal and track them as one entity.
	 */
-	pkeys_total = be32_to_cpu(vals[0]);
+	pkeys_total = vals[0];
	pkeys_devtree_defined = true;
 }

View File

@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
	level_shift = entries_shift + 3;
	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
		return -EINVAL;

	/* Allocate TCE table */

View File

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */

View File

@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
	push	%ebx
	push	%ecx
	push	%edx
-	push	%edi
-
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call	1f
-1:	popl	%edi
-	subl	$1b, %edi
-
-	movl	enc_bit(%edi), %eax
-	cmpl	$0, %eax
-	jge	.Lsev_exit

	/* Check if running under a hypervisor */
	movl	$1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
	movl	%ebx, %eax
	andl	$0x3f, %eax	/* Return the encryption bit location */
-	movl	%eax, enc_bit(%edi)
	jmp	.Lsev_exit

 .Lno_sev:
	xor	%eax, %eax
-	movl	%eax, enc_bit(%edi)

 .Lsev_exit:
-	pop	%edi
	pop	%edx
	pop	%ecx
	pop	%ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)

	.data
-enc_bit:
-	.int	0xffffffff

 #ifdef CONFIG_AMD_MEM_ENCRYPT
	.balign	8

View File

@@ -36,6 +36,7 @@
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;

 static HLIST_HEAD(uncore_unused_list);

@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

+	/*
+	 * SliceMask and ThreadMask need to be set for certain L3 events in
+	 * Family 17h. For other events, the two fields do not affect the count.
+	 */
+	if (l3_mask)
+		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
	if (event->cpu < 0)
		return -EINVAL;

@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
+		l3_mask = true;
	} else {
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
+		l3_mask = false;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;

View File

@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {

 void bdx_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
 };

View File

@@ -46,6 +46,14 @@
 #define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

+#define AMD64_L3_SLICE_SHIFT	48
+#define AMD64_L3_SLICE_MASK	\
+	((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT	56
+#define AMD64_L3_THREAD_MASK	\
+	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\

View File

@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
/* /*
* __blk_mq_update_nr_hw_queues will update the nr_hw_queues and * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
* queue_hw_ctx after freeze the queue. So we could use q_usage_counter * queue_hw_ctx after freeze the queue, so we use q_usage_counter
* to avoid race with it. __blk_mq_update_nr_hw_queues will users * to avoid race with it.
* synchronize_rcu to ensure all of the users go out of the critical
* section below and see zeroed q_usage_counter.
*/ */
rcu_read_lock(); if (!percpu_ref_tryget(&q->q_usage_counter))
if (percpu_ref_is_zero(&q->q_usage_counter)) {
rcu_read_unlock();
return; return;
}
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags; struct blk_mq_tags *tags = hctx->tags;
@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
} }
rcu_read_unlock(); blk_queue_exit(q);
} }
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,

View File

@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
							     &ctx_list,
							     from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
					     from_schedule);
	}

View File

@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);

View File

@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
		list_del(&gnt_list_entry->node);
		gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
		rinfo->persistent_gnts_c--;
-		__free_page(gnt_list_entry->page);
-		kfree(gnt_list_entry);
+		gnt_list_entry->gref = GRANT_INVALID_REF;
+		list_add_tail(&gnt_list_entry->node, &rinfo->grants);
	}

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

View File

@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
	data->base = of_iomap(node, 0);
	if (!data->base) {
		pr_err("Could not map PIT address\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto exit;
	}

	data->mck = of_clk_get(node, 0);
	if (IS_ERR(data->mck)) {
		pr_err("Unable to get mck clk\n");
-		return PTR_ERR(data->mck);
+		ret = PTR_ERR(data->mck);
+		goto exit;
	}

	ret = clk_prepare_enable(data->mck);
	if (ret) {
		pr_err("Unable to enable mck\n");
-		return ret;
+		goto exit;
	}

	/* Get the interrupts property */
	data->irq = irq_of_parse_and_map(node, 0);
	if (!data->irq) {
		pr_err("Unable to get IRQ from DT\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
	}

	/*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
	ret = clocksource_register_hz(&data->clksrc, pit_rate);
	if (ret) {
		pr_err("Failed to register clocksource\n");
-		return ret;
+		goto exit;
	}

	/* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
			  "at91_tick", data);
	if (ret) {
		pr_err("Unable to setup IRQ\n");
-		return ret;
+		clocksource_unregister(&data->clksrc);
+		goto exit;
	}

	/* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
	clockevents_register_device(&data->clkevt);

	return 0;
+
+exit:
+	kfree(data);
+	return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
		 at91sam926x_pit_dt_init);

View File

@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
cr &= ~fttmr010->t1_enable_val; cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR); writel(cr, fttmr010->base + TIMER_CR);
/* Setup the match register forward/backward in time */ if (fttmr010->count_down) {
cr = readl(fttmr010->base + TIMER1_COUNT); /*
if (fttmr010->count_down) * ASPEED Timer Controller will load TIMER1_LOAD register
cr -= cycles; * into TIMER1_COUNT register when the timer is re-enabled.
else */
cr += cycles; writel(cycles, fttmr010->base + TIMER1_LOAD);
writel(cr, fttmr010->base + TIMER1_MATCH1); } else {
/* Setup the match register forward in time */
cr = readl(fttmr010->base + TIMER1_COUNT);
writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
}
/* Start */ /* Start */
cr = readl(fttmr010->base + TIMER_CR); cr = readl(fttmr010->base + TIMER_CR);

View File

@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
return -ENXIO; return -ENXIO;
} }
if (!of_machine_is_compatible("ti,am43"))
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
ti_32k_timer.counter = ti_32k_timer.base; ti_32k_timer.counter = ti_32k_timer.base;
/* /*

View File

@@ -44,7 +44,7 @@ enum _msm8996_version {

 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;

-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
	size_t len;
	u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);

-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
	platform_device_unregister(kryo_cpufreq_pdev);
	platform_driver_unregister(&qcom_cpufreq_kryo_driver);

View File

@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }

+static const struct address_space_operations dev_dax_aops = {
+	.set_page_dirty = noop_set_page_dirty,
+	.invalidatepage = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
	struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
+	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;

View File

@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
{ {
int i; int i;
cancel_delayed_work_sync(&adev->vce.idle_work);
if (adev->vce.vcpu_bo == NULL) if (adev->vce.vcpu_bo == NULL)
return 0; return 0;
@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES) if (i == AMDGPU_MAX_VCE_HANDLES)
return 0; return 0;
cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */ /* TODO: suspending running encoding sessions isn't supported */
return -EINVAL; return -EINVAL;
} }

View File

@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
	unsigned size;
	void *ptr;

+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+
	if (adev->vcn.vcpu_bo == NULL)
		return 0;

-	cancel_delayed_work_sync(&adev->vcn.idle_work);
-
	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

View File

@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
	return NULL;
 }

+static void emulated_link_detect(struct dc_link *link)
+{
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct display_sink_capability sink_caps = { 0 };
+	enum dc_edid_status edid_status;
+	struct dc_context *dc_ctx = link->ctx;
+	struct dc_sink *sink = NULL;
+	struct dc_sink *prev_sink = NULL;
+
+	link->type = dc_connection_none;
+	prev_sink = link->local_sink;
+
+	if (prev_sink != NULL)
+		dc_sink_retain(prev_sink);
+
+	switch (link->connector_signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+		break;
+	}
+
+	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	}
+
+	case SIGNAL_TYPE_DVI_DUAL_LINK: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		break;
+	}
+
+	case SIGNAL_TYPE_LVDS: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_LVDS;
+		break;
+	}
+
+	case SIGNAL_TYPE_EDP: {
+		sink_caps.transaction_type =
+			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		sink_caps.signal = SIGNAL_TYPE_EDP;
+		break;
+	}
+
+	case SIGNAL_TYPE_DISPLAY_PORT: {
+		sink_caps.transaction_type =
+			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+		break;
+	}
+
+	default:
+		DC_ERROR("Invalid connector type! signal:%d\n",
+			link->connector_signal);
+		return;
+	}
+
+	sink_init_data.link = link;
+	sink_init_data.sink_signal = sink_caps.signal;
+
+	sink = dc_sink_create(&sink_init_data);
+	if (!sink) {
+		DC_ERROR("Failed to create sink!\n");
+		return;
+	}
+
+	link->local_sink = sink;
+
+	edid_status = dm_helpers_read_local_edid(
+			link->ctx,
+			link,
+			sink);
+
+	if (edid_status != EDID_OK)
+		DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
	struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
+	enum dc_connection_type new_connection_type = dc_connection_none;
	int ret;
	int i;
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
			continue;

		mutex_lock(&aconnector->hpd_lock);
-		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none)
+			emulated_link_detect(aconnector->dc_link);
+		else
+			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
+	enum dc_connection_type new_connection_type = dc_connection_none;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

-	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+
+	if (aconnector->base.force && new_connection_type == dc_connection_none) {
+		emulated_link_detect(aconnector->dc_link);
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+
+	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+	enum dc_connection_type new_connection_type = dc_connection_none;

	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
-		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+		if (!dc_link_detect_sink(dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(dc_link);
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			drm_kms_helper_hotplug_event(dev);
+		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t total_overlay_planes, total_primary_planes;
+	enum dc_connection_type new_connection_type = dc_connection_none;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)

		link = dc_get_link_at_index(dm->dc, i);

-		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+		if (!dc_link_detect_sink(link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(link);
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
		}
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
	if (dm_state && dm_state->freesync_capable)
		stream->ignore_msa_timing_param = true;
 finish:
-	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
		dc_sink_release(sink);

	return stream;

View File

@@ -195,7 +195,7 @@ static bool program_hpd_filter(
	return result;
 }

-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
	uint32_t is_hpd_high = 0;
	struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
		return false;

-	if (false == detect_sink(link, &new_connection_type)) {
+	if (false == dc_link_detect_sink(link, &new_connection_type)) {
		BREAK_TO_DEBUGGER();
		return false;
	}

View File

@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

 bool dc_link_is_dp_sink_present(struct dc_link *link);

+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */

View File

@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
	dc->prev_display_config = *pp_display_cfg;
 }

-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
	struct dc *dc,
	struct dc_state *context,
	bool decrease_allowed)

View File

@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
	const struct dc_state *context,
	struct dm_pp_display_configuration *pp_display_cfg);

-void dce110_set_bandwidth(
-	struct dc *dc,
-	struct dc_state *context,
-	bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);

 void dp_receiver_power_ctrl(struct dc_link *link, bool on);

View File

@@ -244,17 +244,6 @@ static void dce120_update_dchub(
	dh_data->dchub_info_valid = false;
 }

-static void dce120_set_bandwidth(
-	struct dc *dc,
-	struct dc_state *context,
-	bool decrease_allowed)
-{
-	if (context->stream_count <= 0)
-		return;
-
-	dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
	/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
	dce110_hw_sequencer_construct(dc);
	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
	dc->hwss.update_dchub = dce120_update_dchub;
-	dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }

View File

@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
	drm->irq_enabled = true;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	drm_crtc_vblank_reset(&malidp->crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;

View File

@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
				     dma_addr_t *addrs, s32 *pitches,
-				     int num_planes, u16 w, u16 h, u32 fmt_id)
+				     int num_planes, u16 w, u16 h, u32 fmt_id,
+				     const s16 *rgb2yuv_coeffs)
 {
	u32 base = MALIDP500_SE_MEMWRITE_BASE;
	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
	malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
			MALIDP500_SE_MEMWRITE_OUT_SIZE);

+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
	malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);

	return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
				     dma_addr_t *addrs, s32 *pitches,
-				     int num_planes, u16 w, u16 h, u32 fmt_id)
+				     int num_planes, u16 w, u16 h, u32 fmt_id,
+				     const s16 *rgb2yuv_coeffs)
 {
	u32 base = MALIDP550_SE_MEMWRITE_BASE;
	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
	malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
			  MALIDP550_SE_CONTROL);

+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
	return 0;
 }

View File

@@ -191,7 +191,8 @@ struct malidp_hw {
	 * @param fmt_id - internal format ID of output buffer
	 */
	int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-			       s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+			       s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+			       const s16 *rgb2yuv_coeffs);

	/*
	 * Disable the writing to memory of the next frame's content.

View File

@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
	s32 pitches[2];
	u8 format;
	u8 n_planes;
+	bool rgb2yuv_initialized;
+	const s16 *rgb2yuv_coeffs;
 };

 static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-	struct malidp_mw_connector_state *mw_state;
+	struct malidp_mw_connector_state *mw_state, *mw_current_state;

	if (WARN_ON(!connector->state))
		return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
	if (!mw_state)
		return NULL;

-	/* No need to preserve any of our driver-local data */
+	mw_current_state = to_mw_state(connector->state);
+	mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+	mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
	__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);

	return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };

+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+	47, 157, 16,
+	-26, -87, 112,
+	112, -102, -10,
+	16, 128, 128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
			       struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
	}
	mw_state->n_planes = n_planes;

+	if (fb->format->is_yuv)
+		mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
	return 0;
 }

@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
		drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
		conn_state->writeback_job = NULL;

		hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
					   mw_state->pitches, mw_state->n_planes,
-					   fb->width, fb->height, mw_state->format);
+					   fb->width, fb->height, mw_state->format,
+					   !mw_state->rgb2yuv_initialized ?
+					   mw_state->rgb2yuv_coeffs : NULL);
+		mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
	} else {
		DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
		hwdev->hw->disable_memwrite(hwdev);

View File

@ -205,6 +205,7 @@
#define MALIDP500_SE_BASE 0x00c00 #define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c #define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
#define MALIDP500_SE_MEMWRITE_BASE 0x00e00 #define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00 #define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00 #define MALIDP500_CONFIG_VALID 0x00f00
@ -238,6 +239,7 @@
#define MALIDP550_SE_CONTROL 0x08010 #define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
#define MALIDP550_SE_MEMWRITE_BASE 0x08100 #define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000 #define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010 #define MALIDP550_DC_CONTROL 0x0c010

View File

@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>

-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>

@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
	if (panel->connector)
		return -EBUSY;

-	panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-	if (!panel->link) {
-		dev_err(panel->dev, "failed to link panel to %s\n",
-			dev_name(connector->dev->dev));
-		return -EINVAL;
-	}
-
	panel->connector = connector;
	panel->drm = connector->dev;

@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
 */
 int drm_panel_detach(struct drm_panel *panel)
 {
-	device_link_del(panel->link);
-
	panel->connector = NULL;
	panel->drm = NULL;

View File

@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{ {
int ret; int ret;
WARN_ON(*fence);
*fence = drm_syncobj_fence_get(syncobj); *fence = drm_syncobj_fence_get(syncobj);
if (*fence) if (*fence)
return 1; return 1;
@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
if (entries[i].fence)
continue;
drm_syncobj_fence_get_or_add_callback(syncobjs[i], drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence, &entries[i].fence,
&entries[i].syncobj_cb, &entries[i].syncobj_cb,

View File

@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct component_match *match = NULL;

-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
	if (!dev->platform_data) {
		struct device_node *core_node;
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;
-		pdev = platform_device_register_simple("etnaviv", -1,
-						       NULL, 0);
-		if (IS_ERR(pdev)) {
-			ret = PTR_ERR(pdev);
+
+		pdev = platform_device_alloc("etnaviv", -1);
+		if (!pdev) {
+			ret = -ENOMEM;
			of_node_put(np);
			goto unregister_platform_driver;
		}
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+		/*
+		 * Apply the same DMA configuration to the virtual etnaviv
+		 * device as the GPU we found. This assumes that all Vivante
+		 * GPUs in the system share the same DMA constraints.
+		 */
+		of_dma_configure(&pdev->dev, np, true);
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			platform_device_put(pdev);
+			of_node_put(np);
+			goto unregister_platform_driver;
+		}
+
		etnaviv_drm = pdev;
		of_node_put(np);
		break;

View File

@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
			th->thdev[i] = NULL;
		}

-		th->num_thdevs = lowest;
+		if (lowest >= 0)
+			th->num_thdevs = lowest;
	}

	if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
				.flags = IORESOURCE_MEM,
			},
			{
-				.start = TH_MMIO_SW,
+				.start = 1, /* use resource[1] */
				.end = 0,
				.flags = IORESOURCE_MEM,
			},
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
	struct intel_th_device *thdev;
	struct resource res[3];
	unsigned int req = 0;
+	bool is64bit = false;
	int r, err;

	thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,

	thdev->drvdata = th->drvdata;

+	for (r = 0; r < th->num_resources; r++)
+		if (th->resource[r].flags & IORESOURCE_MEM_64) {
+			is64bit = true;
+			break;
+		}
+
	memcpy(res, subdev->res,
	       sizeof(struct resource) * subdev->nres);

	for (r = 0; r < subdev->nres; r++) {
		struct resource *devres = th->resource;
-		int bar = TH_MMIO_CONFIG;
+		int bar = 0; /* cut subdevices' MMIO from resource[0] */

		/*
		 * Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
		 */
		if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
			bar = res[r].start;
+			if (is64bit)
+				bar *= 2;
			res[r].start = 0;
			res[r].end = resource_size(&devres[bar]) - 1;
		}

View File

@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
.driver_data = (kernel_ulong_t)&intel_th_2x, .driver_data = (kernel_ulong_t)&intel_th_2x,
}, },
{
/* Ice Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 }, { 0 },
}; };

View File

@@ -337,55 +337,6 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
	return 0;
 }

-/**
- * add_modify_gid - Add or modify GID table entry
- *
- * @table:	GID table in which GID to be added or modified
- * @attr:	Attributes of the GID
- *
- * Returns 0 on success or appropriate error code. It accepts zero
- * GID addition for non RoCE ports for HCA's who report them as valid
- * GID. However such zero GIDs are not added to the cache.
- */
-static int add_modify_gid(struct ib_gid_table *table,
-			  const struct ib_gid_attr *attr)
-{
-	struct ib_gid_table_entry *entry;
-	int ret = 0;
-
-	/*
-	 * Invalidate any old entry in the table to make it safe to write to
-	 * this index.
-	 */
-	if (is_gid_entry_valid(table->data_vec[attr->index]))
-		put_gid_entry(table->data_vec[attr->index]);
-
-	/*
-	 * Some HCA's report multiple GID entries with only one valid GID, and
-	 * leave other unused entries as the zero GID. Convert zero GIDs to
-	 * empty table entries instead of storing them.
-	 */
-	if (rdma_is_zero_gid(&attr->gid))
-		return 0;
-
-	entry = alloc_gid_entry(attr);
-	if (!entry)
-		return -ENOMEM;
-
-	if (rdma_protocol_roce(attr->device, attr->port_num)) {
-		ret = add_roce_gid(entry);
-		if (ret)
-			goto done;
-	}
-
-	store_gid_entry(table, entry);
-	return 0;
-
-done:
-	put_gid_entry(entry);
-	return ret;
-}
-
 /**
  * del_gid - Delete GID table entry
  *
@@ -419,6 +370,55 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
	put_gid_entry_locked(entry);
 }

+/**
+ * add_modify_gid - Add or modify GID table entry
+ *
+ * @table:	GID table in which GID to be added or modified
+ * @attr:	Attributes of the GID
+ *
+ * Returns 0 on success or appropriate error code. It accepts zero
+ * GID addition for non RoCE ports for HCA's who report them as valid
+ * GID. However such zero GIDs are not added to the cache.
+ */
+static int add_modify_gid(struct ib_gid_table *table,
+			  const struct ib_gid_attr *attr)
+{
+	struct ib_gid_table_entry *entry;
+	int ret = 0;
+
+	/*
+	 * Invalidate any old entry in the table to make it safe to write to
+	 * this index.
+	 */
+	if (is_gid_entry_valid(table->data_vec[attr->index]))
+		del_gid(attr->device, attr->port_num, table, attr->index);
+
+	/*
+	 * Some HCA's report multiple GID entries with only one valid GID, and
+	 * leave other unused entries as the zero GID. Convert zero GIDs to
+	 * empty table entries instead of storing them.
+	 */
+	if (rdma_is_zero_gid(&attr->gid))
+		return 0;
+
+	entry = alloc_gid_entry(attr);
+	if (!entry)
+		return -ENOMEM;
+
+	if (rdma_protocol_roce(attr->device, attr->port_num)) {
+		ret = add_roce_gid(entry);
+		if (ret)
+			goto done;
+	}
+
+	store_gid_entry(table, entry);
+	return 0;
+
+done:
+	put_gid_entry(entry);
+	return ret;
+}
+
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,

View File

@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
+			ucma_put_ctx(ctx);
+			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */

View File

@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
-	    cmd->base.qp_state > IB_QPS_ERR) {
+	    (cmd->base.attr_mask & IB_QP_STATE &&
+	    cmd->base.qp_state > IB_QPS_ERR)) {
		ret = -EINVAL;
		goto release_qp;
	}

-	attr->qp_state = cmd->base.qp_state;
-	attr->cur_qp_state = cmd->base.cur_qp_state;
-	attr->path_mtu = cmd->base.path_mtu;
-	attr->path_mig_state = cmd->base.path_mig_state;
-	attr->qkey = cmd->base.qkey;
-	attr->rq_psn = cmd->base.rq_psn;
-	attr->sq_psn = cmd->base.sq_psn;
-	attr->dest_qp_num = cmd->base.dest_qp_num;
-	attr->qp_access_flags = cmd->base.qp_access_flags;
-	attr->pkey_index = cmd->base.pkey_index;
-	attr->alt_pkey_index = cmd->base.alt_pkey_index;
-	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-	attr->max_rd_atomic = cmd->base.max_rd_atomic;
-	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
-	attr->min_rnr_timer = cmd->base.min_rnr_timer;
-	attr->port_num = cmd->base.port_num;
-	attr->timeout = cmd->base.timeout;
-	attr->retry_cnt = cmd->base.retry_cnt;
-	attr->rnr_retry = cmd->base.rnr_retry;
-	attr->alt_port_num = cmd->base.alt_port_num;
-	attr->alt_timeout = cmd->base.alt_timeout;
-	attr->rate_limit = cmd->rate_limit;
+	if (cmd->base.attr_mask & IB_QP_STATE)
+		attr->qp_state = cmd->base.qp_state;
+	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+		attr->cur_qp_state = cmd->base.cur_qp_state;
+	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+		attr->path_mtu = cmd->base.path_mtu;
+	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+		attr->path_mig_state = cmd->base.path_mig_state;
+	if (cmd->base.attr_mask & IB_QP_QKEY)
+		attr->qkey = cmd->base.qkey;
+	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+		attr->rq_psn = cmd->base.rq_psn;
+	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+		attr->sq_psn = cmd->base.sq_psn;
+	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+		attr->dest_qp_num = cmd->base.dest_qp_num;
+	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+		attr->qp_access_flags = cmd->base.qp_access_flags;
+	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+		attr->pkey_index = cmd->base.pkey_index;
+	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+		attr->max_rd_atomic = cmd->base.max_rd_atomic;
+	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+		attr->min_rnr_timer = cmd->base.min_rnr_timer;
+	if (cmd->base.attr_mask & IB_QP_PORT)
+		attr->port_num = cmd->base.port_num;
+	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+		attr->timeout = cmd->base.timeout;
+	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+		attr->retry_cnt = cmd->base.retry_cnt;
+	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+		attr->rnr_retry = cmd->base.rnr_retry;
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+		attr->alt_port_num = cmd->base.alt_port_num;
+		attr->alt_timeout = cmd->base.alt_timeout;
+		attr->alt_pkey_index = cmd->base.alt_pkey_index;
+	}
+	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,

View File

@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
		list_del(&entry->obj_list);
		kfree(entry);
	}
+	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

View File

@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
		kfree(rcu_dereference_protected(*slot, true));
		radix_tree_iter_delete(&uapi->radix, &iter, slot);
	}
+	kfree(uapi);
 }

 struct uverbs_api *uverbs_alloc_api(

View File

@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */ /* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock); static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq; static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
/* SR-IOV helper functions */ /* SR-IOV helper functions */
@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
if (!rdev) if (!rdev)
return; return;
bnxt_re_ib_unreg(rdev, false); bnxt_re_ib_unreg(rdev);
} }
static void bnxt_re_stop_irq(void *handle) static void bnxt_re_stop_irq(void *handle)
@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
/* Driver registration routines used to let the networking driver (bnxt_en) /* Driver registration routines used to let the networking driver (bnxt_en)
* to know that the RoCE driver is now installed * to know that the RoCE driver is now installed
*/ */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{ {
struct bnxt_en_dev *en_dev; struct bnxt_en_dev *en_dev;
int rc; int rc;
@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
return -EINVAL; return -EINVAL;
en_dev = rdev->en_dev; en_dev = rdev->en_dev;
/* Acquire rtnl lock if it is not invoked from a netdev event */
if (lock_wait)
rtnl_lock();
rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
BNXT_ROCE_ULP); BNXT_ROCE_ULP);
if (lock_wait)
rtnl_unlock();
return rc; return rc;
} }
@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
en_dev = rdev->en_dev; en_dev = rdev->en_dev;
rtnl_lock();
rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
&bnxt_re_ulp_ops, rdev); &bnxt_re_ulp_ops, rdev);
rtnl_unlock();
return rc; return rc;
} }
static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{ {
struct bnxt_en_dev *en_dev; struct bnxt_en_dev *en_dev;
int rc; int rc;
@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
en_dev = rdev->en_dev; en_dev = rdev->en_dev;
if (lock_wait)
rtnl_lock();
rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
if (lock_wait)
rtnl_unlock();
return rc; return rc;
} }
@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
rtnl_lock();
num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
rdev->msix_entries, rdev->msix_entries,
num_msix_want); num_msix_want);
@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
} }
rdev->num_msix = num_msix_got; rdev->num_msix = num_msix_got;
done: done:
rtnl_unlock();
return rc; return rc;
} }
@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout; fw_msg->timeout = timeout;
} }
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
bool lock_wait)
{ {
struct bnxt_en_dev *en_dev = rdev->en_dev; struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {0}; struct hwrm_ring_free_input req = {0};
struct hwrm_ring_free_output resp; struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg; struct bnxt_fw_msg fw_msg;
bool do_unlock = false;
int rc = -EINVAL; int rc = -EINVAL;
if (!en_dev) if (!en_dev)
return rc; return rc;
memset(&fw_msg, 0, sizeof(fw_msg)); memset(&fw_msg, 0, sizeof(fw_msg));
if (lock_wait) {
rtnl_lock();
do_unlock = true;
}
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
if (rc) if (rc)
dev_err(rdev_to_dev(rdev), dev_err(rdev_to_dev(rdev),
"Failed to free HW ring:%d :%#x", req.ring_id, rc); "Failed to free HW ring:%d :%#x", req.ring_id, rc);
if (do_unlock)
rtnl_unlock();
return rc; return rc;
} }
@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
return rc; return rc;
memset(&fw_msg, 0, sizeof(fw_msg)); memset(&fw_msg, 0, sizeof(fw_msg));
rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0; req.enables = 0;
req.page_tbl_addr = cpu_to_le64(dma_arr[0]); req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
if (!rc) if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id); *fw_ring_id = le16_to_cpu(resp.ring_id);
rtnl_unlock();
return rc; return rc;
} }
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
u32 fw_stats_ctx_id, bool lock_wait) u32 fw_stats_ctx_id)
{ {
struct bnxt_en_dev *en_dev = rdev->en_dev; struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_stat_ctx_free_input req = {0}; struct hwrm_stat_ctx_free_input req = {0};
struct bnxt_fw_msg fw_msg; struct bnxt_fw_msg fw_msg;
bool do_unlock = false;
int rc = -EINVAL; int rc = -EINVAL;
if (!en_dev) if (!en_dev)
return rc; return rc;
memset(&fw_msg, 0, sizeof(fw_msg)); memset(&fw_msg, 0, sizeof(fw_msg));
if (lock_wait) {
rtnl_lock();
do_unlock = true;
}
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
dev_err(rdev_to_dev(rdev), dev_err(rdev_to_dev(rdev),
"Failed to free HW stats context %#x", rc); "Failed to free HW stats context %#x", rc);
if (do_unlock)
rtnl_unlock();
return rc; return rc;
} }
@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
return rc; return rc;
memset(&fw_msg, 0, sizeof(fw_msg)); memset(&fw_msg, 0, sizeof(fw_msg));
rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000); req.update_period_ms = cpu_to_le32(1000);
@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
if (!rc) if (!rc)
*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
rtnl_unlock();
return rc; return rc;
} }
@ -929,19 +897,19 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
return rc; return rc;
} }
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{ {
int i; int i;
for (i = 0; i < rdev->num_msix - 1; i++) { for (i = 0; i < rdev->num_msix - 1; i++) {
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]); bnxt_qplib_free_nq(&rdev->nq[i]);
} }
} }
static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{ {
bnxt_re_free_nq_res(rdev, lock_wait); bnxt_re_free_nq_res(rdev);
if (rdev->qplib_res.dpi_tbl.max) { if (rdev->qplib_res.dpi_tbl.max) {
bnxt_qplib_dealloc_dpi(&rdev->qplib_res, bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0; return 0;
} }
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{ {
int i, rc; int i, rc;
@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
cancel_delayed_work(&rdev->worker); cancel_delayed_work(&rdev->worker);
bnxt_re_cleanup_res(rdev); bnxt_re_cleanup_res(rdev);
bnxt_re_free_res(rdev, lock_wait); bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
if (rc) if (rc)
dev_warn(rdev_to_dev(rdev), dev_warn(rdev_to_dev(rdev),
"Failed to deinitialize RCFW: %#x", rc); "Failed to deinitialize RCFW: %#x", rc);
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
lock_wait);
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw); bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
} }
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
rc = bnxt_re_free_msix(rdev, lock_wait); rc = bnxt_re_free_msix(rdev);
if (rc) if (rc)
dev_warn(rdev_to_dev(rdev), dev_warn(rdev_to_dev(rdev),
"Failed to free MSI-X vectors: %#x", rc); "Failed to free MSI-X vectors: %#x", rc);
} }
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
rc = bnxt_re_unregister_netdev(rdev, lock_wait); rc = bnxt_re_unregister_netdev(rdev);
if (rc) if (rc)
dev_warn(rdev_to_dev(rdev), dev_warn(rdev_to_dev(rdev),
"Failed to unregister with netdev: %#x", rc); "Failed to unregister with netdev: %#x", rc);
@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{ {
int i, j, rc; int i, j, rc;
bool locked;
/* Acquire rtnl lock throughout this function */
rtnl_lock();
locked = true;
/* Registered a new RoCE device instance to netdev */ /* Registered a new RoCE device instance to netdev */
rc = bnxt_re_register_netdev(rdev); rc = bnxt_re_register_netdev(rdev);
if (rc) { if (rc) {
@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
} }
rtnl_unlock();
locked = false;
/* Register ib dev */ /* Register ib dev */
rc = bnxt_re_register_ib(rdev); rc = bnxt_re_register_ib(rdev);
if (rc) { if (rc) {
pr_err("Failed to register with IB: %#x\n", rc); pr_err("Failed to register with IB: %#x\n", rc);
goto fail; goto fail;
} }
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
dev_info(rdev_to_dev(rdev), "Device registered successfully"); dev_info(rdev_to_dev(rdev), "Device registered successfully");
for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
rc = device_create_file(&rdev->ibdev.dev, rc = device_create_file(&rdev->ibdev.dev,
@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail; goto fail;
} }
} }
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width); &rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
return 0; return 0;
free_sctx: free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx: free_ctx:
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw: disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring: free_ring:
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
free_rcfw: free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw); bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail: fail:
bnxt_re_ib_unreg(rdev, true); if (!locked)
rtnl_lock();
bnxt_re_ib_unreg(rdev);
rtnl_unlock();
return rc; return rc;
} }
@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
*/ */
if (atomic_read(&rdev->sched_count) > 0) if (atomic_read(&rdev->sched_count) > 0)
goto exit; goto exit;
bnxt_re_ib_unreg(rdev, false); bnxt_re_ib_unreg(rdev);
bnxt_re_remove_one(rdev); bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev); bnxt_re_dev_unreg(rdev);
break; break;
@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
*/ */
flush_workqueue(bnxt_re_wq); flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev); bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true); /* Acquire the rtnl_lock as the L2 resources are freed here */
rtnl_lock();
bnxt_re_ib_unreg(rdev);
rtnl_unlock();
bnxt_re_remove_one(rdev); bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev); bnxt_re_dev_unreg(rdev);
} }
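Taken together, the bnxt_re hunks above replace the lock_wait flag that every teardown helper used to thread down to rtnl_lock with a single lock acquisition at the call boundary. A minimal sketch of the two shapes, in plain C with a pthread mutex standing in for rtnl_lock (all names here are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stand-in for rtnl_lock */

    /* Old pattern: every helper conditionally takes the lock itself. */
    static void helper_old(int lock_wait)
    {
            if (lock_wait)
                    pthread_mutex_lock(&rtnl);
            puts("helper body (old)");
            if (lock_wait)
                    pthread_mutex_unlock(&rtnl);
    }

    /* New pattern: the helper simply assumes the caller holds the lock. */
    static void helper_new(void)
    {
            puts("helper body (new)");
    }

    int main(void)
    {
            helper_old(1);             /* old: a flag decides locking */

            pthread_mutex_lock(&rtnl); /* new: one lock at the boundary */
            helper_new();
            helper_new();
            pthread_mutex_unlock(&rtnl);
            return 0;
    }

Holding the lock once at the boundary keeps each helper's locking assumption explicit and removes every conditional lock/unlock pair.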

View File

@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
struct hfi1_devdata *dd = ppd->dd; struct hfi1_devdata *dd = ppd->dd;
struct send_context *sc; struct send_context *sc;
int i; int i;
int sc_flags;
if (flags & FREEZE_SELF) if (flags & FREEZE_SELF)
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
/* notify all SDMA engines that they are going into a freeze */ /* notify all SDMA engines that they are going into a freeze */
sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
SCF_LINK_DOWN : 0);
/* do halt pre-handling on all enabled send contexts */ /* do halt pre-handling on all enabled send contexts */
for (i = 0; i < dd->num_send_contexts; i++) { for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc; sc = dd->send_contexts[i].sc;
if (sc && (sc->flags & SCF_ENABLED)) if (sc && (sc->flags & SCF_ENABLED))
sc_stop(sc, SCF_FROZEN | SCF_HALTED); sc_stop(sc, sc_flags);
} }
/* Send context are frozen. Notify user space */ /* Send context are frozen. Notify user space */
@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
handle_linkup_change(dd, 1); handle_linkup_change(dd, 1);
pio_kernel_linkup(dd);
/* /*
* After link up, a new link width will have been set. * After link up, a new link width will have been set.

View File

@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
unsigned long flags; unsigned long flags;
int write = 1; /* write sendctrl back */ int write = 1; /* write sendctrl back */
int flush = 0; /* re-read sendctrl to make sure it is flushed */ int flush = 0; /* re-read sendctrl to make sure it is flushed */
int i;
spin_lock_irqsave(&dd->sendctrl_lock, flags); spin_lock_irqsave(&dd->sendctrl_lock, flags);
@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
reg |= SEND_CTRL_SEND_ENABLE_SMASK; reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */ /* Fall through */
case PSC_DATA_VL_ENABLE: case PSC_DATA_VL_ENABLE:
mask = 0;
for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
if (!dd->vld[i].mtu)
mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */ /* Disallow sending on VLs not enabled */
mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
SEND_CTRL_UNSUPPORTED_VL_SHIFT; SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break; break;
case PSC_GLOBAL_DISABLE: case PSC_GLOBAL_DISABLE:
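The rewritten mask above derives the unsupported-VL bits from which VLs actually have an MTU configured, rather than from num_vls alone. A plain-C worked example (the MTU values are made up): with only VL0 and VL1 configured, the mask comes out 0xfc.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical per-VL MTUs: only VL0 and VL1 are configured. */
            unsigned int mtu[8] = { 4096, 2048, 0, 0, 0, 0, 0, 0 };
            uint64_t mask = 0;
            unsigned int i;

            for (i = 0; i < 8; i++)
                    if (!mtu[i])
                            mask |= 1ULL << i;   /* BIT_ULL(i) in kernel terms */

            /* VLs 2..7 are disallowed: prints 0xfc */
            printf("unsupported-VL mask = %#llx\n", (unsigned long long)mask);
            return 0;
    }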
@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
void sc_disable(struct send_context *sc) void sc_disable(struct send_context *sc)
{ {
u64 reg; u64 reg;
unsigned long flags;
struct pio_buf *pbuf; struct pio_buf *pbuf;
if (!sc) if (!sc)
return; return;
/* do all steps, even if already disabled */ /* do all steps, even if already disabled */
spin_lock_irqsave(&sc->alloc_lock, flags); spin_lock_irq(&sc->alloc_lock);
reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
sc->flags &= ~SCF_ENABLED; sc->flags &= ~SCF_ENABLED;
sc_wait_for_packet_egress(sc, 1); sc_wait_for_packet_egress(sc, 1);
write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
spin_unlock_irqrestore(&sc->alloc_lock, flags);
/* /*
* Flush any waiters. Once the context is disabled, * Flush any waiters. Once the context is disabled,
@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
* proceed with the flush. * proceed with the flush.
*/ */
udelay(1); udelay(1);
spin_lock_irqsave(&sc->release_lock, flags); spin_lock(&sc->release_lock);
if (sc->sr) { /* this context has a shadow ring */ if (sc->sr) { /* this context has a shadow ring */
while (sc->sr_tail != sc->sr_head) { while (sc->sr_tail != sc->sr_head) {
pbuf = &sc->sr[sc->sr_tail].pbuf; pbuf = &sc->sr[sc->sr_tail].pbuf;
@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
sc->sr_tail = 0; sc->sr_tail = 0;
} }
} }
spin_unlock_irqrestore(&sc->release_lock, flags); spin_unlock(&sc->release_lock);
spin_unlock_irq(&sc->alloc_lock);
} }
/* return SendEgressCtxtStatus.PacketOccupancy */ /* return SendEgressCtxtStatus.PacketOccupancy */
@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
sc = dd->send_contexts[i].sc; sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
continue; continue;
if (sc->flags & SCF_LINK_DOWN)
continue;
sc_enable(sc); /* will clear the sc frozen flag */ sc_enable(sc); /* will clear the sc frozen flag */
} }
} }
/**
* pio_kernel_linkup() - Re-enable send contexts after linkup event
* @dd: valid device data
*
* When the link goes down, the freeze path is taken. However, a link down
* event is different from a freeze because if the send context is re-enabled,
* whoever was sending data will start sending again, which will hang any QP
* that is sending data.
*
* The freeze path now looks at the type of event that occurred and takes this
* path for a link down event.
*/
void pio_kernel_linkup(struct hfi1_devdata *dd)
{
struct send_context *sc;
int i;
for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
continue;
sc_enable(sc); /* will clear the sc link down flag */
}
}
/* /*
* Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
* Returns: * Returns:
@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
{ {
unsigned long flags; unsigned long flags;
/* mark the context */
sc->flags |= flag;
/* stop buffer allocations */ /* stop buffer allocations */
spin_lock_irqsave(&sc->alloc_lock, flags); spin_lock_irqsave(&sc->alloc_lock, flags);
/* mark the context */
sc->flags |= flag;
sc->flags &= ~SCF_ENABLED; sc->flags &= ~SCF_ENABLED;
spin_unlock_irqrestore(&sc->alloc_lock, flags); spin_unlock_irqrestore(&sc->alloc_lock, flags);
wake_up(&sc->halt_wait); wake_up(&sc->halt_wait);


@ -139,6 +139,7 @@ struct send_context {
#define SCF_IN_FREE 0x02 #define SCF_IN_FREE 0x02
#define SCF_HALTED 0x04 #define SCF_HALTED 0x04
#define SCF_FROZEN 0x08 #define SCF_FROZEN 0x08
#define SCF_LINK_DOWN 0x10
struct send_context_info { struct send_context_info {
struct send_context *sc; /* allocated working context */ struct send_context *sc; /* allocated working context */
@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
void pio_reset_all(struct hfi1_devdata *dd); void pio_reset_all(struct hfi1_devdata *dd);
void pio_freeze(struct hfi1_devdata *dd); void pio_freeze(struct hfi1_devdata *dd);
void pio_kernel_unfreeze(struct hfi1_devdata *dd); void pio_kernel_unfreeze(struct hfi1_devdata *dd);
void pio_kernel_linkup(struct hfi1_devdata *dd);
/* global PIO send control operations */ /* global PIO send control operations */
#define PSC_GLOBAL_ENABLE 0 #define PSC_GLOBAL_ENABLE 0


@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) { if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT; ret = -EFAULT;
goto free_txreq; goto free_tx;
} }
iovec = &req->iovs[req->iov_idx]; iovec = &req->iovs[req->iov_idx];
WARN_ON(iovec->offset); WARN_ON(iovec->offset);


@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd; struct hfi1_devdata *dd;
u8 sc5; u8 sc5;
u8 sl;
if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
/* test the mapping for validity */ /* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp); ppd = ppd_from_ibp(ibp);
sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd); dd = dd_from_ppd(ppd);
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL; return -EINVAL;
return 0; return 0;


@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj; struct devx_obj *obj;
int err; int err;
@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err) if (err)
goto obj_free; goto obj_destroy;
return 0; return 0;
obj_destroy:
mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free: obj_free:
kfree(obj); kfree(obj);
return err; return err;
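The relabelled unwind path matters because once the firmware object exists, a failed copy to user space must destroy it in firmware too; a bare kfree() would leak the firmware-side resource. A generic sketch of the layered goto-unwind idiom in plain C (fw_create, fw_destroy and friends are invented stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int fw_id; };

    static int fw_create(struct obj *o) { o->fw_id = 42; return 0; }
    static void fw_destroy(struct obj *o) { printf("destroy fw obj %d\n", o->fw_id); }
    static int copy_to_user_stub(void) { return -1; /* simulate failure */ }

    static int create(void)
    {
            struct obj *o = malloc(sizeof(*o));
            int err;

            if (!o)
                    return -1;
            err = fw_create(o);
            if (err)
                    goto obj_free;
            err = copy_to_user_stub();
            if (err)
                    goto obj_destroy;       /* undo the firmware side too */
            return 0;

    obj_destroy:
            fw_destroy(o);                  /* layered unwind: newest first */
    obj_free:
            free(o);
            return err;
    }

    int main(void) { return create() ? 1 : 0; }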


@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{ {
struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch; struct srp_rdma_ch *ch;
int i; int i, j;
u8 status; u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
for (i = 0; i < target->ch_count; i++) { for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i]; ch = &target->ch[i];
for (i = 0; i < target->req_ring_size; ++i) { for (j = 0; j < target->req_ring_size; ++j) {
struct srp_request *req = &ch->req_ring[i]; struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
} }
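The srp_reset_device fix is the classic shadowed-loop-counter bug: the inner loop reused i, so the outer walk over channels terminated after the first pass. A minimal plain-C reproduction:

    #include <stdio.h>

    int main(void)
    {
            int i, j, outer = 0;

            /* Buggy shape: inner loop reuses i, so the outer loop runs once. */
            for (i = 0; i < 3; i++) {
                    for (i = 0; i < 5; ++i)
                            ;
                    outer++;        /* i is now 5, the outer condition fails */
            }
            printf("buggy: outer ran %d time(s)\n", outer);   /* prints 1 */

            /* Fixed shape: distinct counters. */
            outer = 0;
            for (i = 0; i < 3; i++) {
                    for (j = 0; j < 5; ++j)
                            ;
                    outer++;
            }
            printf("fixed: outer ran %d time(s)\n", outer);   /* prints 3 */
            return 0;
    }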


@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
*/ */
static unsigned char atakbd_keycode[0x72] = { /* American layout */ static unsigned char atakbd_keycode[0x73] = { /* American layout */
[0] = KEY_GRAVE,
[1] = KEY_ESC, [1] = KEY_ESC,
[2] = KEY_1, [2] = KEY_1,
[3] = KEY_2, [3] = KEY_2,
@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[38] = KEY_L, [38] = KEY_L,
[39] = KEY_SEMICOLON, [39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE, [40] = KEY_APOSTROPHE,
[41] = KEY_BACKSLASH, /* FIXME, '#' */ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT, [42] = KEY_LEFTSHIFT,
[43] = KEY_GRAVE, /* FIXME: '~' */ [43] = KEY_BACKSLASH,
[44] = KEY_Z, [44] = KEY_Z,
[45] = KEY_X, [45] = KEY_X,
[46] = KEY_C, [46] = KEY_C,
@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[66] = KEY_F8, [66] = KEY_F8,
[67] = KEY_F9, [67] = KEY_F9,
[68] = KEY_F10, [68] = KEY_F10,
[69] = KEY_ESC, [71] = KEY_HOME,
[70] = KEY_DELETE, [72] = KEY_UP,
[71] = KEY_KP7,
[72] = KEY_KP8,
[73] = KEY_KP9,
[74] = KEY_KPMINUS, [74] = KEY_KPMINUS,
[75] = KEY_KP4, [75] = KEY_LEFT,
[76] = KEY_KP5, [77] = KEY_RIGHT,
[77] = KEY_KP6,
[78] = KEY_KPPLUS, [78] = KEY_KPPLUS,
[79] = KEY_KP1, [80] = KEY_DOWN,
[80] = KEY_KP2, [82] = KEY_INSERT,
[81] = KEY_KP3, [83] = KEY_DELETE,
[82] = KEY_KP0,
[83] = KEY_KPDOT,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
[92] = KEY_KPASTERISK, /* FIXME */
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
[96] = KEY_102ND, [96] = KEY_102ND,
[97] = KEY_KPASTERISK, /* FIXME */ [97] = KEY_UNDO,
[98] = KEY_KPSLASH, [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN, [99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN, [100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH, [101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK, [102] = KEY_KPASTERISK,
[103] = KEY_UP, [103] = KEY_KP7,
[104] = KEY_KPASTERISK, /* FIXME */ [104] = KEY_KP8,
[105] = KEY_LEFT, [105] = KEY_KP9,
[106] = KEY_RIGHT, [106] = KEY_KP4,
[107] = KEY_KPASTERISK, /* FIXME */ [107] = KEY_KP5,
[108] = KEY_DOWN, [108] = KEY_KP6,
[109] = KEY_KPASTERISK, /* FIXME */ [109] = KEY_KP1,
[110] = KEY_KPASTERISK, /* FIXME */ [110] = KEY_KP2,
[111] = KEY_KPASTERISK, /* FIXME */ [111] = KEY_KP3,
[112] = KEY_KPASTERISK, /* FIXME */ [112] = KEY_KP0,
[113] = KEY_KPASTERISK /* FIXME */ [113] = KEY_KPDOT,
[114] = KEY_KPENTER,
}; };
static struct input_dev *atakbd_dev; static struct input_dev *atakbd_dev;
@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down) static void atakbd_interrupt(unsigned char scancode, char down)
{ {
if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here? // report raw events here?
scancode = atakbd_keycode[scancode]; scancode = atakbd_keycode[scancode];
if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ input_report_key(atakbd_dev, scancode, down);
input_report_key(atakbd_dev, scancode, 1); input_sync(atakbd_dev);
input_report_key(atakbd_dev, scancode, 0); } else /* scancodes >= 0xf3 are mouse data, most likely */
input_sync(atakbd_dev);
} else {
input_report_key(atakbd_dev, scancode, down);
input_sync(atakbd_dev);
}
} else /* scancodes >= 0xf2 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return; return;


@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
min = abs->minimum; min = abs->minimum;
max = abs->maximum; max = abs->maximum;
if ((min != 0 || max != 0) && max <= min) { if ((min != 0 || max != 0) && max < min) {
printk(KERN_DEBUG printk(KERN_DEBUG
"%s: invalid abs[%02x] min:%d max:%d\n", "%s: invalid abs[%02x] min:%d max:%d\n",
UINPUT_NAME, code, min, max); UINPUT_NAME, code, min, max);
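Relaxing max <= min to max < min lets min == max through, which describes a legal degenerate axis reporting one fixed value; only an inverted range remains an error. A plain-C sketch of the predicate with the three interesting cases:

    #include <stdio.h>

    /* Returns 1 if the absinfo range is invalid, mirroring the fixed check. */
    static int absinfo_range_invalid(int min, int max)
    {
            return (min != 0 || max != 0) && max < min;
    }

    int main(void)
    {
            printf("min=0  max=0  -> %s\n", absinfo_range_invalid(0, 0)  ? "invalid" : "ok");
            printf("min=5  max=5  -> %s\n", absinfo_range_invalid(5, 5)  ? "invalid" : "ok"); /* now accepted */
            printf("min=10 max=2  -> %s\n", absinfo_range_invalid(10, 2) ? "invalid" : "ok"); /* still rejected */
            return 0;
    }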


@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
static const char * const middle_button_pnp_ids[] = { static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */ "LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */ "LEN2132", /* ThinkPad P52 */
"LEN2133", /* ThinkPad P72 w/ NFC */
"LEN2134", /* ThinkPad P72 */
NULL NULL
}; };

View File

@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev); struct i2c_client *client = to_i2c_client(dev);
int ret; int ret;
if (device_may_wakeup(dev))
return enable_irq_wake(client->irq);
ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
return ret > 0 ? 0 : ret; return ret > 0 ? 0 : ret;
} }
@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
{ {
struct i2c_client *client = to_i2c_client(dev); struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(dev))
return disable_irq_wake(client->irq);
return egalax_wake_up_device(client); return egalax_wake_up_device(client);
} }
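The egalax hunks follow the usual symmetric wakeup idiom: when the device may wake the system, arm its interrupt as a wake source on suspend and disarm it on resume, skipping the normal power-down/power-up commands. A hedged kernel-style sketch of that shape (example_suspend and example_resume are illustrative, not the driver's code):

    #include <linux/i2c.h>
    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    static int example_suspend(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            if (device_may_wakeup(dev))
                    return enable_irq_wake(client->irq); /* keep the IRQ armed */

            /* otherwise: send the device its normal suspend command here */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            if (device_may_wakeup(dev))
                    return disable_irq_wake(client->irq); /* balance suspend */

            /* otherwise: wake the device back up here */
            return 0;
    }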


@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
/* The callers make sure that get_device_id() does not fail here */ /* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev); devid = get_device_id(dev);
/* For ACPI HID devices, we simply return the devid as such */
if (!dev_is_pci(dev))
return devid;
ivrs_alias = amd_iommu_alias_table[devid]; ivrs_alias = amd_iommu_alias_table[devid];
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
if (ivrs_alias == pci_alias) if (ivrs_alias == pci_alias)


@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev) && info->pasid_supported) { if (dev && dev_is_pci(dev) && info->pasid_supported) {
ret = intel_pasid_alloc_table(dev); ret = intel_pasid_alloc_table(dev);
if (ret) { if (ret) {
__dmar_remove_one_dev_info(info); pr_warn("No pasid table for %s, pasid disabled\n",
spin_unlock_irqrestore(&device_domain_lock, flags); dev_name(dev));
return NULL; info->pasid_supported = 0;
} }
} }
spin_unlock_irqrestore(&device_domain_lock, flags); spin_unlock_irqrestore(&device_domain_lock, flags);

View File

@ -11,7 +11,7 @@
#define __INTEL_PASID_H #define __INTEL_PASID_H
#define PASID_MIN 0x1 #define PASID_MIN 0x1
#define PASID_MAX 0x100000 #define PASID_MAX 0x20000
struct pasid_entry { struct pasid_entry {
u64 val; u64 val;


@ -1241,6 +1241,12 @@ static int rk_iommu_probe(struct platform_device *pdev)
static void rk_iommu_shutdown(struct platform_device *pdev) static void rk_iommu_shutdown(struct platform_device *pdev)
{ {
struct rk_iommu *iommu = platform_get_drvdata(pdev);
int i = 0, irq;
while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
pm_runtime_force_suspend(&pdev->dev); pm_runtime_force_suspend(&pdev->dev);
} }
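The rk_iommu shutdown fix releases every interrupt before forcing the device into suspend, so no handler can run against powered-down hardware; platform_get_irq() is simply walked until it reports -ENXIO. A kernel-style sketch of the same loop (example_shutdown and drvdata are illustrative):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static void example_shutdown(struct platform_device *pdev)
    {
            void *drvdata = platform_get_drvdata(pdev);
            int i = 0, irq;

            /* Walk the IRQ list until platform_get_irq() runs out (-ENXIO). */
            while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
                    devm_free_irq(&pdev->dev, irq, drvdata);

            pm_runtime_force_suspend(&pdev->dev);
    }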


@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq; extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct mutex bch_register_lock; extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets; extern struct list_head bch_cache_sets;


@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
closure_get(&ca->set->cl); closure_get(&ca->set->cl);
INIT_WORK(&ja->discard_work, journal_discard_work); INIT_WORK(&ja->discard_work, journal_discard_work);
schedule_work(&ja->discard_work); queue_work(bch_journal_wq, &ja->discard_work);
} }
} }
@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
: &j->w[0]; : &j->w[0];
__closure_wake_up(&w->wait); __closure_wake_up(&w->wait);
continue_at_nobarrier(cl, journal_write, system_wq); continue_at_nobarrier(cl, journal_write, bch_journal_wq);
} }
static void journal_write_unlock(struct closure *cl) static void journal_write_unlock(struct closure *cl)
@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
spin_unlock(&c->journal.lock); spin_unlock(&c->journal.lock);
btree_flush_write(c); btree_flush_write(c);
continue_at(cl, journal_write, system_wq); continue_at(cl, journal_write, bch_journal_wq);
return; return;
} }


@ -47,6 +47,7 @@ static int bcache_major;
static DEFINE_IDA(bcache_device_idx); static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait; static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq; struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */ /* limitation of partitions number on single bcache device */
@ -2341,6 +2342,9 @@ static void bcache_exit(void)
kobject_put(bcache_kobj); kobject_put(bcache_kobj);
if (bcache_wq) if (bcache_wq)
destroy_workqueue(bcache_wq); destroy_workqueue(bcache_wq);
if (bch_journal_wq)
destroy_workqueue(bch_journal_wq);
if (bcache_major) if (bcache_major)
unregister_blkdev(bcache_major, "bcache"); unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot); unregister_reboot_notifier(&reboot);
@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
if (!bcache_wq) if (!bcache_wq)
goto err; goto err;
bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
if (!bch_journal_wq)
goto err;
bcache_kobj = kobject_create_and_add("bcache", fs_kobj); bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
if (!bcache_kobj) if (!bcache_kobj)
goto err; goto err;
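bcache moves journal work from system_wq onto its own WQ_MEM_RECLAIM workqueue, which guarantees a rescuer thread so journal writes can make progress even when memory reclaim is itself waiting on the journal. The create/queue/destroy shape, as a hedged kernel-style sketch with invented names:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_journal_wq;
    static struct work_struct example_work;

    static void example_work_fn(struct work_struct *w) { /* flush journal */ }

    static int example_init(void)
    {
            example_journal_wq = alloc_workqueue("example_journal",
                                                 WQ_MEM_RECLAIM, 0);
            if (!example_journal_wq)
                    return -ENOMEM;

            INIT_WORK(&example_work, example_work_fn);
            queue_work(example_journal_wq, &example_work); /* not system_wq */
            return 0;
    }

    static void example_exit(void)
    {
            if (example_journal_wq)
                    destroy_workqueue(example_journal_wq);
    }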


@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
V4L2_CID_AUTO_WHITE_BALANCE, V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1, 0, 1, 1,
V4L2_WHITE_BALANCE_AUTO); V4L2_WHITE_BALANCE_AUTO);
if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
ret = PTR_ERR(mt9v111->auto_awb);
goto error_free_ctrls;
}
mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
&mt9v111_ctrl_ops, &mt9v111_ctrl_ops,
V4L2_CID_EXPOSURE_AUTO, V4L2_CID_EXPOSURE_AUTO,
V4L2_EXPOSURE_MANUAL, V4L2_EXPOSURE_MANUAL,
0, V4L2_EXPOSURE_AUTO); 0, V4L2_EXPOSURE_AUTO);
if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
ret = PTR_ERR(mt9v111->auto_exp);
goto error_free_ctrls;
}
/* Initialize timings */
mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_HBLANK, V4L2_CID_HBLANK,
MT9V111_CORE_R05_MIN_HBLANK, MT9V111_CORE_R05_MIN_HBLANK,
MT9V111_CORE_R05_MAX_HBLANK, 1, MT9V111_CORE_R05_MAX_HBLANK, 1,
MT9V111_CORE_R05_DEF_HBLANK); MT9V111_CORE_R05_DEF_HBLANK);
if (IS_ERR_OR_NULL(mt9v111->hblank)) {
ret = PTR_ERR(mt9v111->hblank);
goto error_free_ctrls;
}
mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_VBLANK, V4L2_CID_VBLANK,
MT9V111_CORE_R06_MIN_VBLANK, MT9V111_CORE_R06_MIN_VBLANK,
MT9V111_CORE_R06_MAX_VBLANK, 1, MT9V111_CORE_R06_MAX_VBLANK, 1,
MT9V111_CORE_R06_DEF_VBLANK); MT9V111_CORE_R06_DEF_VBLANK);
if (IS_ERR_OR_NULL(mt9v111->vblank)) {
ret = PTR_ERR(mt9v111->vblank);
goto error_free_ctrls;
}
/* PIXEL_RATE is fixed: just expose it to user space. */ /* PIXEL_RATE is fixed: just expose it to user space. */
v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
if (mt9v111->ctrls.error) {
ret = mt9v111->ctrls.error;
goto error_free_ctrls;
}
mt9v111->sd.ctrl_handler = &mt9v111->ctrls; mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
/* Start with default configuration: 640x480 UYVY. */ /* Start with default configuration: 640x480 UYVY. */
@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
if (ret) if (ret)
goto error_free_ctrls; goto error_free_entity;
#endif #endif
ret = mt9v111_chip_probe(mt9v111); ret = mt9v111_chip_probe(mt9v111);
if (ret) if (ret)
goto error_free_ctrls; goto error_free_entity;
ret = v4l2_async_register_subdev(&mt9v111->sd); ret = v4l2_async_register_subdev(&mt9v111->sd);
if (ret) if (ret)
goto error_free_ctrls; goto error_free_entity;
return 0; return 0;
error_free_ctrls: error_free_entity:
v4l2_ctrl_handler_free(&mt9v111->ctrls);
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&mt9v111->sd.entity); media_entity_cleanup(&mt9v111->sd.entity);
#endif #endif
error_free_ctrls:
v4l2_ctrl_handler_free(&mt9v111->ctrls);
mutex_destroy(&mt9v111->pwr_mutex); mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex); mutex_destroy(&mt9v111->stream_mutex);
@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd); v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&mt9v111->ctrls);
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity); media_entity_cleanup(&sd->entity);
#endif #endif
v4l2_ctrl_handler_free(&mt9v111->ctrls);
mutex_destroy(&mt9v111->pwr_mutex); mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex); mutex_destroy(&mt9v111->stream_mutex);


@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
depends on MFD_CROS_EC depends on MFD_CROS_EC
select CEC_CORE select CEC_CORE
select CEC_NOTIFIER select CEC_NOTIFIER
select CHROME_PLATFORMS
select CROS_EC_PROTO
---help--- ---help---
If you say yes here you will get support for the If you say yes here you will get support for the
ChromeOS Embedded Controller's CEC. ChromeOS Embedded Controller's CEC.


@ -10,6 +10,7 @@
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>


@ -12,6 +12,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))


@ -12,6 +12,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))


@ -10,6 +10,7 @@
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>


@ -10,6 +10,7 @@
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h> #include <linux/iopoll.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mutex.h> #include <linux/mutex.h>
@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
else else
return -EINVAL; return -EINVAL;
ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
GFP_KERNEL); GFP_KERNEL);
if (!ispif->line) if (!ispif->line)
return -ENOMEM; return -ENOMEM;


@ -9,6 +9,7 @@
*/ */
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h> #include <linux/iopoll.h>
#include "camss-vfe.h" #include "camss-vfe.h"


@ -9,6 +9,7 @@
*/ */
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h> #include <linux/iopoll.h>
#include "camss-vfe.h" #include "camss-vfe.h"


@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
return -EINVAL; return -EINVAL;
} }
camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
GFP_KERNEL); sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy) if (!camss->csiphy)
return -ENOMEM; return -ENOMEM;
camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
GFP_KERNEL); GFP_KERNEL);
if (!camss->csid) if (!camss->csid)
return -ENOMEM; return -ENOMEM;
camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
GFP_KERNEL);
if (!camss->vfe) if (!camss->vfe)
return -ENOMEM; return -ENOMEM;
@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
MODULE_DEVICE_TABLE(of, camss_dt_match); MODULE_DEVICE_TABLE(of, camss_dt_match);
static int camss_runtime_suspend(struct device *dev) static int __maybe_unused camss_runtime_suspend(struct device *dev)
{ {
return 0; return 0;
} }
static int camss_runtime_resume(struct device *dev) static int __maybe_unused camss_runtime_resume(struct device *dev)
{ {
return 0; return 0;
} }


@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1]) if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000; reg |= 0x100000;
ret = af9035_wr_regs(d, reg, &msg[0].buf[3], ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
msg[0].len - 3); &msg[0].buf[3],
msg[0].len - 3)
: -EOPNOTSUPP;
} else { } else {
/* I2C write */ /* I2C write */
u8 buf[MAX_XFER_SIZE]; u8 buf[MAX_XFER_SIZE];


@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
struct slave *slave = NULL; struct slave *slave = NULL;
struct list_head *iter; struct list_head *iter;
struct ad_info ad_info; struct ad_info ad_info;
struct netpoll_info *ni;
const struct net_device_ops *ops;
if (BOND_MODE(bond) == BOND_MODE_8023AD) if (BOND_MODE(bond) == BOND_MODE_8023AD)
if (bond_3ad_get_active_agg_info(bond, &ad_info)) if (bond_3ad_get_active_agg_info(bond, &ad_info))
return; return;
bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu(bond, slave, iter) {
ops = slave->dev->netdev_ops; if (!bond_slave_is_up(slave))
if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
continue; continue;
if (BOND_MODE(bond) == BOND_MODE_8023AD) { if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
continue; continue;
} }
ni = rcu_dereference_bh(slave->dev->npinfo); netpoll_poll_dev(slave->dev);
if (down_trylock(&ni->dev_lock))
continue;
ops->ndo_poll_controller(slave->dev);
up(&ni->dev_lock);
} }
} }
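With ndo_poll_controller gone from the slaves, bond_poll_controller now defers to netpoll_poll_dev(), which as of this series appears to take the device lock and drive NAPI inside the netpoll core itself. A hedged kernel-style sketch of the resulting loop; the slave array and count stand in for bonding's bond_for_each_slave_rcu iterator:

    #include <linux/netdevice.h>
    #include <linux/netpoll.h>

    static void example_poll_controller(struct net_device *slaves[], int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (!netif_running(slaves[i]))
                            continue;
                    /* netpoll core handles locking and NAPI scheduling */
                    netpoll_poll_dev(slaves[i]);
            }
    }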


@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev); static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t); static void bmac_tx_timeout(struct timer_list *t);
static int bmac_output(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev); static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x) ( ((x) | (x) << 16) ) #define DBDMA_SET(x) ( ((x) | (x) << 16) )
@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags); spin_unlock_irqrestore(&bp->lock, flags);
} }
static int static netdev_tx_t
bmac_output(struct sk_buff *skb, struct net_device *dev) bmac_output(struct sk_buff *skb, struct net_device *dev)
{ {
struct bmac_data *bp = netdev_priv(dev); struct bmac_data *bp = netdev_priv(dev);
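The bmac change is one instance of a treewide cleanup in this merge: ndo_start_xmit implementations are retyped from int to netdev_tx_t so the prototype matches the net_device_ops hook and the return values are the dedicated NETDEV_TX_* codes rather than bare integers. A hedged sketch of the expected shape (example_xmit and ring_full are invented for illustration):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            bool ring_full = false;  /* stand-in for a real ring-space check */

            if (ring_full) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* core will retry this skb */
            }

            /* ... hand the skb to hardware here ... */
            dev_kfree_skb_any(skb);         /* placeholder for real TX completion */
            return NETDEV_TX_OK;
    }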


@ -78,7 +78,7 @@ struct mace_data {
static int mace_open(struct net_device *dev); static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev); static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev); static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev); static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr); static int mace_set_address(struct net_device *dev, void *addr);
@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
mp->timeout_active = 1; mp->timeout_active = 1;
} }
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{ {
struct mace_data *mp = netdev_priv(dev); struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *td = mp->tx_dma; volatile struct dbdma_regs __iomem *td = mp->tx_dma;


@ -89,7 +89,7 @@ struct mace_frame {
static int mace_open(struct net_device *dev); static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev); static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev); static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr); static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev); static void mace_reset(struct net_device *dev);
@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
* Transmit a frame * Transmit a frame
*/ */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{ {
struct mace_data *mp = netdev_priv(dev); struct mace_data *mp = netdev_priv(dev);
unsigned long flags; unsigned long flags;


@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
} }
/* for single fragment packets use build_skb() */ /* for single fragment packets use build_skb() */
if (buff->is_eop) { if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
skb = build_skb(page_address(buff->page), skb = build_skb(page_address(buff->page),
buff->len + AQ_SKB_ALIGN); AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) { if (unlikely(!skb)) {
err = -ENOMEM; err = -ENOMEM;
goto err_exit; goto err_exit;
@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->len - ETH_HLEN, buff->len - ETH_HLEN,
SKB_TRUESIZE(buff->len - ETH_HLEN)); SKB_TRUESIZE(buff->len - ETH_HLEN));
for (i = 1U, next_ = buff->next, if (!buff->is_eop) {
buff_ = &self->buff_ring[next_]; true; for (i = 1U, next_ = buff->next,
next_ = buff_->next, buff_ = &self->buff_ring[next_];
buff_ = &self->buff_ring[next_], ++i) { true; next_ = buff_->next,
skb_add_rx_frag(skb, i, buff_->page, 0, buff_ = &self->buff_ring[next_], ++i) {
buff_->len, skb_add_rx_frag(skb, i,
SKB_TRUESIZE(buff->len - buff_->page, 0,
ETH_HLEN)); buff_->len,
buff_->is_cleaned = 1; SKB_TRUESIZE(buff->len -
ETH_HLEN));
buff_->is_cleaned = 1;
if (buff_->is_eop) if (buff_->is_eop)
break; break;
}
} }
} }
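build_skb() turns the receive page itself into the skb head, which is only safe when the frame is a single fragment and leaves room for the trailing skb_shared_info; the hunk above gates the fast path on exactly that. A plain-C sketch of the decision, with stand-in constants for AQ_CFG_RX_FRAME_MAX and AQ_SKB_ALIGN:

    #include <stdbool.h>
    #include <stdio.h>

    #define RX_FRAME_MAX 2048   /* stand-in for AQ_CFG_RX_FRAME_MAX */
    #define SKB_ALIGN     320   /* stand-in for AQ_SKB_ALIGN (shinfo room) */

    /* Decide whether a received buffer may be wrapped via build_skb(). */
    static bool can_build_skb(bool is_eop, unsigned int len)
    {
            /* Single-fragment frame AND room left for skb_shared_info. */
            return is_eop && len <= RX_FRAME_MAX - SKB_ALIGN;
    }

    int main(void)
    {
            printf("%d\n", can_build_skb(true, 1500));  /* 1: build_skb path */
            printf("%d\n", can_build_skb(true, 1900));  /* 0: copy/frag path */
            printf("%d\n", can_build_skb(false, 100));  /* 0: multi-fragment */
            return 0;
    }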


@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
} }
} }
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int i;
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
}
}
#endif
static int bnx2x_validate_addr(struct net_device *dev) static int bnx2x_validate_addr(struct net_device *dev)
{ {
struct bnx2x *bp = netdev_priv(dev); struct bnx2x *bp = netdev_priv(dev);
@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_tx_timeout = bnx2x_tx_timeout, .ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = __bnx2x_setup_tc, .ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV #ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_mac = bnx2x_set_vf_mac,


@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp); bnxt_queue_sp_work(bp);
} }
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnxt_poll_controller(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
int i;
/* Only process tx rings/combined rings in netpoll mode. */
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
napi_schedule(&txr->bnapi->napi);
}
}
#endif
static void bnxt_timer(struct timer_list *t) static void bnxt_timer(struct timer_list *t)
{ {
struct bnxt *bp = from_timer(bp, t, timer); struct bnxt *bp = from_timer(bp, t, timer);
@ -8519,9 +8504,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_set_vf_link_state = bnxt_set_vf_link_state, .ndo_set_vf_link_state = bnxt_set_vf_link_state,
.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
.ndo_set_vf_trust = bnxt_set_vf_trust, .ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnxt_poll_controller,
#endif #endif
.ndo_setup_tc = bnxt_setup_tc, .ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL


@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
} }
} }
if (i == ARRAY_SIZE(nvm_params))
return -EOPNOTSUPP;
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id; idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
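The added bounds check is the standard guard for a linear search that may not match: if the index equals ARRAY_SIZE() after the loop, nothing was found and the lookup result must not be used. The idiom in isolation (plain C, illustrative table):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct param { int id; const char *name; };

    static const struct param params[] = {
            { 1, "port_cfg" }, { 2, "func_cfg" },
    };

    static const char *lookup(int id)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(params); i++)
                    if (params[i].id == id)
                            break;

            if (i == ARRAY_SIZE(params))    /* fell off the end: no match */
                    return NULL;            /* caller maps this to -EOPNOTSUPP */

            return params[i].name;
    }

    int main(void)
    {
            printf("%s\n", lookup(2) ? lookup(2) : "unsupported");
            printf("%s\n", lookup(9) ? lookup(9) : "unsupported");
            return 0;
    }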


@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return 0; return 0;
} }
static void bnxt_tc_parse_vlan(struct bnxt *bp, static int bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions, struct bnxt_tc_actions *actions,
const struct tc_action *tc_act) const struct tc_action *tc_act)
{ {
if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { switch (tcf_vlan_action(tc_act)) {
case TCA_VLAN_ACT_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { break;
case TCA_VLAN_ACT_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
break;
default:
return -EOPNOTSUPP;
} }
return 0;
} }
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
/* Push/pop VLAN */ /* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) { if (is_tcf_vlan(tc_act)) {
bnxt_tc_parse_vlan(bp, actions, tc_act); rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
if (rc)
return rc;
continue; continue;
} }


@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
}; };
struct cpl_abort_req_rss6 { struct cpl_abort_req_rss6 {
WR_HDR;
union opcode_tid ot; union opcode_tid ot;
__be32 srqidx_status; __be32 srqidx_status;
}; };


@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
return rx; return rx;
} }
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ep93xx_priv *ep = netdev_priv(dev); struct ep93xx_priv *ep = netdev_priv(dev);
struct ep93xx_tdesc *txd; struct ep93xx_tdesc *txd;


@ -113,7 +113,7 @@ struct net_local {
/* Index to functions, as function prototypes. */ /* Index to functions, as function prototypes. */
static int net_open(struct net_device *dev); static int net_open(struct net_device *dev);
static int net_send_packet(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id); static irqreturn_t net_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev); static void set_multicast_list(struct net_device *dev);
static void net_rx(struct net_device *dev); static void net_rx(struct net_device *dev);
@ -324,7 +324,7 @@ net_open(struct net_device *dev)
return 0; return 0;
} }
static int static netdev_tx_t
net_send_packet(struct sk_buff *skb, struct net_device *dev) net_send_packet(struct sk_buff *skb, struct net_device *dev)
{ {
struct net_local *lp = netdev_priv(dev); struct net_local *lp = netdev_priv(dev);


@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
#define RX_AREA_END 0x0fc00 #define RX_AREA_END 0x0fc00
static int ether1_open(struct net_device *dev); static int ether1_open(struct net_device *dev);
static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id); static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev); static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev); static void ether1_setmulticastlist(struct net_device *dev);
@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
netif_wake_queue(dev); netif_wake_queue(dev);
} }
static int static netdev_tx_t
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{ {
int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;


@ -347,7 +347,7 @@ static const char init_setup[] =
0x7f /* *multi IA */ }; 0x7f /* *multi IA */ };
static int i596_open(struct net_device *dev); static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id); static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev); static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
} }
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct i596_private *lp = netdev_priv(dev); struct i596_private *lp = netdev_priv(dev);
struct tx_cmd *tx_cmd; struct tx_cmd *tx_cmd;


@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
 static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
 static int sun3_82586_open(struct net_device *dev);
 static int sun3_82586_close(struct net_device *dev);
-static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+                                          struct net_device *);
 static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
  * send frame
  */
 
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
         int len,i;
 #ifndef NO_NOPCOMMANDS
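
For reference, the ops-table member these handlers are wired into is the reason for the whole series of conversions; trimmed here to the relevant field of include/linux/netdevice.h:

struct net_device_ops {
        /* ...other hooks elided... */
        netdev_tx_t     (*ndo_start_xmit)(struct sk_buff *skb,
                                          struct net_device *dev);
        /* ... */
};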


@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
         if (of_phy_is_fixed_link(np)) {
                 int res = emac_dt_mdio_probe(dev);
 
-                if (!res) {
-                        res = of_phy_register_fixed_link(np);
-                        if (res)
-                                mdiobus_unregister(dev->mii_bus);
+                if (res)
+                        return res;
+
+                res = of_phy_register_fixed_link(np);
+                dev->phy_dev = of_phy_find_device(np);
+                if (res || !dev->phy_dev) {
+                        mdiobus_unregister(dev->mii_bus);
+                        return res ? res : -EINVAL;
                 }
-                return res;
+                emac_adjust_link(dev->ndev);
+                put_device(&dev->phy_dev->mdio.dev);
         }
         return 0;
 }
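
Restated without diff markers, the fixed-link path of emac_init_phy() now reads as below. The old code registered the fixed link but never looked up the resulting PHY device, so dev->phy_dev stayed NULL and the link parameters were never applied; the new code binds the PHY, unwinds on failure, and drops the extra device reference:

res = emac_dt_mdio_probe(dev);
if (res)
        return res;

res = of_phy_register_fixed_link(np);
dev->phy_dev = of_phy_find_device(np);
if (res || !dev->phy_dev) {
        mdiobus_unregister(dev->mii_bus);       /* unwind emac_dt_mdio_probe() */
        return res ? res : -EINVAL;
}
emac_adjust_link(dev->ndev);                    /* apply the fixed-link settings */
put_device(&dev->phy_dev->mdio.dev);            /* balance of_phy_find_device() */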


@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
 void fm10k_service_event_schedule(struct fm10k_intfc *interface);
 void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
 void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
 
 /* Netdev */
 struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);


@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
         .ndo_udp_tunnel_del     = fm10k_udp_tunnel_del,
         .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
         .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-        .ndo_poll_controller    = fm10k_netpoll,
-#endif
         .ndo_features_check     = fm10k_features_check,
 };


@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
         return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
-        struct fm10k_intfc *interface = netdev_priv(netdev);
-        int i;
-
-        /* if interface is down do nothing */
-        if (test_bit(__FM10K_DOWN, interface->state))
-                return;
-
-        for (i = 0; i < interface->num_q_vectors; i++)
-                fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-#endif
-
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
                                struct fm10k_fault *fault)
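
This hunk, and the matching ones in the two Intel drivers below, delete netpoll hooks that only replayed the driver's normal NAPI cleanup: walk the queue vectors and call the MSI-X clean handler. They become redundant once the core can fall back to a device's registered NAPI instances when no ndo_poll_controller is provided. A rough sketch of such a fallback, assuming that core-side behaviour (illustrative only, not the verbatim netpoll implementation):

/* Illustrative fallback, assuming core netpoll may poll NAPI directly:
 * schedule every NAPI instance registered on the device and let the
 * regular softirq path drain the rings, so no per-driver hook is needed.
 */
static void netpoll_fallback_poll(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list)
                napi_schedule(napi);    /* raises NET_RX_SOFTIRQ */
}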


@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
         adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
-        struct i40evf_adapter *adapter = netdev_priv(netdev);
-        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-        int i;
-
-        /* if interface is down do nothing */
-        if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
-                return;
-
-        for (i = 0; i < q_vectors; i++)
-                i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
 /**
  * i40evf_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
         .ndo_features_check     = i40evf_features_check,
         .ndo_fix_features       = i40evf_fix_features,
         .ndo_set_features       = i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-        .ndo_poll_controller    = i40evf_netpoll,
-#endif
         .ndo_setup_tc           = i40evf_setup_tc,
 };


@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
         stats->rx_length_errors = vsi_stats->rx_length_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
-        struct ice_netdev_priv *np = netdev_priv(netdev);
-        struct ice_vsi *vsi = np->vsi;
-        struct ice_pf *pf = vsi->back;
-        int i;
-
-        if (test_bit(__ICE_DOWN, vsi->state) ||
-            !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-                return;
-
-        for (i = 0; i < vsi->num_q_vectors; i++)
-                ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
  * @vsi: VSI having NAPI disabled
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
         .ndo_validate_addr = eth_validate_addr,
         .ndo_change_mtu = ice_change_mtu,
         .ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-        .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
         .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
         .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
         .ndo_set_features = ice_set_features,
