This is the 5.4.32 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6UJ1IACgkQONu9yGCS
aT6i1A//RmLhD1Td+1pGWODgsMtfYD1FMB07D2uyFlX/NJe3jnhR9XGIKjFEtFSt
bLXoQZlm07GPS7fsll7rE6Ydq0b1c5z59l+5pFCP4cc0ehM3Gca/V77ui0YBh9Mq
/Jvk70qyE4en20qTia16cjozUjMreYPDdbqoR4rB3Lq+ALEEOwS0G4h2Nd4PTYGp
YDXBwYn+O/f+CAl1arQeOSwpEThGGA4giSzBGaevq09xl2oIs4hAIWTpZ0WtugjR
C4AXSEfl9Y/3OcYJm9KYVx4HqyunDhM3rY5ecCZXqeG4g9i5PZob9KisHuhs4nDD
CBHd8ALTk0jo869MpizJ7nlcaGWPzBMSyxQeo1icq340KZjH/zWm7FY72VL+tBI0
DSpPyP7zJmQESuZGRWfjLZkETH3edg6VI9233pB6OSfddYh7asrDVcw1jwMpr6Pf
Y71aR7D9cYVyNPAP5AzQzdKUQlEPW1t4GhW6Cwxt7lbMZV18N73vYUxpl7IjSUa6
6J5FHEIylnOFmpObzEC4Rj45Poy5ziI44/jmKMf7jmua9IAmHK2Dd6X5XjCRX40C
Urf5it+wC5vkHVS6SW1tN4kbBhDfThHsAG71a3Y7kZeSpT6MrgneLEM9/BNk+csv
gKTMukqZrgmR+zYTY78nbwM/XflqIqSwkF97GNvalyvT48Cokco=
=Wb3/
-----END PGP SIGNATURE-----

Merge 5.4.32 into android-5.4-stable

Changes in 5.4.32
	net: phy: realtek: fix handling of RTL8105e-integrated PHY
	cxgb4: fix MPS index overwrite when setting MAC address
	ipv6: don't auto-add link-local address to lag ports
	net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
	net: dsa: bcm_sf2: Ensure correct sub-node is parsed
	net: dsa: mt7530: fix null pointer dereferencing in port5 setup
	net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
	net_sched: add a temporary refcnt for struct tcindex_data
	net_sched: fix a missing refcnt in tcindex_init()
	net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
	slcan: Don't transmit uninitialized stack data in padding
	tun: Don't put_page() for all negative return values from XDP program
	mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
	r8169: change back SG and TSO to be disabled by default
	s390: prevent leaking kernel address in BEAR
	random: always use batched entropy for get_random_u{32,64}
	usb: dwc3: gadget: Wrap around when skip TRBs
	uapi: rename ext2_swab() to swab() and share globally in swab.h
	slub: improve bit diffusion for freelist ptr obfuscation
	tools/accounting/getdelays.c: fix netlink attribute length
	hwrng: imx-rngc - fix an error path
	ACPI: PM: Add acpi_[un]register_wakeup_handler()
	platform/x86: intel_int0002_vgpio: Use acpi_register_wakeup_handler()
	ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
	IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
	IB/hfi1: Fix memory leaks in sysfs registration and unregistration
	IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
	ARM: imx: Enable ARM_ERRATA_814220 for i.MX6UL and i.MX7D
	ARM: imx: only select ARM_ERRATA_814220 for ARMv7-A
	ceph: remove the extra slashes in the server path
	ceph: canonicalize server path in place
	include/uapi/linux/swab.h: fix userspace breakage, use __BITS_PER_LONG for swap
	RDMA/ucma: Put a lock around every call to the rdma_cm layer
	RDMA/cma: Teach lockdep about the order of rtnl and lock
	RDMA/siw: Fix passive connection establishment
	Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
	RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
	blk-mq: Keep set->nr_hw_queues and set->map[].nr_queues in sync
	fbcon: fix null-ptr-deref in fbcon_switch
	drm/i915: Fix ref->mutex deadlock in i915_active_wait()
	iommu/vt-d: Allow devices with RMRRs to use identity domain
	Linux 5.4.32

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I520b282d0cdebf3f80293c014d5cd6e88d956d55
This commit is contained in:
commit 724ffa0096

Makefile (2 changed lines: 1 addition, 1 deletion)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 31
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -520,6 +520,7 @@ config SOC_IMX6UL
 	bool "i.MX6 UltraLite support"
 	select PINCTRL_IMX6UL
 	select SOC_IMX6
+	select ARM_ERRATA_814220
 	help
 	  This enables support for Freescale i.MX6 UltraLite processor.

@@ -556,6 +557,7 @@ config SOC_IMX7D
 	select PINCTRL_IMX7D
 	select SOC_IMX7D_CA7 if ARCH_MULTI_V7
 	select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
+	select ARM_ERRATA_814220 if ARCH_MULTI_V7
 	help
 	  This enables support for Freescale i.MX7 Dual processor.

@@ -141,7 +141,9 @@ struct lowcore {

 	/* br %r1 trampoline */
 	__u16	br_r1_trampoline;		/* 0x0400 */
-	__u8	pad_0x0402[0x0e00-0x0402];	/* 0x0402 */
+	__u32	return_lpswe;			/* 0x0402 */
+	__u32	return_mcck_lpswe;		/* 0x0406 */
+	__u8	pad_0x040a[0x0e00-0x040a];	/* 0x040a */

 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information

@@ -162,6 +162,7 @@ typedef struct thread_struct thread_struct;
 #define INIT_THREAD {						\
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
 	.fpu.regs = (void *) init_task.thread.fpu.fprs,		\
+	.last_break = 1,					\
 }

 /*

@@ -8,6 +8,7 @@

 #include <linux/bits.h>
 #include <uapi/asm/setup.h>
+#include <linux/build_bug.h>

 #define EP_OFFSET	0x10008
 #define EP_STRING	"S390EP"
@@ -157,6 +158,12 @@ static inline unsigned long kaslr_offset(void)
 	return __kaslr_offset;
 }

+static inline u32 gen_lpswe(unsigned long addr)
+{
+	BUILD_BUG_ON(addr > 0xfff);
+	return 0xb2b20000 | addr;
+}
+
 #else /* __ASSEMBLY__ */

 #define IPL_DEVICE	(IPL_DEVICE_OFFSET)

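The gen_lpswe() helper added above builds an LPSWE instruction image: 0xb2b2 is the two-byte opcode and, with base register 0, the low 12 bits are simply the lowcore displacement, so ORing the address in is enough. A standalone C sketch of the same encoding (the 0x200 displacement is made up for illustration, not the real __LC_RETURN_PSW offset):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of gen_lpswe(): opcode 0xb2b2 in the high half,
 * 12-bit base-0 displacement in the low bits. */
static uint32_t gen_lpswe(unsigned long addr)
{
	assert(addr <= 0xfff);	/* stands in for BUILD_BUG_ON() */
	return 0xb2b20000u | (uint32_t)addr;
}

int main(void)
{
	printf("lpswe 0x200 -> %08x\n", gen_lpswe(0x200)); /* b2b20200 */
	return 0;
}
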
@@ -125,6 +125,8 @@ int main(void)
 	OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
 	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
 	OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
+	OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
+	OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
 	OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
 	OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
 	OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);

@@ -115,26 +115,29 @@ _LPP_OFFSET	= __LC_LPP

 	.macro	SWITCH_ASYNC savearea,timer
 	tmhh	%r8,0x0001		# interrupting from user ?
-	jnz	1f
+	jnz	2f
 	lgr	%r14,%r9
+	cghi	%r14,__LC_RETURN_LPSWE
+	je	0f
 	slg	%r14,BASED(.Lcritical_start)
 	clg	%r14,BASED(.Lcritical_length)
-	jhe	0f
+	jhe	1f
+0:
 	lghi	%r11,\savearea		# inside critical section, do cleanup
 	brasl	%r14,cleanup_critical
 	tmhh	%r8,0x0001		# retest problem state after cleanup
-	jnz	1f
-0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
+	jnz	2f
+1:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
 	slgr	%r14,%r15
 	srag	%r14,%r14,STACK_SHIFT
-	jnz	2f
+	jnz	3f
 	CHECK_STACK \savearea
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	3f
-1:	UPDATE_VTIME %r14,%r15,\timer
+	j	4f
+2:	UPDATE_VTIME %r14,%r15,\timer
 	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
-3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+3:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm

 	.macro UPDATE_VTIME w1,w2,enter_timer
@@ -401,7 +404,7 @@ ENTRY(system_call)
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
-	lpswe	__LC_RETURN_PSW
+	b	__LC_RETURN_LPSWE(%r0)
 .Lsysc_done:

 #
@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
 	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_CURRENT
+	srag	%r11,%r10,12
+	jnz	0f
+	/* if __LC_LAST_BREAK is < 4096, it contains one of
+	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
+	 * to prevent leaking that address to userspace.
+	 */
+	lghi	%r10,1
+0:	lg	%r12,__LC_CURRENT
 	lghi	%r11,0
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# test problem state bit
-	jnz	2f			# -> fault in user space
+	jnz	3f			# -> fault in user space
 #if IS_ENABLED(CONFIG_KVM)
 	# cleanup critical section for program checks in sie64a
 	lgr	%r14,%r9
 	slg	%r14,BASED(.Lsie_critical_start)
 	clg	%r14,BASED(.Lsie_critical_length)
-	jhe	0f
+	jhe	1f
 	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
 	lghi	%r11,_PIF_GUEST_FAULT
 #endif
-0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
-	jnz	1f			# -> enabled, can't be a double fault
+1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	2f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jnz	.Lpgm_svcper		# -> single stepped svc
-1:	CHECK_STACK __LC_SAVE_AREA_SYNC
+2:	CHECK_STACK __LC_SAVE_AREA_SYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	# CHECK_VMAP_STACK branches to stack_overflow or 4f
-	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	# CHECK_VMAP_STACK branches to stack_overflow or 5f
+	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
+3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	lg	%r15,__LC_KERNEL_STACK
 	lgr	%r14,%r12
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
-	jz	3f
+	jz	4f
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
-3:	stg	%r10,__THREAD_last_break(%r14)
-4:	lgr	%r13,%r11
+4:	stg	%r10,__THREAD_last_break(%r14)
+5:	lgr	%r13,%r11
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
 	# clear user controlled registers to prevent speculative use
@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
 	stg	%r13,__PT_FLAGS(%r11)
 	stg	%r10,__PT_ARGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
-	jz	5f
+	jz	6f
 	tmhh	%r8,0x0001		# kernel per event ?
 	jz	.Lpgm_kprobe
 	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-5:	REENABLE_IRQS
+6:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	larl	%r1,pgm_check_table
 	llgh	%r10,__PT_INT_CODE+2(%r11)
@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 .Lio_exit_kernel:
 	lmg	%r11,%r15,__PT_R11(%r11)
-	lpswe	__LC_RETURN_PSW
+	b	__LC_RETURN_LPSWE(%r0)
 .Lio_done:

 #
@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 0:	lmg	%r11,%r15,__PT_R11(%r11)
-	lpswe	__LC_RETURN_MCCK_PSW
+	b	__LC_RETURN_MCCK_LPSWE

 .Lmcck_panic:
 	lg	%r15,__LC_NODAT_STACK
@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
 #endif

 ENTRY(cleanup_critical)
+	cghi	%r9,__LC_RETURN_LPSWE
+	je	.Lcleanup_lpswe
 #if IS_ENABLED(CONFIG_KVM)
 	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
 	jl	0f
@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
+.Lcleanup_lpswe:
 1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	BR_EX	%r14,%r11
 .Lcleanup_sysc_restore_insn:

@@ -105,6 +105,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
 	p->thread.system_timer = 0;
 	p->thread.hardirq_timer = 0;
 	p->thread.softirq_timer = 0;
+	p->thread.last_break = 1;

 	frame->sf.back_chain = 0;
 	/* new return point is ret_from_fork */

@@ -73,6 +73,7 @@
 #include <asm/nospec-branch.h>
 #include <asm/mem_detect.h>
 #include <asm/uv.h>
+#include <asm/asm-offsets.h>
 #include "entry.h"

 /*
@@ -457,6 +458,8 @@ static void __init setup_lowcore_dat_off(void)
 	lc->spinlock_index = 0;
 	arch_spin_lock_setup(0);
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);

 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;

@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->spinlock_index = 0;
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
+	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 	if (nmi_alloc_per_cpu(lc))
 		goto out_async;
 	if (vdso_alloc_per_cpu(lc))

@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
 		     SET_MEMORY_RO | SET_MEMORY_X);
 	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
+
+	/* we need lowcore executable for our LPSWE instructions */
+	set_memory_x(0, 1);
+
 	pr_info("Write protected kernel read-only data: %luk\n",
 		(unsigned long)(__end_rodata - _stext) >> 10);
 }

@@ -3007,6 +3007,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)

 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
+	/*
+	 * blk_mq_map_queues() and multiple .map_queues() implementations
+	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
+	 * number of hardware queues.
+	 */
+	if (set->nr_maps == 1)
+		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
+
 	if (set->ops->map_queues && !is_kdump_kernel()) {
 		int i;

@@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void)
 		if (acpi_any_fixed_event_status_set())
 			return true;

+		/* Check wakeups from drivers sharing the SCI. */
+		if (acpi_check_wakeup_handlers())
+			return true;
+
 		/*
 		 * If there are no EC events to process and at least one of the
 		 * other enabled GPEs is active, the wakeup is regarded as a

@@ -2,6 +2,7 @@

 extern void acpi_enable_wakeup_devices(u8 sleep_state);
 extern void acpi_disable_wakeup_devices(u8 sleep_state);
+extern bool acpi_check_wakeup_handlers(void);

 extern struct list_head acpi_wakeup_device_list;
 extern struct mutex acpi_device_lock;

@@ -12,6 +12,15 @@
 #include "internal.h"
 #include "sleep.h"

+struct acpi_wakeup_handler {
+	struct list_head list_node;
+	bool (*wakeup)(void *context);
+	void *context;
+};
+
+static LIST_HEAD(acpi_wakeup_handler_head);
+static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
+
 /*
  * We didn't lock acpi_device_lock in the file, because it invokes oops in
  * suspend/resume and isn't really required as this is called in S-state. At
@@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void)
 	mutex_unlock(&acpi_device_lock);
 	return 0;
 }
+
+/**
+ * acpi_register_wakeup_handler - Register wakeup handler
+ * @wake_irq: The IRQ through which the device may receive wakeups
+ * @wakeup:   Wakeup-handler to call when the SCI has triggered a wakeup
+ * @context:  Context to pass to the handler when calling it
+ *
+ * Drivers which may share an IRQ with the SCI can use this to register
+ * a handler which returns true when the device they are managing wants
+ * to trigger a wakeup.
+ */
+int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
+				 void *context)
+{
+	struct acpi_wakeup_handler *handler;
+
+	/*
+	 * If the device is not sharing its IRQ with the SCI, there is no
+	 * need to register the handler.
+	 */
+	if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
+		return 0;
+
+	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (!handler)
+		return -ENOMEM;
+
+	handler->wakeup = wakeup;
+	handler->context = context;
+
+	mutex_lock(&acpi_wakeup_handler_mutex);
+	list_add(&handler->list_node, &acpi_wakeup_handler_head);
+	mutex_unlock(&acpi_wakeup_handler_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
+
+/**
+ * acpi_unregister_wakeup_handler - Unregister wakeup handler
+ * @wakeup:  Wakeup-handler passed to acpi_register_wakeup_handler()
+ * @context: Context passed to acpi_register_wakeup_handler()
+ */
+void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
+				    void *context)
+{
+	struct acpi_wakeup_handler *handler;
+
+	mutex_lock(&acpi_wakeup_handler_mutex);
+	list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+		if (handler->wakeup == wakeup && handler->context == context) {
+			list_del(&handler->list_node);
+			kfree(handler);
+			break;
+		}
+	}
+	mutex_unlock(&acpi_wakeup_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
+
+bool acpi_check_wakeup_handlers(void)
+{
+	struct acpi_wakeup_handler *handler;
+
+	/* No need to lock, nothing else is running when we're called. */
+	list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+		if (handler->wakeup(handler->context))
+			return true;
+	}
+
+	return false;
+}

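For context, a driver whose interrupt line may be shared with the SCI would use the new API roughly as below. This is a hedged sketch: only the two acpi_*_wakeup_handler() calls come from the patch, and the device, its register layout, and the wake-status bit are invented for illustration.

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical device state, illustrative only. */
struct mydev {
	void __iomem *regs;
	int irq;
};

static bool mydev_check_wake(void *context)
{
	struct mydev *md = context;

	/* True only if our device latched a wake event (bit 0, made up). */
	return readl(md->regs) & BIT(0);
}

static int mydev_probe(struct platform_device *pdev)
{
	struct mydev *md = platform_get_drvdata(pdev);

	/* Registration is a no-op unless md->irq equals the SCI IRQ. */
	return acpi_register_wakeup_handler(md->irq, mydev_check_wake, md);
}

static int mydev_remove(struct platform_device *pdev)
{
	struct mydev *md = platform_get_drvdata(pdev);

	acpi_unregister_wakeup_handler(mydev_check_wake, md);
	return 0;
}
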
@@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
 		return -ETIMEDOUT;
 	}

-	if (rngc->err_reg != 0)
+	if (rngc->err_reg != 0) {
+		imx_rngc_irq_mask_clear(rngc);
 		return -EIO;
+	}

 	return 0;
 }

@@ -2147,11 +2147,11 @@ struct batched_entropy {

 /*
  * Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
  * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
 	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2164,15 +2164,6 @@ u64 get_random_u64(void)
 	struct batched_entropy *batch;
 	static void *previous;

-#if BITS_PER_LONG == 64
-	if (arch_get_random_long((unsigned long *)&ret))
-		return ret;
-#else
-	if (arch_get_random_long((unsigned long *)&ret) &&
-	    arch_get_random_long((unsigned long *)&ret + 1))
-		return ret;
-#endif
-
 	warn_unseeded_randomness(&previous);

 	batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2197,9 +2188,6 @@ u32 get_random_u32(void)
 	struct batched_entropy *batch;
 	static void *previous;

-	if (arch_get_random_int(&ret))
-		return ret;
-
 	warn_unseeded_randomness(&previous);

 	batch = raw_cpu_ptr(&batched_entropy_u32);

@@ -121,7 +121,7 @@ static inline void debug_active_assert(struct i915_active *ref) { }
 #endif

 static void
-__active_retire(struct i915_active *ref)
+__active_retire(struct i915_active *ref, bool lock)
 {
 	struct active_node *it, *n;
 	struct rb_root root;
@@ -138,7 +138,8 @@ __active_retire(struct i915_active *ref)
 		retire = true;
 	}

-	mutex_unlock(&ref->mutex);
+	if (likely(lock))
+		mutex_unlock(&ref->mutex);
 	if (!retire)
 		return;

@@ -153,21 +154,28 @@ __active_retire(struct i915_active *ref)
 }

 static void
-active_retire(struct i915_active *ref)
+active_retire(struct i915_active *ref, bool lock)
 {
 	GEM_BUG_ON(!atomic_read(&ref->count));
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;

 	/* One active may be flushed from inside the acquire of another */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
-	__active_retire(ref);
+	if (likely(lock))
+		mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	__active_retire(ref, lock);
 }

 static void
 node_retire(struct i915_active_request *base, struct i915_request *rq)
 {
-	active_retire(node_from_active(base)->ref);
+	active_retire(node_from_active(base)->ref, true);
 }

+static void
+node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
+{
+	active_retire(node_from_active(base)->ref, false);
+}
+
 static struct i915_active_request *
@@ -364,7 +372,7 @@ int i915_active_acquire(struct i915_active *ref)
 void i915_active_release(struct i915_active *ref)
 {
 	debug_active_assert(ref);
-	active_retire(ref);
+	active_retire(ref, true);
 }

 static void __active_ungrab(struct i915_active *ref)
@@ -391,7 +399,7 @@ void i915_active_ungrab(struct i915_active *ref)
 {
 	GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));

-	active_retire(ref);
+	active_retire(ref, true);
 	__active_ungrab(ref);
 }

@@ -421,12 +429,13 @@ int i915_active_wait(struct i915_active *ref)
 			break;
 		}

-		err = i915_active_request_retire(&it->base, BKL(ref));
+		err = i915_active_request_retire(&it->base, BKL(ref),
+						 node_retire_nolock);
 		if (err)
 			break;
 	}

-	__active_retire(ref);
+	__active_retire(ref, true);
 	if (err)
 		return err;

@@ -309,7 +309,7 @@ i915_active_request_isset(const struct i915_active_request *active)
  */
 static inline int __must_check
 i915_active_request_retire(struct i915_active_request *active,
-			   struct mutex *mutex)
+			   struct mutex *mutex, i915_active_retire_fn retire)
 {
 	struct i915_request *request;
 	long ret;
@@ -327,7 +327,7 @@ i915_active_request_retire(struct i915_active_request *active,
 	list_del_init(&active->link);
 	RCU_INIT_POINTER(active->request, NULL);

-	active->retire(active, request);
+	retire(active, request);

 	return 0;
 }

@@ -2911,6 +2911,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 err2:
 	kfree(route->path_rec);
 	route->path_rec = NULL;
+	route->num_paths = 0;
 err1:
 	kfree(work);
 	return ret;
@@ -4719,6 +4720,19 @@ static int __init cma_init(void)
 {
 	int ret;

+	/*
+	 * There is a rare lock ordering dependency in cma_netdev_callback()
+	 * that only happens when bonding is enabled. Teach lockdep that rtnl
+	 * must never be nested under lock so it can find these without having
+	 * to test with bonding.
+	 */
+	if (IS_ENABLED(CONFIG_LOCKDEP)) {
+		rtnl_lock();
+		mutex_lock(&lock);
+		mutex_unlock(&lock);
+		rtnl_unlock();
+	}
+
 	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
 	if (!cma_wq)
 		return -ENOMEM;

@@ -91,6 +91,7 @@ struct ucma_context {

 	struct ucma_file	*file;
 	struct rdma_cm_id	*cm_id;
+	struct mutex		mutex;
 	u64			uid;

 	struct list_head	list;
@@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 	init_completion(&ctx->comp);
 	INIT_LIST_HEAD(&ctx->mc_list);
 	ctx->file = file;
+	mutex_init(&ctx->mutex);

 	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
 		goto error;
@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 	}

 	events_reported = ctx->events_reported;
+	mutex_destroy(&ctx->mutex);
 	kfree(ctx);
 	return events_reported;
 }
@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
+
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	memset(&resp, 0, sizeof resp);
 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

 out:
+	mutex_unlock(&ctx->mutex);
 	if (copy_to_user(u64_to_user_ptr(cmd.response),
 			 &resp, sizeof(resp)))
 		ret = -EFAULT;
@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	switch (cmd.option) {
 	case RDMA_USER_CM_QUERY_ADDR:
 		ret = ucma_query_addr(ctx, response, out_len);
@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 		ret = -ENOSYS;
 		break;
 	}
+	mutex_unlock(&ctx->mutex);

 	ucma_put_ctx(ctx);
 	return ret;
@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 		return PTR_ERR(ctx);

 	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_connect(ctx->cm_id, &conn_param);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,

 	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
 		       cmd.backlog : max_backlog;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 	if (cmd.conn_param.valid) {
 		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 		mutex_lock(&file->mut);
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+		mutex_unlock(&ctx->mutex);
 		if (!ret)
 			ctx->uid = cmd.uid;
 		mutex_unlock(&file->mut);
-	} else
+	} else {
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, NULL, NULL);

+		mutex_unlock(&ctx->mutex);
+	}
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	ret = rdma_disconnect(ctx->cm_id);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	resp.qp_attr_mask = 0;
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.qp_state = cmd.qp_state;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto out;

@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
 		struct sa_path_rec opa;

 		sa_convert_path_ib_to_opa(&opa, &sa_path);
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &opa);
+		mutex_unlock(&ctx->mutex);
 	} else {
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+		mutex_unlock(&ctx->mutex);
 	}
 	if (ret)
 		return ret;
@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,

 	switch (level) {
 	case RDMA_OPTION_ID:
+		mutex_lock(&ctx->mutex);
 		ret = ucma_set_option_id(ctx, optname, optval, optlen);
+		mutex_unlock(&ctx->mutex);
 		break;
 	case RDMA_OPTION_IB:
 		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	mutex_lock(&ctx->mutex);
 	if (ctx->cm_id->device)
 		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+	mutex_unlock(&ctx->mutex);

 	ucma_put_ctx(ctx);
 	return ret;
@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 	mc->join_state = join_state;
 	mc->uid = cmd->uid;
 	memcpy(&mc->addr, addr, cmd->addr_size);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
 				  join_state, mc);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto err2;

@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}

+	mutex_lock(&mc->ctx->mutex);
 	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+	mutex_unlock(&mc->ctx->mutex);
+
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);

@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping sc2vl sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail;
+		/*
+		 * Based on the documentation for kobject_init_and_add(), the
+		 * caller should call kobject_put even if this call fails.
+		 */
+		goto bail_sc2vl;
 	}
 	kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);

@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping sl2sc sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_sc2vl;
+		goto bail_sl2sc;
 	}
 	kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);

@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping vl2mtu sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_sl2sc;
+		goto bail_vl2mtu;
 	}
 	kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);

@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping Congestion Control sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_vl2mtu;
+		goto bail_cc;
 	}

 	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -742,7 +746,6 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 	kobject_put(&ppd->sl2sc_kobj);
 bail_sc2vl:
 	kobject_put(&ppd->sc2vl_kobj);
-bail:
 	return ret;
 }

@@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)

 	return 0;
 bail:
-	for (i = 0; i < dd->num_sdma; i++)
-		kobject_del(&dd->per_sdma[i].kobj);
+	/*
+	 * The function kobject_put() will call kobject_del() if the kobject
+	 * has been added successfully. The sysfs files created under the
+	 * kobject directory will also be removed during the process.
+	 */
+	for (; i >= 0; i--)
+		kobject_put(&dd->per_sdma[i].kobj);

 	return ret;
 }
@@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
 	struct hfi1_pportdata *ppd;
 	int i;

+	/* Unwind operations in hfi1_verbs_register_sysfs() */
+	for (i = 0; i < dd->num_sdma; i++)
+		kobject_put(&dd->per_sdma[i].kobj);
+
 	for (i = 0; i < dd->num_pports; i++) {
 		ppd = &dd->pport[i];

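Both hfi1 fixes follow the documented kobject contract: kobject_init_and_add() leaves the caller holding a reference that must be dropped with kobject_put() even on failure, and kobject_put() also performs kobject_del() for kobjects that were added successfully. A generic sketch of the pattern (the ktype and the "example" name are invented):

#include <linux/kobject.h>

static struct kobj_type example_ktype;	/* illustrative only */

static int example_register(struct kobject *kobj, struct kobject *parent)
{
	int ret;

	ret = kobject_init_and_add(kobj, &example_ktype, parent, "example");
	if (ret) {
		/*
		 * Even on failure the kobject holds the reference taken
		 * by kobject_init(); dropping it is the caller's job.
		 */
		kobject_put(kobj);
		return ret;
	}
	kobject_uevent(kobj, KOBJ_ADD);
	return 0;
}
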
@@ -1181,12 +1181,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
 			resp.tunnel_offloads_caps |=
 				MLX5_IB_TUNNELED_OFFLOADS_GRE;
-		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
-		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
+		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
 			resp.tunnel_offloads_caps |=
 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
-		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
-		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
+		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
 			resp.tunnel_offloads_caps |=
 				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
 	}

@@ -1783,14 +1783,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
 	return 0;
 }

-static int siw_listen_address(struct iw_cm_id *id, int backlog,
-			      struct sockaddr *laddr, int addr_family)
+/*
+ * siw_create_listen - Create resources for a listener's IWCM ID @id
+ *
+ * Starts listen on the socket address id->local_addr.
+ *
+ */
+int siw_create_listen(struct iw_cm_id *id, int backlog)
 {
 	struct socket *s;
 	struct siw_cep *cep = NULL;
+	struct siw_device *sdev = to_siw_dev(id->device);
+	int addr_family = id->local_addr.ss_family;
 	int rv = 0, s_val;

+	if (addr_family != AF_INET && addr_family != AF_INET6)
+		return -EAFNOSUPPORT;
+
 	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
 	if (rv < 0)
 		return rv;
@@ -1805,9 +1814,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
 		siw_dbg(id->device, "setsockopt error: %d\n", rv);
 		goto error;
 	}
-	rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
-				    sizeof(struct sockaddr_in) :
-				    sizeof(struct sockaddr_in6));
+	if (addr_family == AF_INET) {
+		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+		/* For wildcard addr, limit binding to current device only */
+		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+				  sizeof(struct sockaddr_in));
+	} else {
+		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+
+		/* For wildcard addr, limit binding to current device only */
+		if (ipv6_addr_any(&laddr->sin6_addr))
+			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+		rv = s->ops->bind(s, (struct sockaddr *)laddr,
+				  sizeof(struct sockaddr_in6));
+	}
 	if (rv) {
 		siw_dbg(id->device, "socket bind error: %d\n", rv);
 		goto error;
@@ -1866,7 +1891,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
 	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
 	cep->state = SIW_EPSTATE_LISTENING;

-	siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
+	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

 	return 0;

@@ -1924,114 +1949,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
 	}
 }

-/*
- * siw_create_listen - Create resources for a listener's IWCM ID @id
- *
- * Listens on the socket addresses id->local_addr and id->remote_addr.
- *
- * If the listener's @id provides a specific local IP address, at most one
- * listening socket is created and associated with @id.
- *
- * If the listener's @id provides the wildcard (zero) local IP address,
- * a separate listen is performed for each local IP address of the device
- * by creating a listening socket and binding to that local IP address.
- *
- */
-int siw_create_listen(struct iw_cm_id *id, int backlog)
-{
-	struct net_device *dev = to_siw_dev(id->device)->netdev;
-	int rv = 0, listeners = 0;
-
-	siw_dbg(id->device, "backlog %d\n", backlog);
-
-	/*
-	 * For each attached address of the interface, create a
-	 * listening socket, if id->local_addr is the wildcard
-	 * IP address or matches the IP address.
-	 */
-	if (id->local_addr.ss_family == AF_INET) {
-		struct in_device *in_dev = in_dev_get(dev);
-		struct sockaddr_in s_laddr, *s_raddr;
-		const struct in_ifaddr *ifa;
-
-		if (!in_dev) {
-			rv = -ENODEV;
-			goto out;
-		}
-		memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
-		s_raddr = (struct sockaddr_in *)&id->remote_addr;
-
-		siw_dbg(id->device,
-			"laddr %pI4:%d, raddr %pI4:%d\n",
-			&s_laddr.sin_addr, ntohs(s_laddr.sin_port),
-			&s_raddr->sin_addr, ntohs(s_raddr->sin_port));
-
-		rtnl_lock();
-		in_dev_for_each_ifa_rtnl(ifa, in_dev) {
-			if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
-			    s_laddr.sin_addr.s_addr == ifa->ifa_address) {
-				s_laddr.sin_addr.s_addr = ifa->ifa_address;
-
-				rv = siw_listen_address(id, backlog,
-						(struct sockaddr *)&s_laddr,
-						AF_INET);
-				if (!rv)
-					listeners++;
-			}
-		}
-		rtnl_unlock();
-		in_dev_put(in_dev);
-	} else if (id->local_addr.ss_family == AF_INET6) {
-		struct inet6_dev *in6_dev = in6_dev_get(dev);
-		struct inet6_ifaddr *ifp;
-		struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
-			*s_raddr = &to_sockaddr_in6(id->remote_addr);
-
-		if (!in6_dev) {
-			rv = -ENODEV;
-			goto out;
-		}
-		siw_dbg(id->device,
-			"laddr %pI6:%d, raddr %pI6:%d\n",
-			&s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
-			&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
-
-		rtnl_lock();
-		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
-			if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
-				continue;
-			if (ipv6_addr_any(&s_laddr->sin6_addr) ||
-			    ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
-				struct sockaddr_in6 bind_addr = {
-					.sin6_family = AF_INET6,
-					.sin6_port = s_laddr->sin6_port,
-					.sin6_flowinfo = 0,
-					.sin6_addr = ifp->addr,
-					.sin6_scope_id = dev->ifindex };
-
-				rv = siw_listen_address(id, backlog,
-						(struct sockaddr *)&bind_addr,
-						AF_INET6);
-				if (!rv)
-					listeners++;
-			}
-		}
-		rtnl_unlock();
-		in6_dev_put(in6_dev);
-	} else {
-		rv = -EAFNOSUPPORT;
-	}
-out:
-	if (listeners)
-		rv = 0;
-	else if (!rv)
-		rv = -EINVAL;
-
-	siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
-
-	return rv;
-}
-
 int siw_destroy_listen(struct iw_cm_id *id)
 {
 	if (!id->provider_data) {

@@ -2762,10 +2762,8 @@ static int __init si_domain_init(int hw)
 	}

 	/*
-	 * Normally we use DMA domains for devices which have RMRRs. But we
-	 * loose this requirement for graphic and usb devices. Identity map
-	 * the RMRRs for graphic and USB devices so that they could use the
-	 * si_domain.
+	 * Identity map the RMRRs so that devices with RMRRs could also use
+	 * the si_domain.
 	 */
 	for_each_rmrr_units(rmrr) {
 		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2773,9 +2771,6 @@ static int __init si_domain_init(int hw)
 			unsigned long long start = rmrr->base_address;
 			unsigned long long end = rmrr->end_address;

-			if (device_is_rmrr_locked(dev))
-				continue;
-
 			if (WARN_ON(end < start ||
 				    end >> agaw_to_width(si_domain->agaw)))
 				continue;
@@ -2914,9 +2909,6 @@ static int device_def_domain_type(struct device *dev)
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);

-		if (device_is_rmrr_locked(dev))
-			return IOMMU_DOMAIN_DMA;
-
 		/*
 		 * Prevent any device marked as untrusted from getting
 		 * placed into the statically identity mapping domain.
@@ -2954,9 +2946,6 @@ static int device_def_domain_type(struct device *dev)
 				return IOMMU_DOMAIN_DMA;
 		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
 			return IOMMU_DOMAIN_DMA;
-	} else {
-		if (device_has_rmrr(dev))
-			return IOMMU_DOMAIN_DMA;
 	}

 	return (iommu_identity_mapping & IDENTMAP_ALL) ?

@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
 	u32 tmpid;
 	char *cmd = sl->rbuff;

-	cf.can_id = 0;
+	memset(&cf, 0, sizeof(cf));

 	switch (*cmd) {
 	case 'r':
@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
 	else
 		return;

-	*(u64 *) (&cf.data) = 0; /* clear payload */
-
 	/* RTR frames may have a dlc > 0 but they never have any data bytes */
 	if (!(cf.can_id & CAN_RTR_FLAG)) {
 		for (i = 0; i < cf.can_dlc; i++) {

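The slcan change matters because initializing a struct field by field leaves its padding bytes holding stale stack data, and the whole struct is later copied to the network; memset() clears every byte, padding included. A userspace sketch of the layout issue (the struct is a simplified stand-in for struct can_frame):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in: on most ABIs, 3 padding bytes sit between
 * can_dlc and data[] and stay uninitialized under field-wise init. */
struct frame {
	unsigned int can_id;
	unsigned char can_dlc;
	unsigned char data[8];
};

int main(void)
{
	struct frame a, b;

	/* Field-wise init: padding keeps whatever was on the stack. */
	a.can_id = 0;
	a.can_dlc = 0;

	/* memset() zeroes the full object, padding included. */
	memset(&b, 0, sizeof(b));

	printf("sizeof(struct frame) = %zu\n", sizeof(struct frame));
	return 0;
}
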
@@ -459,7 +459,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
 	priv->slave_mii_bus->parent = ds->dev->parent;
 	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

-	err = of_mdiobus_register(priv->slave_mii_bus, dn);
+	err = mdiobus_register(priv->slave_mii_bus);
 	if (err && dn)
 		of_node_put(dn);

@@ -1053,6 +1053,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	const struct bcm_sf2_of_data *data;
 	struct b53_platform_data *pdata;
 	struct dsa_switch_ops *ops;
+	struct device_node *ports;
 	struct bcm_sf2_priv *priv;
 	struct b53_device *dev;
 	struct dsa_switch *ds;
@@ -1115,7 +1116,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	set_bit(0, priv->cfp.used);
 	set_bit(0, priv->cfp.unique);

-	bcm_sf2_identify_ports(priv, dn->child);
+	ports = of_find_node_by_name(dn, "ports");
+	if (ports) {
+		bcm_sf2_identify_ports(priv, ports);
+		of_node_put(ports);
+	}

 	priv->irq0 = irq_of_parse_and_map(dn, 0);
 	priv->irq1 = irq_of_parse_and_map(dn, 1);

@@ -1353,6 +1353,9 @@ mt7530_setup(struct dsa_switch *ds)
 				continue;

 			phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+			if (!phy_node)
+				continue;
+
 			if (phy_node->parent == priv->dev->of_node->parent) {
 				interface = of_get_phy_mode(mac_np);
 				id = of_mdio_parse_addr(ds->dev, phy_node);

@@ -3032,7 +3032,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 		return ret;

 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	pi->xact_addr_filt = ret;
 	return 0;
 }

@@ -123,9 +123,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			u8 prio = act->vlan.prio;
 			u16 vid = act->vlan.vid;

-			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
-							   act->id, vid,
-							   proto, prio, extack);
+			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+							  act->id, vid,
+							  proto, prio, extack);
+			if (err)
+				return err;
+			break;
 		}
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");

@@ -7167,12 +7167,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

 	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);

-	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
-			 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
-			 NETIF_F_HW_VLAN_CTAG_RX;
-	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
-			   NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 			     NETIF_F_HIGHDMA;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -7190,25 +7188,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

 	if (rtl_chip_supports_csum_v2(tp)) {
-		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
-		dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+		dev->hw_features |= NETIF_F_IPV6_CSUM;
+		dev->features |= NETIF_F_IPV6_CSUM;
 	}

+	/* There has been a number of reports that using SG/TSO results in
+	 * tx timeouts. However for a lot of people SG/TSO works fine.
+	 * Therefore disable both features by default, but allow users to
+	 * enable them. Use at own risk!
+	 */
+	if (rtl_chip_supports_csum_v2(tp)) {
+		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+		dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
+		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+	} else {
+		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
+		dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
+		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+	}
+
 	/* RTL8168e-vl and one RTL8168c variant are known to have a
 	 * HW issue with TSO.
 	 */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
 		dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 	}

 	dev->hw_features |= NETIF_F_RXALL;
 	dev->hw_features |= NETIF_F_RXFCS;

@@ -209,7 +209,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 			reg++;
 		}

-		while (reg <= perfect_addr_number) {
+		while (reg < perfect_addr_number) {
 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
 			reg++;

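The dwmac1000 fix is a plain off-by-one: with valid address registers indexed up to perfect_addr_number - 1, the old `<=` bound wrote one register past the end. A trivial userspace illustration (the register count is made up):

#include <stdio.h>

int main(void)
{
	const int perfect_addr_number = 4;	/* hypothetical count */
	int reg = 1, last = -1;

	/* The old bound `reg <= perfect_addr_number` would also touch
	 * index 4, one past the last valid register (indices 0..3). */
	while (reg < perfect_addr_number) {
		last = reg;	/* stands in for the two writel() calls */
		reg++;
	}
	printf("last index written: %d\n", last);
	return 0;
}
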
@@ -25,6 +25,7 @@
 #include <linux/micrel_phy.h>
 #include <linux/of.h>
 #include <linux/clk.h>
+#include <linux/delay.h>

 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO		0x16
@@ -902,6 +903,12 @@ static int kszphy_resume(struct phy_device *phydev)

 	genphy_resume(phydev);

+	/* After switching from power-down to normal mode, an internal global
+	 * reset is automatically generated. Wait a minimum of 1 ms before
+	 * read/write access to the PHY registers.
+	 */
+	usleep_range(1000, 2000);
+
 	ret = kszphy_config_reset(phydev);
 	if (ret)
 		return ret;

@@ -456,6 +456,15 @@ static struct phy_driver realtek_drvs[] = {
 		.resume		= genphy_resume,
 		.read_page	= rtl821x_read_page,
 		.write_page	= rtl821x_write_page,
+	}, {
+		PHY_ID_MATCH_MODEL(0x001cc880),
+		.name		= "RTL8208 Fast Ethernet",
+		.read_mmd	= genphy_read_mmd_unsupported,
+		.write_mmd	= genphy_write_mmd_unsupported,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc910),
 		.name		= "RTL8211 Gigabit Ethernet",

@@ -1715,8 +1715,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 			alloc_frag->offset += buflen;
 		}
 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
-		if (err < 0)
-			goto err_xdp;
+		if (err < 0) {
+			if (act == XDP_REDIRECT || act == XDP_TX)
+				put_page(alloc_frag->page);
+			goto out;
+		}
+
 		if (err == XDP_REDIRECT)
 			xdp_do_flush_map();
 		if (err != XDP_PASS)
@@ -1730,8 +1734,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,

 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

-err_xdp:
-	put_page(alloc_frag->page);
 out:
 	rcu_read_unlock();
 	local_bh_enable();

@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }

+static bool int0002_check_wake(void *data)
+{
+	u32 gpe_sts_reg;
+
+	gpe_sts_reg = inl(GPE0A_STS_PORT);
+	return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
 static struct irq_chip int0002_byt_irqchip = {
 	.name			= DRV_NAME,
 	.irq_ack		= int0002_irq_ack,
@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)

 	gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);

+	acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
 	device_init_wakeup(dev, true);
 	return 0;
 }
@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
 static int int0002_remove(struct platform_device *pdev)
 {
 	device_init_wakeup(&pdev->dev, false);
+	acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
 	return 0;
 }

@@ -1518,7 +1518,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
 	for (i = 0; i < req->num_trbs; i++) {
 		struct dwc3_trb *trb;

-		trb = req->trb + i;
+		trb = &dep->trb_pool[dep->trb_dequeue];
 		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
 		dwc3_ep_inc_deq(dep);
 	}

@@ -1276,6 +1276,9 @@ static void fbcon_deinit(struct vc_data *vc)
 		if (!con_is_bound(&fb_con))
 			fbcon_exit();

+		if (vc->vc_num == logo_shown)
+			logo_shown = FBCON_LOGO_CANSHOW;
+
 		return;
 	}

@@ -106,7 +106,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }

-
 static int ceph_sync_fs(struct super_block *sb, int wait)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -215,6 +214,26 @@ static match_table_t fsopt_tokens = {
 	{-1, NULL}
 };

+/*
+ * Remove adjacent slashes and then the trailing slash, unless it is
+ * the only remaining character.
+ *
+ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
+ */
+static void canonicalize_path(char *path)
+{
+	int i, j = 0;
+
+	for (i = 0; path[i] != '\0'; i++) {
+		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
+			path[j++] = path[i];
+	}
+
+	if (j > 1 && path[j - 1] == '/')
+		j--;
+	path[j] = '\0';
+}
+
 static int parse_fsopt_token(char *c, void *private)
 {
 	struct ceph_mount_options *fsopt = private;

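The new canonicalize_path() compacts the string in place with two indices: i scans the input, j writes the output, and a '/' is only copied when the previously written byte is not already '/'. The same function body runs unmodified in userspace, so the examples from its comment can be checked directly:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Same algorithm as the kernel helper above, runnable standalone. */
static void canonicalize_path(char *path)
{
	int i, j = 0;

	for (i = 0; path[i] != '\0'; i++) {
		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
			path[j++] = path[i];
	}
	if (j > 1 && path[j - 1] == '/')
		j--;
	path[j] = '\0';
}

int main(void)
{
	char a[] = "//dir1////dir2///";
	char b[] = "///";

	canonicalize_path(a);
	canonicalize_path(b);
	assert(strcmp(a, "/dir1/dir2") == 0);
	assert(strcmp(b, "/") == 0);
	puts("ok");
	return 0;
}
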
@@ -446,12 +465,15 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
 	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
 	if (ret)
 		return ret;
+
 	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
 	if (ret)
 		return ret;
+
 	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
 	if (ret)
 		return ret;
+
 	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
 	if (ret)
 		return ret;
@@ -507,13 +529,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
 	 */
 	dev_name_end = strchr(dev_name, '/');
 	if (dev_name_end) {
-		if (strlen(dev_name_end) > 1) {
-			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
-			if (!fsopt->server_path) {
-				err = -ENOMEM;
-				goto out;
-			}
+		/*
+		 * The server_path will include the whole chars from userland
+		 * including the leading '/'.
+		 */
+		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+		if (!fsopt->server_path) {
+			err = -ENOMEM;
+			goto out;
 		}
+
+		canonicalize_path(fsopt->server_path);
 	} else {
 		dev_name_end = dev_name + strlen(dev_name);
 	}
@@ -842,7 +868,6 @@ static void destroy_caches(void)
 	ceph_fscache_unregister();
 }

-
 /*
  * ceph_umount_begin - initiate forced umount. Tear down down the
  * mount, skipping steps that may hang while waiting for server(s).
@@ -929,9 +954,6 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
 	return root;
 }

-
-
-
 /*
  * mount: join the ceph cluster, and open root directory.
  */
@@ -945,7 +967,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
 	mutex_lock(&fsc->client->mount_mutex);

 	if (!fsc->sb->s_root) {
-		const char *path;
+		const char *path = fsc->mount_options->server_path ?
+				     fsc->mount_options->server_path + 1 : "";

 		err = __ceph_open_session(fsc->client, started);
 		if (err < 0)
 			goto out;
@@ -957,13 +981,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
 			goto out;
 		}

-		if (!fsc->mount_options->server_path) {
-			path = "";
-			dout("mount opening path \\t\n");
-		} else {
-			path = fsc->mount_options->server_path + 1;
-			dout("mount opening path %s\n", path);
-		}
+		dout("mount opening path '%s'\n", path);

 		ceph_fs_debugfs_init(fsc);

@@ -92,7 +92,7 @@ struct ceph_mount_options {

 	char *snapdir_name;   /* default ".snap" */
 	char *mds_namespace;  /* default NULL */
-	char *server_path;    /* default "/" */
+	char *server_path;    /* default NULL (means "/") */
 	char *fscache_uniq;   /* default NULL */
 };

include/linux/acpi.h
@@ -473,6 +473,11 @@ void __init acpi_nvs_nosave_s3(void);
 void __init acpi_sleep_no_blacklist(void);
 #endif /* CONFIG_PM_SLEEP */
 
+int acpi_register_wakeup_handler(
+        int wake_irq, bool (*wakeup)(void *context), void *context);
+void acpi_unregister_wakeup_handler(
+        bool (*wakeup)(void *context), void *context);
+
 struct acpi_osc_context {
         char *uuid_str;                 /* UUID string */
         int rev;
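Note: a sketch of how a driver might consume the new pair of calls (every my_* name is hypothetical; only the two acpi_*_wakeup_handler() prototypes come from the patch):

    #include <linux/acpi.h>

    static int my_wake_irq;      /* hypothetical: the IRQ armed for wakeup */
    static void *my_ctx;         /* hypothetical: driver private state */

    /* Called from the ACPI SCI path on wakeup; return true only if our
     * device really asserted the wake IRQ, so events that are not ours
     * can be treated as spurious. */
    static bool my_wakeup_handler(void *context)
    {
            return context == my_ctx;  /* stand-in for a real pending check */
    }

    static int my_probe(void)
    {
            return acpi_register_wakeup_handler(my_wake_irq,
                                                my_wakeup_handler, my_ctx);
    }

    static void my_remove(void)
    {
            acpi_unregister_wakeup_handler(my_wakeup_handler, my_ctx);
    }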
include/linux/mlx5/mlx5_ifc.h
@@ -857,7 +857,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
         u8 swp_csum[0x1];
         u8 swp_lso[0x1];
         u8 cqe_checksum_full[0x1];
-        u8 reserved_at_24[0x5];
+        u8 tunnel_stateless_geneve_tx[0x1];
+        u8 tunnel_stateless_mpls_over_udp[0x1];
+        u8 tunnel_stateless_mpls_over_gre[0x1];
+        u8 tunnel_stateless_vxlan_gpe[0x1];
+        u8 tunnel_stateless_ipv4_over_vxlan[0x1];
         u8 tunnel_stateless_ip_over_ip[0x1];
         u8 reserved_at_2a[0x6];
         u8 max_vxlan_udp_ports[0x8];
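Note: the bit bookkeeping works out: reserved_at_24[0x5] covered offsets 0x24-0x28, which is exactly where the five newly named single-bit caps land; tunnel_stateless_ip_over_ip keeps its offset 0x29 and reserved_at_2a still starts at 0x2a, so the capability layout is unchanged -- five reserved bits simply gained names.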
include/linux/swab.h
@@ -7,6 +7,7 @@
 # define swab16 __swab16
 # define swab32 __swab32
 # define swab64 __swab64
+# define swab __swab
 # define swahw32 __swahw32
 # define swahb32 __swahb32
 # define swab16p __swab16p
include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <asm/bitsperlong.h>
 #include <asm/swab.h>
 
 /*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
                 __fswab64(x))
 #endif
 
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if __BITS_PER_LONG == 64
+        return __swab64(y);
+#else /* __BITS_PER_LONG == 32 */
+        return __swab32(y);
+#endif
+}
+
 /**
  * __swahw32 - return a word-swapped 32-bit value
  * @x: value to wordswap
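Note: a userspace sketch of what the new __swab() helper computes, using the compiler's byte-swap builtins in place of the kernel's __swab32/__swab64 internals (LP64 assumed for the value in the comment):

    #include <stdio.h>

    static unsigned long swab_ul(unsigned long y)
    {
    #if __SIZEOF_LONG__ == 8
            return __builtin_bswap64(y);
    #else
            return __builtin_bswap32(y);
    #endif
    }

    int main(void)
    {
            /* prints 0x100000000000000 when long is 64 bits wide */
            printf("%#lx\n", swab_ul(0x1UL));
            return 0;
    }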
lib/find_bit.c
@@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit);
 
 #ifdef __BIG_ENDIAN
 
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
-        return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
-        return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
 #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
 static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
                 const unsigned long *addr2, unsigned long nbits,
@@ -177,7 +165,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
                 tmp ^= invert;
 
         /* Handle 1st word. */
-        tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+        tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
         start = round_down(start, BITS_PER_LONG);
 
         while (!tmp) {
@@ -191,7 +179,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
                 tmp ^= invert;
         }
 
-        return min(start + __ffs(ext2_swab(tmp)), nbits);
+        return min(start + __ffs(swab(tmp)), nbits);
 }
 #endif
 
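Note: the change here is purely mechanical. _find_next_bit_le() still byte-swaps each bitmap word so a little-endian bitmap layout can be scanned with the native __ffs() on a big-endian CPU; the only difference is that the helper is now the shared swab() from swab.h above instead of a private ext2_swab() copy.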
mm/slub.c
@@ -261,7 +261,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
          * freepointer to be restored incorrectly.
          */
         return (void *)((unsigned long)ptr ^ s->random ^
-                        (unsigned long)kasan_reset_tag((void *)ptr_addr));
+                        swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
 #else
         return ptr;
 #endif
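Note: a userspace model of the hardened freelist encoding after this change (LP64 assumed; kasan_reset_tag() is omitted because it is an identity without KASAN). Byte-swapping ptr_addr matters because a freelist pointer and the address it is stored at differ mostly in their low bits, which previously nearly cancelled out in the XOR:

    #include <assert.h>
    #include <stdint.h>

    /* LP64 assumed so a 64-bit swap covers the whole pointer. */
    static uintptr_t swab_ul(uintptr_t v)
    {
            return __builtin_bswap64(v);
    }

    /* Encode and decode are the same XOR, so applying it twice must
     * round-trip the pointer exactly. */
    static void *freelist_ptr(void *ptr, uintptr_t random, uintptr_t ptr_addr)
    {
            return (void *)((uintptr_t)ptr ^ random ^ swab_ul(ptr_addr));
    }

    int main(void)
    {
            uintptr_t random = 0xdeadbeefcafef00dUL;  /* s->random stand-in */
            uintptr_t addr   = 0xffff888012345640UL;  /* freepointer slot */
            void *ptr        = (void *)0xffff888012345678UL;

            assert(freelist_ptr(freelist_ptr(ptr, random, addr),
                                random, addr) == ptr);
            return 0;
    }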
net/bluetooth/rfcomm/tty.c
@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
         dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
         if (IS_ERR(dlc))
                 return PTR_ERR(dlc);
-        else if (dlc) {
-                rfcomm_dlc_put(dlc);
+        if (dlc)
                 return -EBUSY;
-        }
         dlc = rfcomm_dlc_alloc(GFP_KERNEL);
         if (!dlc)
                 return -ENOMEM;
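Note: the functional change is deliberately small: an ERR_PTR from rfcomm_dlc_exists() is still propagated through the IS_ERR check kept above, but an already-existing dlc now simply yields -EBUSY, without the extra rfcomm_dlc_put() the old path performed -- the put that the changelog ties to the ODEBUG warning in rfcomm_dev_ioctl().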
net/ipv6/addrconf.c
@@ -3318,6 +3318,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
         if (netif_is_l3_master(idev->dev))
                 return;
 
+        /* no link local addresses on devices flagged as slaves */
+        if (idev->dev->flags & IFF_SLAVE)
+                return;
+
         ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 
         switch (idev->cnf.addr_gen_mode) {
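Note: IFF_SLAVE is the flag bonding-style masters set on their member ports, so this early return is what implements the changelog line "ipv6: don't auto-add link-local address to lag ports": an enslaved port no longer gets an fe80::/64 address auto-generated, while the aggregating master device (which does not carry IFF_SLAVE) still does.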
net/sched/cls_tcindex.c
@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
+#include <linux/refcount.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
@@ -26,9 +27,12 @@
 #define DEFAULT_HASH_SIZE       64      /* optimized for diffserv */
 
 
+struct tcindex_data;
+
 struct tcindex_filter_result {
         struct tcf_exts         exts;
         struct tcf_result       res;
+        struct tcindex_data     *p;
         struct rcu_work         rwork;
 };
 
@@ -49,6 +53,7 @@ struct tcindex_data {
         u32 hash;               /* hash table size; 0 if undefined */
         u32 alloc_hash;         /* allocated size */
         u32 fall_through;       /* 0: only classify if explicit match */
+        refcount_t refcnt;      /* a temporary refcnt for perfect hash */
         struct rcu_work rwork;
 };
 
@@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
         return tcf_exts_has_actions(&r->exts) || r->res.classid;
 }
 
+static void tcindex_data_get(struct tcindex_data *p)
+{
+        refcount_inc(&p->refcnt);
+}
+
+static void tcindex_data_put(struct tcindex_data *p)
+{
+        if (refcount_dec_and_test(&p->refcnt)) {
+                kfree(p->perfect);
+                kfree(p->h);
+                kfree(p);
+        }
+}
+
 static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
                                                     u16 key)
 {
@@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
         p->mask = 0xffff;
         p->hash = DEFAULT_HASH_SIZE;
         p->fall_through = 1;
+        refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
 
         rcu_assign_pointer(tp->root, p);
         return 0;
@@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
 {
         tcf_exts_destroy(&r->exts);
         tcf_exts_put_net(&r->exts);
+        tcindex_data_put(r->p);
 }
 
 static void tcindex_destroy_rexts_work(struct work_struct *work)
@@ -212,6 +233,8 @@ static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
                 else
                         __tcindex_destroy_fexts(f);
         } else {
+                tcindex_data_get(p);
+
                 if (tcf_exts_get_net(&r->exts))
                         tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
                 else
@@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work)
                                               struct tcindex_data,
                                               rwork);
 
-        kfree(p->perfect);
-        kfree(p->h);
-        kfree(p);
+        tcindex_data_put(p);
 }
 
 static inline int
@@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
 };
 
 static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+                                      struct tcindex_data *p,
                                       struct net *net)
 {
         memset(r, 0, sizeof(*r));
+        r->p = p;
         return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
                              TCA_TCINDEX_POLICE);
 }
@@ -290,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
                                    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
                 if (err < 0)
                         goto errout;
+                cp->perfect[i].p = cp;
         }
 
         return 0;
@@ -334,6 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         cp->alloc_hash = p->alloc_hash;
         cp->fall_through = p->fall_through;
         cp->tp = tp;
+        refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
 
         if (tb[TCA_TCINDEX_HASH])
                 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -366,7 +391,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         }
         cp->h = p->h;
 
-        err = tcindex_filter_result_init(&new_filter_result, net);
+        err = tcindex_filter_result_init(&new_filter_result, cp, net);
         if (err < 0)
                 goto errout_alloc;
         if (old_r)
@@ -434,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                 goto errout_alloc;
         f->key = handle;
         f->next = NULL;
-        err = tcindex_filter_result_init(&f->result, net);
+        err = tcindex_filter_result_init(&f->result, cp, net);
         if (err < 0) {
                 kfree(f);
                 goto errout_alloc;
@@ -447,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
         }
 
         if (old_r && old_r != r) {
-                err = tcindex_filter_result_init(old_r, net);
+                err = tcindex_filter_result_init(old_r, cp, net);
                 if (err < 0) {
                         kfree(f);
                         goto errout_alloc;
@@ -571,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
         for (i = 0; i < p->hash; i++) {
                 struct tcindex_filter_result *r = p->perfect + i;
 
+                /* tcf_queue_work() does not guarantee the ordering we
+                 * want, so we have to take this refcnt temporarily to
+                 * ensure 'p' is freed after all tcindex_filter_result
+                 * here. Imperfect hash does not need this, because it
+                 * uses linked lists rather than an array.
+                 */
+                tcindex_data_get(p);
+
                 tcf_unbind_filter(tp, &r->res);
                 if (tcf_exts_get_net(&r->exts))
                         tcf_queue_work(&r->rwork,
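Note: the fix is the classic shared-ownership pattern: the owner starts the count at 1 (the refcount_set() calls in tcindex_init() and tcindex_set_parms()), every queued work item that can outlive the owner takes a reference, and whoever drops the count to zero frees. A compact userspace model (names hypothetical, C11 atomics standing in for refcount_t):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct data { atomic_int refcnt; };     /* models struct tcindex_data */

    static void data_get(struct data *p)    /* models tcindex_data_get() */
    {
            atomic_fetch_add(&p->refcnt, 1);
    }

    static void data_put(struct data *p)    /* models tcindex_data_put() */
    {
            /* fetch_sub returns the old value: 1 means we were last */
            if (atomic_fetch_sub(&p->refcnt, 1) == 1)
                    free(p);
    }

With that in place, 'p' can no longer be freed by tcindex_destroy_work() while a tcindex_filter_result work item still points at it, regardless of the order in which tcf_queue_work() runs the callbacks.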
sound/soc/jz4740/jz4740-i2s.c
@@ -83,7 +83,7 @@
 #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
 
 #define JZ_AIC_CLK_DIV_MASK 0xf
-#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_SHIFT 0
 #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
 #define I2SDIV_IDV_SHIFT 8
 #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
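Note: per the corrected defines, the DV divider field occupies bits 3:0 of the I2SDIV register and IDV bits 11:8; with both shifts set to 8 the driver had been writing the divider into the IDV field. A trivial check of the resulting masks:

    #include <assert.h>

    int main(void)
    {
            assert((0xf << 0) == 0x00f);    /* I2SDIV_DV_MASK: bits 3:0 */
            assert((0xf << 8) == 0xf00);    /* I2SDIV_IDV_MASK: bits 11:8 */
            return 0;
    }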
tools/accounting/getdelays.c
@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
         msg.g.version = 0x1;
         na = (struct nlattr *) GENLMSG_DATA(&msg);
         na->nla_type = nla_type;
-        na->nla_len = nla_len + 1 + NLA_HDRLEN;
+        na->nla_len = nla_len + NLA_HDRLEN;
         memcpy(NLA_DATA(na), nla_data, nla_len);
         msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
 
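Note: a netlink attribute's nla_len must be the payload size plus the attribute header, nothing more; the stray "+ 1" overstated the length by one byte, which newer kernels with stricter attribute validation warn about or reject. For the __u32 PID/TGID attribute this tool sends, the corrected arithmetic is:

    #include <assert.h>
    #include <linux/netlink.h>      /* NLA_HDRLEN */
    #include <linux/types.h>        /* __u32 */

    int main(void)
    {
            __u32 pid = 0;
            /* header (4 bytes) + payload (4 bytes), no stray "+ 1" */
            assert(sizeof(pid) + NLA_HDRLEN == 8);
            return 0;
    }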