This is the 5.10.84 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmGwZtwACgkQONu9yGCS
aT7dQhAAjnlFKkXb+omHKQNkSHbD0ynEkxwtQfNFt1kcWcpJy5Df9xyNXQBohnqr
Y0KUowpVF8gkXOelbdMrK5P6k28SpT2k+UMnUtZLNR6qMNlOY371BDasfi/dqWWR
1JdLtQe6JVvwxo+6INRqEO27Ocyc1PbLZSo7i3Ik2+7mIRjN7+k1apFG0HOLEHIP
3oMWDgnyQp3gTBvTFG0Vrd4f9AwrHq4JoVrhruNLqIYajlQ8dPPjuJ9alTifRddD
eWY10Z21jAFib4WHgy6wXBVv3L5Np19liYMzv02o5pzFV1nLJCnKDA79jV7a2i2H
lVmVpcWG0Yagyu8MW0hmOewqPXpAJH/C8g75mXeja546vCnvccNx1OXNOR4ux5Es
IpEFAV+DnjSYgu88Cw6kF8j/B9x1n90sgywCWbRwAMJ1zX9/tvvLWSe8HpfZ2jvo
Iuw6XDTL84DDuHY4yiK2fofxZvXp+Hk+c0Betu6GoQvoGaDRD8IWIceDWgiqy+V7
fOrLitl8lbk1yjD7bDZMpEIgzQaaxJu6d+YWzy+PibZxQzOKHPC5gqEmajJd7ZWm
OJ48SrNxyfjRZP/3NBgXOxje3lz3WkCdiPQrSQOQxoe+kdW5ZFuXDapWSO4dZfSe
6XPOD/d+KVLNDQby3WnVB2MMlufHFnCs4wPgb13jfyiEbxifp+A=
=D40k
-----END PGP SIGNATURE-----

Merge 5.10.84 into android12-5.10-lts

Changes in 5.10.84
	NFSv42: Fix pagecache invalidation after COPY/CLONE
	can: j1939: j1939_tp_cmd_recv(): check the dst address of TP.CM_BAM
	ovl: simplify file splice
	ovl: fix deadlock in splice write
	gfs2: release iopen glock early in evict
	gfs2: Fix length of holes reported at end-of-file
	powerpc/pseries/ddw: Revert "Extend upper limit for huge DMA window for persistent memory"
	drm/sun4i: fix unmet dependency on RESET_CONTROLLER for PHY_SUN6I_MIPI_DPHY
	mac80211: do not access the IV when it was stripped
	net/smc: Transfer remaining wait queue entries during fallback
	atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait
	net: return correct error code
	platform/x86: thinkpad_acpi: Add support for dual fan control
	platform/x86: thinkpad_acpi: Fix WWAN device disabled issue after S3 deep
	s390/setup: avoid using memblock_enforce_memory_limit
	btrfs: check-integrity: fix a warning on write caching disabled disk
	thermal: core: Reset previous low and high trip during thermal zone init
	scsi: iscsi: Unblock session then wake up error handler
	drm/amd/amdkfd: Fix kernel panic when reset failed and been triggered again
	drm/amd/amdgpu: fix potential memleak
	ata: ahci: Add Green Sardine vendor ID as board_ahci_mobile
	ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
	ipv6: check return value of ipv6_skip_exthdr
	net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
	net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
	perf inject: Fix ARM SPE handling
	perf hist: Fix memory leak of a perf_hpp_fmt
	perf report: Fix memory leaks around perf_tip()
	net/smc: Avoid warning of possible recursive locking
	ACPI: Add stubs for wakeup handler functions
	vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit
	kprobes: Limit max data_size of the kretprobe instances
	rt2x00: do not mark device gone on EPROTO errors during start
	ipmi: Move remove_work to dedicated workqueue
	cpufreq: Fix get_cpu_device() failure in add_cpu_dev_symlink()
	s390/pci: move pseudo-MMIO to prevent MIO overlap
	fget: check that the fd still exists after getting a ref to it
	sata_fsl: fix UAF in sata_fsl_port_stop when rmmod sata_fsl
	sata_fsl: fix warning in remove_proc_entry when rmmod sata_fsl
	ipv6: fix memory leak in fib6_rule_suppress
	drm/amd/display: Allow DSC on supported MST branch devices
	KVM: Disallow user memslot with size that exceeds "unsigned long"
	KVM: nVMX: Flush current VPID (L1 vs. L2) for KVM_REQ_TLB_FLUSH_GUEST
	KVM: x86: Use a stable condition around all VT-d PI paths
	KVM: arm64: Avoid setting the upper 32 bits of TCR_EL2 and CPTR_EL2 to 1
	KVM: X86: Use vcpu->arch.walk_mmu for kvm_mmu_invlpg()
	tracing/histograms: String compares should not care about signed values
	wireguard: selftests: increase default dmesg log size
	wireguard: allowedips: add missing __rcu annotation to satisfy sparse
	wireguard: selftests: actually test for routing loops
	wireguard: selftests: rename DEBUG_PI_LIST to DEBUG_PLIST
	wireguard: device: reset peer src endpoint when netns exits
	wireguard: receive: use ring buffer for incoming handshakes
	wireguard: receive: drop handshakes if queue lock is contended
	wireguard: ratelimiter: use kvcalloc() instead of kvzalloc()
	i2c: stm32f7: flush TX FIFO upon transfer errors
	i2c: stm32f7: recover the bus on access timeout
	i2c: stm32f7: stop dma transfer in case of NACK
	i2c: cbus-gpio: set atomic transfer callback
	natsemi: xtensa: fix section mismatch warnings
	tcp: fix page frag corruption on page fault
	net: qlogic: qlcnic: Fix a NULL pointer dereference in qlcnic_83xx_add_rings()
	net: mpls: Fix notifications when deleting a device
	siphash: use _unaligned version by default
	arm64: ftrace: add missing BTIs
	net/mlx4_en: Fix an use-after-free bug in mlx4_en_try_alloc_resources()
	selftests: net: Correct case name
	mt76: mt7915: fix NULL pointer dereference in mt7915_get_phy_mode
	ASoC: tegra: Fix wrong value type in ADMAIF
	ASoC: tegra: Fix wrong value type in I2S
	ASoC: tegra: Fix wrong value type in DMIC
	ASoC: tegra: Fix wrong value type in DSPK
	ASoC: tegra: Fix kcontrol put callback in ADMAIF
	ASoC: tegra: Fix kcontrol put callback in I2S
	ASoC: tegra: Fix kcontrol put callback in DMIC
	ASoC: tegra: Fix kcontrol put callback in DSPK
	ASoC: tegra: Fix kcontrol put callback in AHUB
	rxrpc: Fix rxrpc_peer leak in rxrpc_look_up_bundle()
	rxrpc: Fix rxrpc_local leak in rxrpc_lookup_peer()
	ALSA: intel-dsp-config: add quirk for CML devices based on ES8336 codec
	net: usb: lan78xx: lan78xx_phy_init(): use PHY_POLL instead of "0" if no IRQ is available
	net: marvell: mvpp2: Fix the computation of shared CPUs
	dpaa2-eth: destroy workqueue at the end of remove function
	net: annotate data-races on txq->xmit_lock_owner
	ipv4: convert fib_num_tclassid_users to atomic_t
	net/smc: fix wrong list_del in smc_lgr_cleanup_early
	net/rds: correct socket tunable error in rds_tcp_tune()
	net/smc: Keep smc_close_final rc during active close
	drm/msm/a6xx: Allocate enough space for GMU registers
	drm/msm: Do hw_init() before capturing GPU state
	atlantic: Increase delay for fw transactions
	atlatnic: enable Nbase-t speeds with base-t
	atlantic: Fix to display FW bundle version instead of FW mac version.
	atlantic: Add missing DIDs and fix 115c.
	Remove Half duplex mode speed capabilities.
	atlantic: Fix statistics logic for production hardware
	atlantic: Remove warn trace message.
	KVM: x86/pmu: Fix reserved bits for AMD PerfEvtSeln register
	KVM: VMX: Set failure code in prepare_vmcs02()
	x86/sev: Fix SEV-ES INS/OUTS instructions for word, dword, and qword
	x86/entry: Use the correct fence macro after swapgs in kernel CR3
	x86/xen: Add xenpv_restore_regs_and_return_to_usermode()
	sched/uclamp: Fix rq->uclamp_max not set on first enqueue
	x86/pv: Switch SWAPGS to ALTERNATIVE
	x86/entry: Add a fence for kernel entry SWAPGS in paranoid_entry()
	parisc: Fix KBUILD_IMAGE for self-extracting kernel
	parisc: Fix "make install" on newer debian releases
	vgacon: Propagate console boot parameters before calling `vc_resize'
	xhci: Fix commad ring abort, write all 64 bits to CRCR register.
	USB: NO_LPM quirk Lenovo Powered USB-C Travel Hub
	usb: typec: tcpm: Wait in SNK_DEBOUNCED until disconnect
	x86/tsc: Add a timer to make sure TSC_adjust is always checked
	x86/tsc: Disable clocksource watchdog for TSC on qualified platorms
	x86/64/mm: Map all kernel memory into trampoline_pgd
	tty: serial: msm_serial: Deactivate RX DMA for polling support
	serial: pl011: Add ACPI SBSA UART match id
	serial: tegra: Change lower tolerance baud rate limit for tegra20 and tegra30
	serial: core: fix transmit-buffer reset and memleak
	serial: 8250_pci: Fix ACCES entries in pci_serial_quirks array
	serial: 8250_pci: rewrite pericom_do_set_divisor()
	serial: 8250: Fix RTS modem control while in rs485 mode
	iwlwifi: mvm: retry init flow if failed
	parisc: Mark cr16 CPU clocksource unstable on all SMP machines
	net/tls: Fix authentication failure in CCM mode
	ipmi: msghandler: Make symbol 'remove_work_wq' static
	Linux 5.10.84

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iad592da28c6425dea7dca35b229d14c44edb412d
commit 1b71a028a2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 83
+SUBLEVEL = 84
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -84,7 +84,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1		((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1		((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI		(1 << 20)
 #define TCR_EL2_PS_SHIFT	16
 #define TCR_EL2_PS_MASK		(7 << TCR_EL2_PS_SHIFT)
@@ -269,7 +269,7 @@
 #define CPTR_EL2_TFP_SHIFT	10

 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC		(1 << 31)
+#define CPTR_EL2_TCPAC		(1U << 31)
 #define CPTR_EL2_TAM		(1 << 30)
 #define CPTR_EL2_TTA		(1 << 20)
 #define CPTR_EL2_TFP		(1 << CPTR_EL2_TFP_SHIFT)
@@ -77,11 +77,17 @@
 	.endm

 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry	1
 	b	ftrace_common
 SYM_CODE_END(ftrace_regs_caller)

 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry	0
 	b	ftrace_common
 SYM_CODE_END(ftrace_caller)
@@ -17,7 +17,12 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #

+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif

 NM		= sh $(srctree)/arch/parisc/nm
 CHECKFLAGS	+= -D__hppa__=1
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi

 # Default install
@@ -252,27 +252,13 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs on
-	 * different sockets, so mark them unstable and lower rating on
-	 * multi-socket SMP systems.
+	 * The cr16 interval timers are not syncronized across CPUs, even if
+	 * they share the same socket.
 	 */
 	if (num_online_cpus() > 1 && !running_on_qemu) {
-		int cpu;
-		unsigned long cpu0_loc;
-		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-		for_each_online_cpu(cpu) {
-			if (cpu == 0)
-				continue;
-			if ((cpu0_loc != 0) &&
-			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-				continue;
-
-			clocksource_cr16.name = "cr16_unstable";
-			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-			clocksource_cr16.rating = 0;
-			break;
-		}
+		clocksource_cr16.name = "cr16_unstable";
+		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+		clocksource_cr16.rating = 0;
 	}

 	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are
@@ -1034,15 +1034,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
 	phys_addr_t max_addr = memory_hotplug_max();
 	struct device_node *memory;

-	/*
-	 * The "ibm,pmemory" can appear anywhere in the address space.
-	 * Assuming it is still backed by page structs, set the upper limit
-	 * for the huge DMA window as MAX_PHYSMEM_BITS.
-	 */
-	if (of_find_node_by_type(NULL, "ibm,pmemory"))
-		return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
-			(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
 	for_each_node_by_type(memory, "memory") {
 		unsigned long start, size;
 		int n_mem_addr_cells, n_mem_size_cells, len;
@@ -14,12 +14,13 @@
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT		48
-#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT		62
+#define ZPCI_IOMAP_ADDR_BASE		(1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES		\
-	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+	(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK	\
-	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+	((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

 struct zpci_iomap_entry {
	u32 fh;
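The rewritten macros move the pseudo-MMIO window down from bit 63 to bit 62 so it cannot overlap the MIO address space. A minimal user-space sketch of the arithmetic implied by the constants in the hunk above (illustration only, not kernel code):

#include <stdio.h>

#define ZPCI_IOMAP_SHIFT	48
#define ZPCI_IOMAP_ADDR_SHIFT	62
#define ZPCI_IOMAP_ADDR_BASE	(1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_MAX_ENTRIES	(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))

int main(void)
{
	/* The window now starts at 2^62 = 0x4000000000000000. */
	printf("base    = %#lx\n", ZPCI_IOMAP_ADDR_BASE);
	/* Each entry spans 2^48 bytes, so 2^(62-48) = 16384 entries fit. */
	printf("entries = %lu\n", ZPCI_IOMAP_MAX_ENTRIES);
	return 0;
}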
@@ -845,9 +845,6 @@ static void __init setup_memory(void)
 		storage_key_init_range(start, end);

 	psw_set_key(PAGE_DEFAULT_KEY);
-
-	/* Only cosmetics */
-	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }

 /*
@@ -575,6 +575,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 	ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
 	POP_REGS pop_rdi=0

 	/*
@@ -669,7 +673,7 @@ native_irq_return_ldt:
 	 */

 	pushq	%rdi				/* Stash user RDI */
-	SWAPGS					/* to kernel GS */
+	swapgs					/* to kernel GS */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

 	movq	PER_CPU_VAR(espfix_waddr), %rdi
@@ -699,7 +703,7 @@ native_irq_return_ldt:
 	orq	PER_CPU_VAR(espfix_stack), %rax

 	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
-	SWAPGS					/* to user GS */
+	swapgs					/* to user GS */
 	popq	%rdi				/* Restore user RDI */

 	movq	%rax, %rsp
@@ -932,6 +936,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
 	/* EBX = 1 -> kernel GSBASE active, no restore required */
 	movl	$1, %ebx
+
 	/*
 	 * The kernel-enforced convention is a negative GSBASE indicates
 	 * a kernel value. No SWAPGS needed on entry and exit.
@@ -939,21 +944,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 	movl	$MSR_GS_BASE, %ecx
 	rdmsr
 	testl	%edx, %edx
-	jns	.Lparanoid_entry_swapgs
-	ret
-
-.Lparanoid_entry_swapgs:
-	SWAPGS
-
-	/*
-	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-	 * unconditional CR3 write, even in the PTI case. So do an lfence
-	 * to prevent GS speculation, regardless of whether PTI is enabled.
-	 */
-	FENCE_SWAPGS_KERNEL_ENTRY
+	js	.Lparanoid_kernel_gsbase

 	/* EBX = 0 -> SWAPGS required on exit */
 	xorl	%ebx, %ebx
+	swapgs
+.Lparanoid_kernel_gsbase:
+
+	FENCE_SWAPGS_KERNEL_ENTRY
 	ret
 SYM_CODE_END(paranoid_entry)
@@ -1001,7 +999,7 @@ SYM_CODE_START_LOCAL(paranoid_exit)
 	jnz	restore_regs_and_return_to_kernel

 	/* We are returning to a context with user GSBASE */
-	SWAPGS_UNSAFE_STACK
+	swapgs
 	jmp	restore_regs_and_return_to_kernel
 SYM_CODE_END(paranoid_exit)
@@ -1035,11 +1033,6 @@ SYM_CODE_START_LOCAL(error_entry)
 	pushq	%r12
 	ret

-.Lerror_entry_done_lfence:
-	FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-	ret
-
 	/*
 	 * There are two places in the kernel that can potentially fault with
 	 * usergs. Handle them here. B stepping K8s sometimes report a
@@ -1062,8 +1055,14 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
 	SWAPGS
-	FENCE_SWAPGS_USER_ENTRY
-	jmp .Lerror_entry_done
+
+	/*
+	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+	 * kernel or user gsbase.
+	 */
+.Lerror_entry_done_lfence:
+	FENCE_SWAPGS_KERNEL_ENTRY
+	ret

 .Lbstep_iret:
 	/* Fix truncated RIP */
@@ -1426,7 +1425,7 @@ nmi_no_fsgsbase:
 	jnz	nmi_restore

 nmi_swapgs:
-	SWAPGS_UNSAFE_STACK
+	swapgs

 nmi_restore:
 	POP_REGS
@@ -131,18 +131,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #define SAVE_FLAGS(x)		pushfq; popq %rax
 #endif

-#define SWAPGS	swapgs
-/*
- * Currently paravirt can't handle swapgs nicely when we
- * don't have a stack we can rely on (such as a user space
- * stack). So we either find a way around these or just fault
- * and emulate if a guest tries to call swapgs directly.
- *
- * Either way, this is a good way to document that we don't
- * have a reliable stack. x86_64 only.
- */
-#define SWAPGS_UNSAFE_STACK	swapgs
-
 #define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64	\
	swapgs;	\
@@ -170,6 +158,14 @@ static __always_inline int arch_irqs_disabled(void)

 	return arch_irqs_disabled_flags(flags);
 }
+#else
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_XEN_PV
+#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+#else
+#define SWAPGS	swapgs
+#endif
+#endif
 #endif /* !__ASSEMBLY__ */

 #endif
@@ -776,26 +776,6 @@ extern void default_banner(void);

 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT_XXL
-/*
- * If swapgs is used while the userspace stack is still current,
- * there's no way to call a pvop. The PV replacement *must* be
- * inlined, or the swapgs instruction must be trapped and emulated.
- */
-#define SWAPGS_UNSAFE_STACK					\
-	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)
-
-/*
- * Note: swapgs is very special, and in practise is either going to be
- * implemented with a single "swapgs" instruction or something very
- * special. Either way, we don't need to save any registers for
- * it.
- */
-#define SWAPGS							\
-	PARA_SITE(PARA_PATCH(PV_CPU_swapgs),			\
-		  ANNOTATE_RETPOLINE_SAFE;			\
-		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);	\
-		 )
-
 #define USERGS_SYSRET64						\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),		\
		  ANNOTATE_RETPOLINE_SAFE;			\
@@ -169,8 +169,6 @@ struct pv_cpu_ops {
	   frame set up. */
	void (*iret)(void);

-	void (*swapgs)(void);
-
	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
 #endif
@@ -15,7 +15,6 @@ int main(void)
 #ifdef CONFIG_PARAVIRT_XXL
	OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
	       cpu.usergs_sysret64);
-	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
 #ifdef CONFIG_DEBUG_ENTRY
	OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif
@@ -312,7 +312,6 @@ struct paravirt_patch_template pv_ops = {

	.cpu.usergs_sysret64	= native_usergs_sysret64,
	.cpu.iret		= native_iret,
-	.cpu.swapgs		= native_swapgs,

 #ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap	= native_tss_invalidate_io_bitmap,
@@ -28,7 +28,6 @@ struct patch_xxl {
	const unsigned char	irq_restore_fl[2];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
-	const unsigned char	cpu_swapgs[3];
	const unsigned char	mov64[3];
 };
@@ -43,7 +42,6 @@ static const struct patch_xxl patch_data_xxl = {
	.cpu_wbinvd	= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
-	.cpu_swapgs	= { 0x0f, 0x01, 0xf8 },	// swapgs
	.mov64		= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
 };
@@ -86,7 +84,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
-	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
 #endif
@@ -260,11 +260,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
 {
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-	char __user *target = (char __user *)dst;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;

	/*
	 * This function uses __put_user() independent of whether kernel or user
@@ -286,26 +281,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *target = (u8 __user *)dst;
+
		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *target = (u16 __user *)dst;
+
		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *target = (u32 __user *)dst;
+
		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *target = (u64 __user *)dst;
+
		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
+	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
@@ -328,11 +339,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
 {
	unsigned long error_code = X86_PF_PROT;
-	char __user *s = (char __user *)src;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;

	/*
	 * This function uses __get_user() independent of whether kernel or user
@@ -354,26 +360,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *s = (u8 __user *)src;
+
		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *s = (u16 __user *)src;
+
		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *s = (u32 __user *)src;
+
		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *s = (u64 __user *)src;
		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
+	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
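The restructuring above matters because __put_user()/__get_user() derive the width of the generated access from the pointer type; casting dst/src to u8/u16/u32/u64 __user pointers makes the emulated INS/OUTS touch exactly size bytes. A hedged user-space analogue of the same dispatch pattern (illustrative sketch, not the kernel helpers):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Perform one store whose width matches 'size', as the SEV-ES fix does. */
static int write_sized(void *dst, const void *buf, size_t size)
{
	switch (size) {
	case 1: { uint8_t  v; memcpy(&v, buf, 1); *(volatile uint8_t  *)dst = v; break; }
	case 2: { uint16_t v; memcpy(&v, buf, 2); *(volatile uint16_t *)dst = v; break; }
	case 4: { uint32_t v; memcpy(&v, buf, 4); *(volatile uint32_t *)dst = v; break; }
	case 8: { uint64_t v; memcpy(&v, buf, 8); *(volatile uint64_t *)dst = v; break; }
	default: return -1;	/* unsupported width */
	}
	return 0;
}

int main(void)
{
	uint64_t target = 0;
	uint32_t val = 0x12345678;

	write_sized(&target, &val, sizeof(val));	/* exactly one 4-byte store */
	printf("%#llx\n", (unsigned long long)target);
	return 0;
}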
@@ -1178,6 +1178,12 @@ void mark_tsc_unstable(char *reason)

 EXPORT_SYMBOL_GPL(mark_tsc_unstable);

+static void __init tsc_disable_clocksource_watchdog(void)
+{
+	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1194,6 +1200,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
+
+	/*
+	 * Disable the clocksource watchdog when the system has:
+	 *  - TSC running at constant frequency
+	 *  - TSC which does not stop in C-States
+	 *  - the TSC_ADJUST register which allows to detect even minimal
+	 *    modifications
+	 *  - not more than two sockets. As the number of sockets cannot be
+	 *    evaluated at the early boot stage where this has to be
+	 *    invoked, check the number of online memory nodes as a
+	 *    fallback solution which is an reasonable estimate.
+	 */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+	    nr_online_nodes <= 2)
+		tsc_disable_clocksource_watchdog();
 }
@@ -1385,9 +1408,6 @@ static int __init init_tsc_clocksource(void)
	if (tsc_unstable)
		goto unreg;

-	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1525,7 +1545,7 @@ void __init tsc_init(void)
	}

-	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+	tsc_disable_clocksource_watchdog();

	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
	detect_art();
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };

 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;

 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
	}
 }

+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL		(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
 {
@@ -5152,7 +5152,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);

 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -274,7 +274,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-	pmu->reserved_bits = 0xffffffff00200000ull;
+	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fall out */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
@@ -2619,8 +2619,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-				     vmcs12->guest_ia32_perf_global_ctrl)))
+				     vmcs12->guest_ia32_perf_global_ctrl))) {
+		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
+	}

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -5,6 +5,7 @@
 #include <asm/cpu.h>

 #include "lapic.h"
+#include "irq.h"
 #include "posted_intr.h"
 #include "trace.h"
 #include "vmx.h"
@@ -77,13 +78,18 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
	pi_set_on(pi_desc);
 }

+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+	return irqchip_in_kernel(kvm) && enable_apicv &&
+		kvm_arch_has_assigned_device(kvm) &&
+		irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 {
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
-	    !kvm_vcpu_apicv_active(vcpu))
+	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return;

	/* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
	struct pi_desc old, new;
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
-	    !kvm_vcpu_apicv_active(vcpu))
+	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return 0;

	WARN_ON(irqs_disabled());
@@ -256,9 +260,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

-	if (!kvm_arch_has_assigned_device(kvm) ||
-	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
-	    !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+	if (!vmx_can_use_vtd_pi(kvm))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
@@ -2908,6 +2908,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
	}
 }

+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+	if (is_guest_mode(vcpu))
+		return nested_get_vpid02(vcpu);
+	return to_vmx(vcpu)->vpid;
+}
+
 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
	struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2920,31 +2927,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
	if (enable_ept)
		ept_sync_context(construct_eptp(vcpu, root_hpa,
						mmu->shadow_root_level));
-	else if (!is_guest_mode(vcpu))
-		vpid_sync_context(to_vmx(vcpu)->vpid);
	else
-		vpid_sync_context(nested_get_vpid02(vcpu));
+		vpid_sync_context(vmx_get_current_vpid(vcpu));
 }

 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 {
	/*
-	 * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+	 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
	 */
-	vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+	vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
 }

 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
	/*
-	 * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
-	 * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
-	 * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+	 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+	 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
+	 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
	 * i.e. no explicit INVVPID is necessary.
	 */
-	vpid_sync_context(to_vmx(vcpu)->vpid);
+	vpid_sync_context(vmx_get_current_vpid(vcpu));
 }

 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -70,6 +70,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
+	int i;
 #endif

	base = (unsigned char *)real_mode_header;
@@ -126,8 +127,17 @@ static void __init setup_real_mode(void)
	trampoline_header->flags = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-	trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+	/*
+	 * Include the entirety of the kernel mapping into the trampoline
+	 * PGD. This way, all mappings present in the normal kernel page
+	 * tables are usable while running on trampoline_pgd.
+	 */
+	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+		trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif

	sme_sev_setup_real_mode(trampoline_header);
@@ -1083,9 +1083,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 #endif
	.io_delay = xen_io_delay,

-	/* Xen takes care of %gs when switching to usermode for us */
-	.swapgs = paravirt_nop,
-
	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
 };
@@ -19,6 +19,7 @@

 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>

 /*
  * Enable events. This clears the event mask and tests the pending
@@ -235,6 +236,25 @@ SYM_CODE_START(xen_sysret64)
	jmp hypercall_iret
 SYM_CODE_END(xen_sysret64)

+/*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
+ * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
+ * frame at the same address is useless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+	UNWIND_HINT_REGS
+	POP_REGS
+
+	/* stackleak_erase() can work safely on the kernel stack. */
+	STACKLEAK_ERASE_NOCLOBBER
+
+	addq	$8, %rsp	/* skip regs->orig_ax */
+	jmp	xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which
  * means we have:
@@ -442,6 +442,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	/* AMD */
	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
	{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+	{ PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
	/* AMD is using RAID class only for ahci controllers */
	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
	return 0;
 }

+static void sata_fsl_host_stop(struct ata_host *host)
+{
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+
+	iounmap(host_priv->hcr_base);
+	kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
	.port_start = sata_fsl_port_start,
	.port_stop = sata_fsl_port_stop,

+	.host_stop = sata_fsl_host_stop,
+
	.pmp_attach = sata_fsl_pmp_attach,
	.pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
	host_priv->ssr_base = ssr_base;
	host_priv->csr_base = csr_base;

-	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-	if (!irq) {
-		dev_err(&ofdev->dev, "invalid irq from platform\n");
+	irq = platform_get_irq(ofdev, 0);
+	if (irq < 0) {
+		retval = irq;
		goto error_exit_with_cleanup;
	}
	host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)

	ata_host_detach(host);

-	irq_dispose_mapping(host_priv->irq);
-	iounmap(host_priv->hcr_base);
-	kfree(host_priv);
-
	return 0;
 }
@@ -203,6 +203,8 @@ struct ipmi_user {
	struct work_struct remove_work;
 };

+static struct workqueue_struct *remove_work_wq;
+
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
 {
@@ -1272,7 +1274,7 @@ static void free_user(struct kref *ref)
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
-	schedule_work(&user->remove_work);
+	queue_work(remove_work_wq, &user->remove_work);
 }

 static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -5166,6 +5168,13 @@ static int ipmi_init_msghandler(void)

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

+	remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+	if (!remove_work_wq) {
+		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+		rv = -ENOMEM;
+		goto out;
+	}
+
	initialized = true;

 out:
@@ -5191,6 +5200,8 @@ static void __exit cleanup_ipmi(void)
	int count;

	if (initialized) {
+		destroy_workqueue(remove_work_wq);
+
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);
@@ -1019,10 +1019,9 @@ static struct kobj_type ktype_cpufreq = {
	.release	= cpufreq_sysfs_release,
 };

-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+				struct device *dev)
 {
-	struct device *dev = get_cpu_device(cpu);
-
	if (unlikely(!dev))
		return;
@@ -1406,7 +1405,7 @@ static int cpufreq_online(unsigned int cpu)
	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
-			add_cpu_dev_symlink(policy, j);
+			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1569,7 +1568,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
-		add_cpu_dev_symlink(policy, cpu);
+		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
 }
@@ -358,6 +358,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
		"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+		kobject_put(&hive->kobj);
		kfree(hive);
		hive = NULL;
		goto pro_end;
@@ -1207,6 +1207,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
	bool hanging;

	dqm_lock(dqm);
+	if (!dqm->sched_running) {
+		dqm_unlock(dqm);
+		return 0;
+	}
+
	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"

 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"

 #include "i2caux_interface.h"
 #if defined(CONFIG_DEBUG_FS)
@@ -152,6 +154,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };

 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+		return true;
+
+	return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
	struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -159,7 +171,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -169,10 +181,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
	 * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
	 *
	 */
-
-	if (!aconnector->dsc_aux && !port->parent->port_parent)
+	if (!aconnector->dsc_aux && !port->parent->port_parent &&
+	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif

	if (!aconnector->dsc_aux)
		return false;
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-		2, sizeof(*a6xx_state->gmu_registers));
+		3, sizeof(*a6xx_state->gmu_registers));

	if (!a6xx_state->gmu_registers)
		return;

-	a6xx_state->nr_gmu_registers = 2;
+	a6xx_state->nr_gmu_registers = 3;

	/* Get the CX GMU registers from AHB */
	_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
		goto free_priv;

	pm_runtime_get_sync(&gpu->pdev->dev);
+	msm_gpu_hw_init(gpu);
	show_priv->state = gpu->funcs->gpu_state_get(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);
@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
	default MACH_SUN8I
	select CRC_CCITT
	select DRM_MIPI_DSI
+	select RESET_CONTROLLER
	select PHY_SUN6I_MIPI_DPHY
	help
	  Choose this option if you want have an Allwinner SoC with
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }

 static const struct i2c_algorithm cbus_i2c_algo = {
-	.smbus_xfer	= cbus_i2c_smbus_xfer,
-	.functionality	= cbus_i2c_func,
+	.smbus_xfer		= cbus_i2c_smbus_xfer,
+	.smbus_xfer_atomic	= cbus_i2c_smbus_xfer,
+	.functionality		= cbus_i2c_func,
 };

 static int cbus_i2c_remove(struct platform_device *pdev)
@@ -1472,6 +1472,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
 {
	struct stm32f7_i2c_dev *i2c_dev = data;
	struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+	struct stm32_i2c_dma *dma = i2c_dev->dma;
	void __iomem *base = i2c_dev->base;
	u32 status, mask;
	int ret = IRQ_HANDLED;
@@ -1497,6 +1498,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
		dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
			__func__, f7_msg->addr);
		writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+		if (i2c_dev->use_dma) {
+			stm32f7_i2c_disable_dma_req(i2c_dev);
+			dmaengine_terminate_all(dma->chan_using);
+		}
		f7_msg->result = -ENXIO;
	}
@@ -1512,7 +1517,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
		/* Clear STOP flag */
		writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);

-		if (i2c_dev->use_dma) {
+		if (i2c_dev->use_dma && !f7_msg->result) {
			ret = IRQ_WAKE_THREAD;
		} else {
			i2c_dev->master_mode = false;
@@ -1525,7 +1530,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
		if (f7_msg->stop) {
			mask = STM32F7_I2C_CR2_STOP;
			stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-		} else if (i2c_dev->use_dma) {
+		} else if (i2c_dev->use_dma && !f7_msg->result) {
			ret = IRQ_WAKE_THREAD;
		} else if (f7_msg->smbus) {
			stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1665,12 +1670,23 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
	time_left = wait_for_completion_timeout(&i2c_dev->complete,
						i2c_dev->adap.timeout);
	ret = f7_msg->result;
+	if (ret) {
+		/*
+		 * It is possible that some unsent data have already been
+		 * written into TXDR. To avoid sending old data in a
+		 * further transfer, flush TXDR in case of any error
+		 */
+		writel_relaxed(STM32F7_I2C_ISR_TXE,
+			       i2c_dev->base + STM32F7_I2C_ISR);
+		goto pm_free;
+	}

	if (!time_left) {
		dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
			i2c_dev->msg->addr);
+		if (i2c_dev->use_dma)
+			dmaengine_terminate_all(dma->chan_using);
		stm32f7_i2c_wait_free_bus(i2c_dev);
		ret = -ETIMEDOUT;
	}
@@ -1713,13 +1729,22 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
	timeout = wait_for_completion_timeout(&i2c_dev->complete,
					      i2c_dev->adap.timeout);
	ret = f7_msg->result;
-	if (ret)
+	if (ret) {
+		/*
+		 * It is possible that some unsent data have already been
+		 * written into TXDR. To avoid sending old data in a
+		 * further transfer, flush TXDR in case of any error
+		 */
+		writel_relaxed(STM32F7_I2C_ISR_TXE,
+			       i2c_dev->base + STM32F7_I2C_ISR);
		goto pm_free;
+	}

	if (!timeout) {
		dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
+		if (i2c_dev->use_dma)
+			dmaengine_terminate_all(dma->chan_using);
		stm32f7_i2c_wait_free_bus(i2c_dev);
		ret = -ETIMEDOUT;
		goto pm_free;
	}
@@ -40,10 +40,12 @@

 #define AQ_DEVICE_ID_AQC113DEV	0x00C0
 #define AQ_DEVICE_ID_AQC113CS	0x94C0
+#define AQ_DEVICE_ID_AQC113CA	0x34C0
 #define AQ_DEVICE_ID_AQC114CS	0x93C0
 #define AQ_DEVICE_ID_AQC113	0x04C0
 #define AQ_DEVICE_ID_AQC113C	0x14C0
 #define AQ_DEVICE_ID_AQC115C	0x12C0
+#define AQ_DEVICE_ID_AQC116C	0x11C0

 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@@ -53,20 +55,19 @@

 #define AQ_NIC_RATE_10G		BIT(0)
 #define AQ_NIC_RATE_5G		BIT(1)
-#define AQ_NIC_RATE_5GSR	BIT(2)
-#define AQ_NIC_RATE_2G5		BIT(3)
-#define AQ_NIC_RATE_1G		BIT(4)
-#define AQ_NIC_RATE_100M	BIT(5)
-#define AQ_NIC_RATE_10M		BIT(6)
-#define AQ_NIC_RATE_1G_HALF	BIT(7)
-#define AQ_NIC_RATE_100M_HALF	BIT(8)
-#define AQ_NIC_RATE_10M_HALF	BIT(9)
+#define AQ_NIC_RATE_2G5		BIT(2)
+#define AQ_NIC_RATE_1G		BIT(3)
+#define AQ_NIC_RATE_100M	BIT(4)
+#define AQ_NIC_RATE_10M		BIT(5)
+#define AQ_NIC_RATE_1G_HALF	BIT(6)
+#define AQ_NIC_RATE_100M_HALF	BIT(7)
+#define AQ_NIC_RATE_10M_HALF	BIT(8)

-#define AQ_NIC_RATE_EEE_10G	BIT(10)
-#define AQ_NIC_RATE_EEE_5G	BIT(11)
-#define AQ_NIC_RATE_EEE_2G5	BIT(12)
-#define AQ_NIC_RATE_EEE_1G	BIT(13)
-#define AQ_NIC_RATE_EEE_100M	BIT(14)
+#define AQ_NIC_RATE_EEE_10G	BIT(9)
+#define AQ_NIC_RATE_EEE_5G	BIT(10)
+#define AQ_NIC_RATE_EEE_2G5	BIT(11)
+#define AQ_NIC_RATE_EEE_1G	BIT(12)
+#define AQ_NIC_RATE_EEE_100M	BIT(13)
 #define AQ_NIC_RATE_EEE_MSK	(AQ_NIC_RATE_EEE_10G |\
				 AQ_NIC_RATE_EEE_5G |\
				 AQ_NIC_RATE_EEE_2G5 |\
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
 };

 struct aq_stats_s {
+	u64 brc;
+	u64 btc;
	u64 uprc;
	u64 mprc;
	u64 bprc;
@@ -903,8 +903,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
-	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+	if (stats->brc)
+		data[++i] = stats->brc;
+	else
+		data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+	if (stats->btc)
+		data[++i] = stats->btc;
+	else
+		data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },

	{}
 };
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_AQC113CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC114CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
-	{ AQ_DEVICE_ID_AQC115C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
+	{ AQ_DEVICE_ID_AQC115C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+	{ AQ_DEVICE_ID_AQC113CA,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
+	{ AQ_DEVICE_ID_AQC116C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc116c, },
 };

 MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
 {
	unsigned int count;

-	WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
-		  "Invalid tc %u (#rx=%u, #tx=%u)\n",
-		  tc, self->rx_rings, self->tx_rings);
	if (!aq_vec_is_valid_tc(self, tc))
		return 0;
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
			goto err_exit;

		if (fw.len == 0xFFFFU) {
+			if (sw.len > sizeof(self->rpc)) {
+				printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+				err = -EINVAL;
+				goto err_exit;
+			}
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,

	if (rpc) {
		if (fw.len) {
+			if (fw.len > sizeof(self->rpc)) {
+				printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+				err = -EINVAL;
+				goto err_exit;
+			}
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      self->rpc_addr,
@@ -857,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
	struct aq_stats_s *cs = &self->curr_stats;
+	struct aq_stats_s curr_stats = *cs;
	struct hw_atl_utils_mbox mbox;
+	bool corrupted_stats = false;

	hw_atl_utils_mpi_read_stats(self, &mbox);

-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
-			mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_)							\
+do {									\
+	if (!corrupted_stats &&						\
+	    ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0)	\
+		curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+	else								\
+		corrupted_stats = true;					\
+} while (0)

	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc);
@@ -882,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
		AQ_SDELTA(bbrc);
		AQ_SDELTA(bbtc);
		AQ_SDELTA(dpc);
+
+		if (!corrupted_stats)
+			*cs = curr_stats;
	}
 #undef AQ_SDELTA
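The new AQ_SDELTA only folds a delta into the running counters when the unsigned difference, reinterpreted as s64, is non-negative, and it discards the whole sample if any field went backwards. A standalone sketch of that wrap-guard pattern (hypothetical field names, not driver code):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct stats { uint64_t rx, tx; };

/* Accumulate deltas only if no counter appears to have gone backwards,
 * which would indicate a corrupted firmware statistics sample. */
static void apply_deltas(struct stats *cur, const struct stats *last,
			 const struct stats *mbox)
{
	struct stats next = *cur;
	bool corrupted = false;

	if (!corrupted && (int64_t)(mbox->rx - last->rx) >= 0)
		next.rx += mbox->rx - last->rx;
	else
		corrupted = true;

	if (!corrupted && (int64_t)(mbox->tx - last->tx) >= 0)
		next.tx += mbox->tx - last->tx;
	else
		corrupted = true;

	if (!corrupted)
		*cur = next;	/* commit the whole sample atomically */
}

int main(void)
{
	struct stats cur = {0, 0}, last = {10, 10}, mbox = {15, 8};

	apply_deltas(&cur, &last, &mbox);	/* tx went backwards: sample discarded */
	printf("rx=%lu tx=%lu\n", (unsigned long)cur.rx, (unsigned long)cur.tx);
	return 0;
}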
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
	if (speed & AQ_NIC_RATE_5G)
		rate |= FW2X_RATE_5G;

-	if (speed & AQ_NIC_RATE_5GSR)
-		rate |= FW2X_RATE_5G;
-
	if (speed & AQ_NIC_RATE_2G5)
		rate |= FW2X_RATE_2G5;
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
-			  AQ_NIC_RATE_1G_HALF |
			  AQ_NIC_RATE_100M |
-			  AQ_NIC_RATE_100M_HALF |
-			  AQ_NIC_RATE_10M |
-			  AQ_NIC_RATE_10M_HALF,
+			  AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+	DEFAULT_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = AQ_NIC_RATE_2G5 |
+			  AQ_NIC_RATE_1G |
+			  AQ_NIC_RATE_100M |
+			  AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+	DEFAULT_BOARD_BASIC_CAPABILITIES,
+	.media_type = AQ_HW_MEDIA_TYPE_TP,
+	.link_speed_msk = AQ_NIC_RATE_1G |
+			  AQ_NIC_RATE_100M |
+			  AQ_NIC_RATE_10M,
 };

 static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
@@ -9,6 +9,8 @@
 #include "aq_common.h"

 extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
 extern const struct aq_hw_ops hw_atl2_ops;

 #endif /* HW_ATL2_H */
@@ -239,7 +239,8 @@ struct version_s {
		u8 minor;
		u16 build;
	} phy;
-	u32 rsvd;
+	u32 drv_iface_ver:4;
+	u32 rsvd:28;
 };

 struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
	u16 rsvd2;
 };

-struct statistics_s {
+struct statistics_a0_s {
	struct {
		u32 link_up;
		u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
	u32 reserve_fw_gap;
 };

+struct __packed statistics_b0_s {
+	u64 rx_good_octets;
+	u64 rx_pause_frames;
+	u64 rx_good_frames;
+	u64 rx_errors;
+	u64 rx_unicast_frames;
+	u64 rx_multicast_frames;
+	u64 rx_broadcast_frames;
+
+	u64 tx_good_octets;
+	u64 tx_pause_frames;
+	u64 tx_good_frames;
+	u64 tx_errors;
+	u64 tx_unicast_frames;
+	u64 tx_multicast_frames;
+	u64 tx_broadcast_frames;
+
+	u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+	union __packed {
+		struct statistics_a0_s a0;
+		struct statistics_b0_s b0;
+	};
+};
+
 struct filter_caps_s {
	u8 l2_filters_base_index:6;
	u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
	u32 rsvd5;
 };

-struct fw_interface_out {
+struct __packed fw_interface_out {
	struct transaction_counter_s transaction_id;
	struct version_s version;
	struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
	struct core_dump_s core_dump;
	u32 rsvd11;
	struct statistics_s stats;
-	u32 rsvd12;
	struct filter_caps_s filter_caps;
	struct device_caps_s device_caps;
	u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
 #define AQ_HOST_MODE_LOW_POWER	3U
 #define AQ_HOST_MODE_SHUTDOWN	4U

+#define AQ_A2_FW_INTERFACE_A0	0
+#define AQ_A2_FW_INTERFACE_B0	1
+
 int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);

 int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
 			if (cnt > AQ_A2_FW_READ_TRY_MAX)
 				return -ETIME;
 			if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
-				udelay(1);
+				mdelay(1);
 		} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
 
 	hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
 {
 	link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
 	link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
-	link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+	link_options->rate_N5G = link_options->rate_5G;
 	link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
 	link_options->rate_N2P5G = link_options->rate_2P5G;
 	link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
 		rate |= AQ_NIC_RATE_10G;
 	if (lkp_link_caps->rate_5G)
 		rate |= AQ_NIC_RATE_5G;
-	if (lkp_link_caps->rate_N5G)
-		rate |= AQ_NIC_RATE_5GSR;
 	if (lkp_link_caps->rate_2P5G)
 		rate |= AQ_NIC_RATE_2G5;
 	if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
 	return 0;
 }
 
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+				struct statistics_s *stats)
 {
 	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
-	struct statistics_s stats;
+	struct aq_stats_s *cs = &self->curr_stats;
+	struct aq_stats_s curr_stats = *cs;
+	bool corrupted_stats = false;
 
-	hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
-			stats.msm._F_ - priv->last_stats.msm._F_)
+#define AQ_SDELTA(_N, _F)  \
+do { \
+	if (!corrupted_stats && \
+	    ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+		curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+	else \
+		corrupted_stats = true; \
+} while (0)
 
 	if (self->aq_link_status.mbps) {
 		AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
 		AQ_SDELTA(mbtc, tx_multicast_octets);
 		AQ_SDELTA(bbrc, rx_broadcast_octets);
 		AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+		if (!corrupted_stats)
+			*cs = curr_stats;
 	}
 #undef AQ_SDELTA
-	self->curr_stats.dma_pkt_rc =
-		hw_atl_stats_rx_dma_good_pkt_counter_get(self);
-	self->curr_stats.dma_pkt_tc =
-		hw_atl_stats_tx_dma_good_pkt_counter_get(self);
-	self->curr_stats.dma_oct_rc =
-		hw_atl_stats_rx_dma_good_octet_counter_get(self);
-	self->curr_stats.dma_oct_tc =
-		hw_atl_stats_tx_dma_good_octet_counter_get(self);
-	self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
 
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+				struct statistics_s *stats)
+{
+	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+	struct aq_stats_s *cs = &self->curr_stats;
+	struct aq_stats_s curr_stats = *cs;
+	bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+	if (!corrupted_stats && \
+	    ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+		curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+	else \
+		corrupted_stats = true; \
+} while (0)
+
+	if (self->aq_link_status.mbps) {
+		AQ_SDELTA(uprc, rx_unicast_frames);
+		AQ_SDELTA(mprc, rx_multicast_frames);
+		AQ_SDELTA(bprc, rx_broadcast_frames);
+		AQ_SDELTA(erpr, rx_errors);
+		AQ_SDELTA(brc, rx_good_octets);
+
+		AQ_SDELTA(uptc, tx_unicast_frames);
+		AQ_SDELTA(mptc, tx_multicast_frames);
+		AQ_SDELTA(bptc, tx_broadcast_frames);
+		AQ_SDELTA(erpt, tx_errors);
+		AQ_SDELTA(btc, tx_good_octets);
+
+		if (!corrupted_stats)
+			*cs = curr_stats;
+	}
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+	struct aq_stats_s *cs = &self->curr_stats;
+	struct statistics_s stats;
+	struct version_s version;
+	int err;
+
+	err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+	if (err)
+		return err;
+
+	err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+	if (err)
+		return err;
+
+	if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+		aq_a2_fill_a0_stats(self, &stats);
+	else
+		aq_a2_fill_b0_stats(self, &stats);
+
+	cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+	cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+	cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+	cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+	cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
 	memcpy(&priv->last_stats, &stats, sizeof(stats));
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
 	hw_atl2_shared_buffer_read_safe(self, version, &version);
 
 	/* A2 FW version is stored in reverse order */
-	return version.mac.major << 24 |
-	       version.mac.minor << 16 |
-	       version.mac.build;
+	return version.bundle.major << 24 |
+	       version.bundle.minor << 16 |
+	       version.bundle.build;
 }
 
 int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
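Aside: the AQ_SDELTA rework above guards every counter delta with a signedness check and commits a batch only when it is fully clean, so one corrupted firmware snapshot cannot poison the accumulated totals. A minimal userspace sketch of the same idea (names and values here are illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Add (cur - last) to *total unless the counter appears to run backwards. */
static void sdelta(uint64_t *total, uint64_t cur, uint64_t last, bool *corrupted)
{
	if (*corrupted || (int64_t)(cur - last) < 0)
		*corrupted = true;
	else
		*total += cur - last;
}

int main(void)
{
	uint64_t committed_rx = 0, committed_tx = 0;
	uint64_t rx = 0, tx = 0;
	bool corrupted = false;

	/* last snapshot: 100/200; current: 150/190 (tx "went backwards") */
	sdelta(&rx, 150, 100, &corrupted);
	sdelta(&tx, 190, 200, &corrupted);

	if (!corrupted) {	/* commit only a fully clean batch */
		committed_rx += rx;
		committed_tx += tx;
	}
	printf("corrupted=%d rx=%llu tx=%llu\n", corrupted,
	       (unsigned long long)committed_rx, (unsigned long long)committed_tx);
	return 0;
}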
@@ -4706,6 +4706,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
     lp->ibn = 3;
     lp->active = *p++;
     if (MOTO_SROM_BUG) lp->active = 0;
+    /* if (MOTO_SROM_BUG) statement indicates lp->active could
+     * be 8 (i.e. the size of array lp->phy) */
+    if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+	return -EINVAL;
     lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
     lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
     lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
@@ -4997,19 +5001,23 @@ mii_get_phy(struct net_device *dev)
 	}
 	if ((j == limit) && (i < DE4X5_MAX_MII)) {
 	    for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-	    lp->phy[k].addr = i;
-	    lp->phy[k].id = id;
-	    lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
-	    lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
-	    lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
-	    lp->mii_cnt++;
-	    lp->active++;
-	    printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-	    j = de4x5_debug;
-	    de4x5_debug |= DEBUG_MII;
-	    de4x5_dbg_mii(dev, k);
-	    de4x5_debug = j;
-	    printk("\n");
+	    if (k < DE4X5_MAX_PHY) {
+		lp->phy[k].addr = i;
+		lp->phy[k].id = id;
+		lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+		lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+		lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+		lp->mii_cnt++;
+		lp->active++;
+		printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+		j = de4x5_debug;
+		de4x5_debug |= DEBUG_MII;
+		de4x5_dbg_mii(dev, k);
+		de4x5_debug = j;
+		printk("\n");
+	    } else {
+		goto purgatory;
+	    }
 	}
     }
     purgatory:
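Aside: both de4x5 fixes above are the classic bounds-check-before-index pattern: validate the index against the array length before the first dereference. A compact sketch (the struct and values are made up for illustration):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy { int addr; };

static int select_phy(const struct phy *phys, size_t n, unsigned int active)
{
	if (active >= n)	/* reject before touching phys[active] */
		return -1;
	return phys[active].addr;
}

int main(void)
{
	struct phy phys[8] = { { 5 } };

	printf("%d\n", select_phy(phys, ARRAY_SIZE(phys), 0));	/* 5 */
	printf("%d\n", select_phy(phys, ARRAY_SIZE(phys), 8));	/* -1, was out of bounds */
	return 0;
}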
@@ -4432,6 +4432,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 
 	fsl_mc_portal_free(priv->mc_io);
 
+	destroy_workqueue(priv->dpaa2_ptp_wq);
+
 	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 
 	free_netdev(net_dev);
@@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
 		return;
 
 	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+		   We need check to prevent array overflow */
+		if (port >= DSAF_MAX_PORT_NUM)
+			return;
 		reg_val_1 = 0x1 << port;
 		port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
 		/* there is difference between V1 and V2 in register.*/
@@ -6918,7 +6918,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 
 	shared = num_present_cpus() - priv->nthreads;
 	if (shared > 0)
-		bitmap_fill(&priv->lock_map,
+		bitmap_set(&priv->lock_map, 0,
 			    min_t(int, shared, MVPP2_MAX_THREADS));
 
 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
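Aside: the mvpp2 fix matters because the kernel's bitmap_fill() works in whole-word granularity, so it can set more bits than the caller asked for, while bitmap_set(map, 0, n) sets exactly n bits. A userspace sketch of the exact-range behaviour (the helper is a stand-in, not the kernel implementation):

#include <stdio.h>

/* Set exactly [start, start+len) bits; a word-granular "fill" would round up. */
static void bitmap_set_exact(unsigned long *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = start; i < start + len; i++)
		map[i / (8 * sizeof(*map))] |= 1UL << (i % (8 * sizeof(*map)));
}

int main(void)
{
	unsigned long map[1] = { 0 };

	bitmap_set_exact(map, 0, 3);
	printf("%#lx\n", map[0]);	/* 0x7: only the 3 requested bits */
	return 0;
}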
@@ -2276,9 +2276,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				bool carry_xdp_prog)
 {
 	struct bpf_prog *xdp_prog;
-	int i, t;
+	int i, t, ret;
 
-	mlx4_en_copy_priv(tmp, priv, prof);
+	ret = mlx4_en_copy_priv(tmp, priv, prof);
+	if (ret) {
+		en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+			__func__);
+		return ret;
+	}
 
 	if (mlx4_en_alloc_resources(tmp)) {
 		en_warn(priv,
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
 	.ndo_set_mac_address = eth_mac_addr,
 };
 
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
 	unsigned int silicon_revision;
 	struct sonic_local *lp = netdev_priv(dev);
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
 	sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
 	context_id = recv_ctx->context_id;
 	num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-	ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-				    QLCNIC_CMD_ADD_RCV_RINGS);
+	err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+					  QLCNIC_CMD_ADD_RCV_RINGS);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to alloc mbx args %d\n", err);
+		return err;
+	}
+
 	cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
 
 	/* set up status rings, mbx 2-81 */
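Aside: the mlx4 and qlcnic hunks above are the same repair: a helper that can fail was being called as if it were void, leaving later code to dereference an unallocated buffer. A generic sketch of the pattern (function and field names are invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_args { int *buf; };

static int alloc_mbx_args(struct cmd_args *cmd, size_t n)
{
	cmd->buf = calloc(n, sizeof(*cmd->buf));
	return cmd->buf ? 0 : -ENOMEM;	/* failure must reach the caller */
}

static int add_rings(void)
{
	struct cmd_args cmd;
	int err = alloc_mbx_args(&cmd, 32);

	if (err) {	/* previously ignored: cmd.buf would have been NULL below */
		fprintf(stderr, "alloc_mbx_args failed: %d\n", err);
		return err;
	}
	cmd.buf[1] = 0x1234;
	free(cmd.buf);
	return 0;
}

int main(void) { return add_rings(); }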
@@ -2128,7 +2128,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
 	if (dev->domain_data.phyirq > 0)
 		phydev->irq = dev->domain_data.phyirq;
 	else
-		phydev->irq = 0;
+		phydev->irq = PHY_POLL;
 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
 
 	/* set to AUTOMDIX */
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 	/* strip the ethernet header added for pass through VRF device */
 	__skb_pull(skb, skb_network_offset(skb));
 
+	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	ret = vrf_ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		dev->stats.tx_errors++;
@@ -580,6 +581,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 					       RT_SCOPE_LINK);
 	}
 
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		vrf_dev->stats.tx_errors++;
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
 	return exact;
 }
 
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
 {
 	node->parent_bit_packed = (unsigned long)parent | bit;
 	rcu_assign_pointer(*parent, node);
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
 {
 	struct wg_device *wg = netdev_priv(dev);
 	struct wg_peer *peer;
+	struct sk_buff *skb;
 
 	mutex_lock(&wg->device_update_lock);
 	list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
 		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
 	}
 	mutex_unlock(&wg->device_update_lock);
-	skb_queue_purge(&wg->incoming_handshakes);
+	while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+		kfree_skb(skb);
+	atomic_set(&wg->handshake_queue_len, 0);
 	wg_socket_reinit(wg, NULL, NULL);
 	return 0;
 }
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
 	destroy_workqueue(wg->handshake_receive_wq);
 	destroy_workqueue(wg->handshake_send_wq);
 	destroy_workqueue(wg->packet_crypt_wq);
-	wg_packet_queue_free(&wg->decrypt_queue);
-	wg_packet_queue_free(&wg->encrypt_queue);
+	wg_packet_queue_free(&wg->handshake_queue, true);
+	wg_packet_queue_free(&wg->decrypt_queue, false);
+	wg_packet_queue_free(&wg->encrypt_queue, false);
 	rcu_barrier(); /* Wait for all the peers to be actually freed. */
 	wg_ratelimiter_uninit();
 	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
-	skb_queue_purge(&wg->incoming_handshakes);
 	free_percpu(dev->tstats);
-	free_percpu(wg->incoming_handshakes_worker);
 	kvfree(wg->index_hashtable);
 	kvfree(wg->peer_hashtable);
 	mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 	init_rwsem(&wg->static_identity.lock);
 	mutex_init(&wg->socket_update_lock);
 	mutex_init(&wg->device_update_lock);
-	skb_queue_head_init(&wg->incoming_handshakes);
 	wg_allowedips_init(&wg->peer_allowedips);
 	wg_cookie_checker_init(&wg->cookie_checker, wg);
 	INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 	if (!dev->tstats)
 		goto err_free_index_hashtable;
 
-	wg->incoming_handshakes_worker =
-		wg_packet_percpu_multicore_worker_alloc(
-				wg_packet_handshake_receive_worker, wg);
-	if (!wg->incoming_handshakes_worker)
-		goto err_free_tstats;
-
 	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
 			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
 	if (!wg->handshake_receive_wq)
-		goto err_free_incoming_handshakes;
+		goto err_free_tstats;
 
 	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
 			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 	if (ret < 0)
 		goto err_free_encrypt_queue;
 
-	ret = wg_ratelimiter_init();
+	ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+				   MAX_QUEUED_INCOMING_HANDSHAKES);
 	if (ret < 0)
 		goto err_free_decrypt_queue;
 
+	ret = wg_ratelimiter_init();
+	if (ret < 0)
+		goto err_free_handshake_queue;
+
 	ret = register_netdevice(dev);
 	if (ret < 0)
 		goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 
 err_uninit_ratelimiter:
 	wg_ratelimiter_uninit();
+err_free_handshake_queue:
+	wg_packet_queue_free(&wg->handshake_queue, false);
 err_free_decrypt_queue:
-	wg_packet_queue_free(&wg->decrypt_queue);
+	wg_packet_queue_free(&wg->decrypt_queue, false);
 err_free_encrypt_queue:
-	wg_packet_queue_free(&wg->encrypt_queue);
+	wg_packet_queue_free(&wg->encrypt_queue, false);
 err_destroy_packet_crypt:
 	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
 	destroy_workqueue(wg->handshake_send_wq);
 err_destroy_handshake_receive:
 	destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
-	free_percpu(wg->incoming_handshakes_worker);
 err_free_tstats:
 	free_percpu(dev->tstats);
 err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
 static void wg_netns_pre_exit(struct net *net)
 {
 	struct wg_device *wg;
+	struct wg_peer *peer;
 
 	rtnl_lock();
 	list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
 			mutex_lock(&wg->device_update_lock);
 			rcu_assign_pointer(wg->creating_net, NULL);
 			wg_socket_reinit(wg, NULL, NULL);
+			list_for_each_entry(peer, &wg->peer_list, peer_list)
+				wg_socket_clear_peer_endpoint_src(peer);
 			mutex_unlock(&wg->device_update_lock);
 		}
 	}
@@ -39,21 +39,18 @@ struct prev_queue {
 
 struct wg_device {
 	struct net_device *dev;
-	struct crypt_queue encrypt_queue, decrypt_queue;
+	struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
 	struct sock __rcu *sock4, *sock6;
 	struct net __rcu *creating_net;
 	struct noise_static_identity static_identity;
-	struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
-	struct workqueue_struct *packet_crypt_wq;
-	struct sk_buff_head incoming_handshakes;
-	int incoming_handshake_cpu;
-	struct multicore_worker __percpu *incoming_handshakes_worker;
+	struct workqueue_struct *packet_crypt_wq, *handshake_receive_wq, *handshake_send_wq;
 	struct cookie_checker cookie_checker;
 	struct pubkey_hashtable *peer_hashtable;
 	struct index_hashtable *index_hashtable;
 	struct allowedips peer_allowedips;
 	struct mutex device_update_lock, socket_update_lock;
 	struct list_head device_list, peer_list;
+	atomic_t handshake_queue_len;
 	unsigned int num_peers, device_update_gen;
 	u32 fwmark;
 	u16 incoming_port;
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
 	free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
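Aside: wg_packet_queue_free() now takes a purge flag so a queue that may legitimately still hold handshake skbs can free them through a destructor instead of tripping the "ring not empty" warning. A userspace sketch of the same shape (the ring here is a trivial array, not the kernel's ptr_ring):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { void *slots[64]; int n; };

static void ring_cleanup(struct ring *r, void (*destroy)(void *))
{
	while (r->n > 0) {
		void *e = r->slots[--r->n];

		if (destroy)
			destroy(e);
	}
}

static void queue_free(struct ring *r, bool purge)
{
	if (!purge && r->n != 0)	/* mirrors the WARN_ON() above */
		fprintf(stderr, "warning: freeing non-empty queue\n");
	ring_cleanup(r, purge ? free : NULL);
}

int main(void)
{
	struct ring r = { .n = 0 };

	r.slots[r.n++] = malloc(16);
	queue_free(&r, true);		/* purge leftover entries safely */
	return 0;
}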
@@ -23,7 +23,7 @@ struct sk_buff;
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
 			 unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
 			(1U << 14) / sizeof(struct hlist_head)));
 	max_entries = table_size * 8;
 
-	table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+	table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
 	if (unlikely(!table_v4))
 		goto err_kmemcache;
 
 #if IS_ENABLED(CONFIG_IPV6)
-	table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+	table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
 	if (unlikely(!table_v6)) {
 		kvfree(table_v4);
 		goto err_kmemcache;
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 		return;
 	}
 
-	under_load = skb_queue_len(&wg->incoming_handshakes) >=
-		     MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+	under_load = atomic_read(&wg->handshake_queue_len) >=
+		     MAX_QUEUED_INCOMING_HANDSHAKES / 8;
 	if (under_load) {
 		last_under_load = ktime_get_coarse_boottime_ns();
 	} else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 
 void wg_packet_handshake_receive_worker(struct work_struct *work)
 {
-	struct wg_device *wg = container_of(work, struct multicore_worker,
-					    work)->ptr;
+	struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+	struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
 		wg_receive_handshake_packet(wg, skb);
 		dev_kfree_skb(skb);
+		atomic_dec(&wg->handshake_queue_len);
 		cond_resched();
 	}
 }
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
 	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
 	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
 	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
-		int cpu;
+		int cpu, ret = -EBUSY;
 
-		if (skb_queue_len(&wg->incoming_handshakes) >
-			    MAX_QUEUED_INCOMING_HANDSHAKES ||
-		    unlikely(!rng_is_initialized())) {
+		if (unlikely(!rng_is_initialized()))
+			goto drop;
+		if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+			}
+		} else
+			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+		if (ret) {
+drop:
 			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
 						wg->dev->name, skb);
 			goto err;
 		}
-		skb_queue_tail(&wg->incoming_handshakes, skb);
-		/* Queues up a call to packet_process_queued_handshake_
-		 * packets(skb):
-		 */
-		cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+		atomic_inc(&wg->handshake_queue_len);
+		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+		/* Queues up a call to packet_process_queued_handshake_packets(skb): */
 		queue_work_on(cpu, wg->handshake_receive_wq,
-			      &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
 		break;
 	}
 	case cpu_to_le32(MESSAGE_DATA):
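Aside: wg_packet_receive() above sheds load two ways once the queue is past half full: it refuses to spin on a contended producer lock and simply drops the handshake. A minimal pthread sketch of trylock-based shedding (the queue and threshold are stand-ins, not WireGuard's types):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t producer_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;			/* imagine an atomic_read() here */
#define QUEUE_MAX 4096

static bool enqueue_handshake(void)
{
	if (queue_len > QUEUE_MAX / 2) {
		/* Under load: never wait on a contended lock. */
		if (pthread_mutex_trylock(&producer_lock) != 0)
			return false;	/* contended -> drop the packet */
	} else {
		pthread_mutex_lock(&producer_lock);
	}
	queue_len++;			/* the actual produce step */
	pthread_mutex_unlock(&producer_lock);
	return true;
}

int main(void)
{
	printf("%d\n", enqueue_handshake());	/* 1: queued */
	return 0;
}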
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
 {
 	write_lock_bh(&peer->endpoint_lock);
 	memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
-	dst_cache_reset(&peer->endpoint_cache);
+	dst_cache_reset_now(&peer->endpoint_cache);
 	write_unlock_bh(&peer->endpoint_lock);
 }
 
@@ -1303,23 +1303,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
 	const struct iwl_op_mode_ops *ops = op->ops;
 	struct dentry *dbgfs_dir = NULL;
 	struct iwl_op_mode *op_mode = NULL;
+	int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
 
+	for (retry = 0; retry <= max_retry; retry++) {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-	drv->dbgfs_op_mode = debugfs_create_dir(op->name,
-						drv->dbgfs_drv);
-	dbgfs_dir = drv->dbgfs_op_mode;
+		drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+							drv->dbgfs_drv);
+		dbgfs_dir = drv->dbgfs_op_mode;
 #endif
 
-	op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+		op_mode = ops->start(drv->trans, drv->trans->cfg,
+				     &drv->fw, dbgfs_dir);
+
+		if (op_mode)
+			return op_mode;
+
+		IWL_ERR(drv, "retry init count %d\n", retry);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-	if (!op_mode) {
 		debugfs_remove_recursive(drv->dbgfs_op_mode);
 		drv->dbgfs_op_mode = NULL;
-	}
 #endif
+	}
 
-	return op_mode;
+	return NULL;
 }
 
 static void _iwl_op_mode_stop(struct iwl_drv *drv)
@@ -144,4 +144,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 #define IWL_EXPORT_SYMBOL(sym)
 #endif
 
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
 #endif /* __iwl_drv_h__ */
@@ -71,6 +71,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>
 
+#include "iwl-drv.h"
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -1163,9 +1164,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
+	int retry, max_retry = 0;
 
 	mutex_lock(&mvm->mutex);
-	ret = __iwl_mvm_mac_start(mvm);
+
+	/* we are starting the mac not in error flow, and restart is enabled */
+	if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+	    iwlwifi_mod_params.fw_restart) {
+		max_retry = IWL_MAX_INIT_RETRY;
+		/*
+		 * This will prevent mac80211 recovery flows to trigger during
+		 * init failures
+		 */
+		set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+	}
+
+	for (retry = 0; retry <= max_retry; retry++) {
+		ret = __iwl_mvm_mac_start(mvm);
+		if (!ret)
+			break;
+
+		IWL_ERR(mvm, "mac start retry %d\n", retry);
+	}
+	clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
 	mutex_unlock(&mvm->mutex);
 
 	return ret;
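Aside: the two iwlwifi hunks above implement one idiom: retry initialization only when firmware restart is enabled, cap the attempts, and flag the "starting" state so the normal recovery path stays out of the way. A stripped-down sketch (all names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_INIT_RETRY 2

static bool hw_start(int attempt)
{
	return attempt == 2;	/* pretend only the third attempt works */
}

static int mac_start(bool restart_enabled)
{
	int retry, max_retry = restart_enabled ? MAX_INIT_RETRY : 0;

	for (retry = 0; retry <= max_retry; retry++) {
		if (hw_start(retry))
			return 0;
		fprintf(stderr, "mac start retry %d\n", retry);
	}
	return -1;
}

int main(void) { return mac_start(true); }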
@@ -1162,6 +1162,8 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ *	used to disable restart flow while in STARTING state
  */
 enum iwl_mvm_status {
 	IWL_MVM_STATUS_HW_RFKILL,
@@ -1173,6 +1175,7 @@ enum iwl_mvm_status {
 	IWL_MVM_STATUS_FIRMWARE_RUNNING,
 	IWL_MVM_STATUS_NEED_FLUSH_P2P,
 	IWL_MVM_STATUS_IN_D3,
+	IWL_MVM_STATUS_STARTING,
 };
 
 /* Keep track of completed init configuration */
@@ -1295,6 +1295,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 	 */
 	if (!mvm->fw_restart && fw_error) {
 		iwl_fw_error_collect(&mvm->fwrt);
+	} else if (test_bit(IWL_MVM_STATUS_STARTING,
+			    &mvm->status)) {
+		IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
 
@@ -182,7 +182,7 @@ mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
 		if (ht_cap->ht_supported)
 			mode |= PHY_MODE_GN;
 
-		if (he_cap->has_he)
+		if (he_cap && he_cap->has_he)
 			mode |= PHY_MODE_AX_24G;
 	} else if (band == NL80211_BAND_5GHZ) {
 		mode |= PHY_MODE_A;
@@ -193,7 +193,7 @@ mt7915_get_phy_mode(struct mt7915_dev *dev, struct ieee80211_vif *vif,
 		if (vht_cap->vht_supported)
 			mode |= PHY_MODE_AC;
 
-		if (he_cap->has_he)
+		if (he_cap && he_cap->has_he)
 			mode |= PHY_MODE_AX_5G;
 	}
 
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
 	if (status == -ENODEV || status == -ENOENT)
 		return true;
 
+	if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+		return false;
+
 	if (status == -EPROTO || status == -ETIMEDOUT)
 		rt2x00dev->num_proto_errs++;
 	else
@@ -1170,15 +1170,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
 	return status;
 }
 
-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
-		tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
-
 /*
  * Sync the HW-blocking state of all rfkill switches,
  * do notice it causes the rfkill core to schedule uevents
@@ -3121,9 +3112,6 @@ static void tpacpi_send_radiosw_update(void)
 	if (wlsw == TPACPI_RFK_RADIO_OFF)
 		tpacpi_rfk_update_hwblock_state(true);
 
-	/* Sync sw blocking state */
-	tpacpi_rfk_update_swstate_all();
-
 	/* Sync hw blocking state last if it is hw-unblocked */
 	if (wlsw == TPACPI_RFK_RADIO_ON)
 		tpacpi_rfk_update_hwblock_state(false);
@@ -8805,6 +8793,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
 	TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (1st gen) */
 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
 	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3nd gen) */
+	TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (4nd gen) */
 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
 	TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),	/* X1 Carbon (9th gen) */
 };
@@ -1909,12 +1909,12 @@ static void session_recovery_timedout(struct work_struct *work)
 	}
 	spin_unlock_irqrestore(&session->lock, flags);
 
-	if (session->transport->session_recovery_timedout)
-		session->transport->session_recovery_timedout(session);
-
 	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
 	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
 	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+	if (session->transport->session_recovery_timedout)
+		session->transport->session_recovery_timedout(session);
 }
 
 static void __iscsi_unblock_session(struct work_struct *work)
@@ -1349,29 +1349,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
 {
 	int scr;
 	int lcr;
-	int actual_baud;
-	int tolerance;
 
-	for (scr = 5 ; scr <= 15 ; scr++) {
-		actual_baud = 921600 * 16 / scr;
-		tolerance = actual_baud / 50;
+	for (scr = 16; scr > 4; scr--) {
+		unsigned int maxrate = port->uartclk / scr;
+		unsigned int divisor = max(maxrate / baud, 1U);
+		int delta = maxrate / divisor - baud;
 
-		if ((baud < actual_baud + tolerance) &&
-			(baud > actual_baud - tolerance)) {
+		if (baud > maxrate + baud / 50)
+			continue;
 
+		if (delta > baud / 50)
+			divisor++;
+
+		if (divisor > 0xffff)
+			continue;
+
+		/* Update delta due to possible divisor change */
+		delta = maxrate / divisor - baud;
+		if (abs(delta) < baud / 50) {
 			lcr = serial_port_in(port, UART_LCR);
 			serial_port_out(port, UART_LCR, lcr | 0x80);
-
-			serial_port_out(port, UART_DLL, 1);
-			serial_port_out(port, UART_DLM, 0);
+			serial_port_out(port, UART_DLL, divisor & 0xff);
+			serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
 			serial_port_out(port, 2, 16 - scr);
 			serial_port_out(port, UART_LCR, lcr);
 			return;
-		} else if (baud > actual_baud) {
-			break;
 		}
 	}
 	serial8250_do_set_divisor(port, baud, quot, quot_frac);
 }
 static int pci_pericom_setup(struct serial_private *priv,
 			     const struct pciserial_board *board,
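Aside: the rewritten pericom_do_set_divisor() walks the sample-clock prescaler from 16 down and accepts the first divisor whose resulting rate is within 2% (baud/50) of the target. The arithmetic is easy to verify in userspace; in this sketch the 14.7456 MHz uartclk is just a typical example value:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int uartclk = 14745600, baud = 115200;

	for (int scr = 16; scr > 4; scr--) {
		unsigned int maxrate = uartclk / scr;
		unsigned int divisor = maxrate / baud ? maxrate / baud : 1;
		int delta = (int)(maxrate / divisor) - (int)baud;

		if (baud > maxrate + baud / 50)
			continue;
		if (delta > (int)baud / 50)
			divisor++;
		if (divisor > 0xffff)
			continue;
		/* recompute after the possible divisor bump */
		delta = (int)(maxrate / divisor) - (int)baud;
		if (abs(delta) < (int)baud / 50) {
			printf("scr=%d divisor=%u actual=%u\n",
			       scr, divisor, maxrate / divisor);
			return 0;	/* prints scr=16 divisor=8 actual=115200 */
		}
	}
	printf("fallback to generic divisor\n");
	return 0;
}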
@@ -2317,12 +2321,19 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
 		.setup = pci_pericom_setup_four_at_eight,
 	},
 	{
-		.vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+		.vendor = PCI_VENDOR_ID_ACCESIO,
+		.device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.setup = pci_pericom_setup_four_at_eight,
+	},
+	{
+		.vendor = PCI_VENDOR_ID_ACCESIO,
 		.device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
 		.subvendor = PCI_ANY_ID,
 		.subdevice = PCI_ANY_ID,
 		.setup = pci_pericom_setup_four_at_eight,
 	},
 	{
 		.vendor = PCI_VENDOR_ID_ACCESIO,
 		.device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
@@ -2029,13 +2029,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	struct uart_8250_port *up = up_to_u8250p(port);
 	unsigned char mcr;
 
-	if (port->rs485.flags & SER_RS485_ENABLED) {
-		if (serial8250_in_MCR(up) & UART_MCR_RTS)
-			mctrl |= TIOCM_RTS;
-		else
-			mctrl &= ~TIOCM_RTS;
-	}
-
 	mcr = serial8250_TIOCM_to_MCR(mctrl);
 
 	mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
@@ -2790,6 +2790,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
 
 static const struct acpi_device_id sbsa_uart_acpi_match[] = {
 	{ "ARMH0011", 0 },
+	{ "ARMHB000", 0 },
 	{},
 };
 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
@@ -599,6 +599,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
 	u32 val;
 	int ret;
 
+	if (IS_ENABLED(CONFIG_CONSOLE_POLL))
+		return;
+
 	if (!dma->chan)
 		return;
 
@@ -1501,7 +1501,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = {
 	.fifo_mode_enable_status = false,
 	.uart_max_port = 5,
 	.max_dma_burst_bytes = 4,
-	.error_tolerance_low_range = 0,
+	.error_tolerance_low_range = -4,
 	.error_tolerance_high_range = 4,
 };
 
@@ -1512,7 +1512,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = {
 	.fifo_mode_enable_status = false,
 	.uart_max_port = 5,
 	.max_dma_burst_bytes = 4,
-	.error_tolerance_low_range = 0,
+	.error_tolerance_low_range = -4,
 	.error_tolerance_high_range = 4,
 };
 
@@ -1095,6 +1095,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
 		goto out;
 
 	if (!tty_io_error(tty)) {
+		if (uport->rs485.flags & SER_RS485_ENABLED) {
+			set &= ~TIOCM_RTS;
+			clear &= ~TIOCM_RTS;
+		}
+
 		uart_update_mctrl(uport, set, clear);
 		ret = 0;
 	}
@@ -1569,6 +1574,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 {
 	struct uart_state *state = container_of(port, struct uart_state, port);
 	struct uart_port *uport = uart_port_check(state);
+	char *buf;
 
 	/*
 	 * At this point, we stop accepting input. To do this, we
@@ -1590,8 +1596,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 	 */
 	tty_port_set_suspended(port, 0);
 
-	uart_change_pm(state, UART_PM_STATE_OFF);
+	/*
+	 * Free the transmit buffer.
+	 */
+	spin_lock_irq(&uport->lock);
+	buf = state->xmit.buf;
+	state->xmit.buf = NULL;
+	spin_unlock_irq(&uport->lock);
 
+	if (buf)
+		free_page((unsigned long)buf);
+
+	uart_change_pm(state, UART_PM_STATE_OFF);
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
@@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x1532, 0x0116), .driver_info =
 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+	/* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+	{ USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
 	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
 			USB_QUIRK_DISCONNECT_SUSPEND },
@@ -368,7 +368,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and aquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-	u32 temp_32;
+	struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
+	union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
+	u64 crcr;
 	int ret;
 
 	xhci_dbg(xhci, "Abort command ring\n");
@@ -377,13 +379,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 
 	/*
 	 * The control bits like command stop, abort are located in lower
-	 * dword of the command ring control register. Limit the write
-	 * to the lower dword to avoid corrupting the command ring pointer
-	 * in case if the command ring is stopped by the time upper dword
-	 * is written.
+	 * dword of the command ring control register.
+	 * Some controllers require all 64 bits to be written to abort the ring.
+	 * Make sure the upper dword is valid, pointing to the next command,
+	 * avoiding corrupting the command ring pointer in case the command ring
+	 * is stopped by the time the upper dword is written.
 	 */
-	temp_32 = readl(&xhci->op_regs->cmd_ring);
-	writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+	next_trb(xhci, NULL, &new_seg, &new_deq);
+	if (trb_is_link(new_deq))
+		next_trb(xhci, NULL, &new_seg, &new_deq);
+
+	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
 
 	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
 	 * completion of the Command Abort operation. If CRR is not negated in 5
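Aside: the xhci change swaps a 32-bit read-modify-write of the command ring control register for one 64-bit write whose upper half is a valid dequeue pointer. Composing such a register value is plain bit arithmetic; a sketch (the flag bit and alignment mask follow the xHCI CRCR layout as I understand it, so treat them as assumptions):

#include <stdint.h>
#include <stdio.h>

#define CMD_RING_ABORT (1u << 2)	/* CRCR "Command Abort" bit (assumed) */

static uint64_t build_crcr(uint64_t next_cmd_dma, uint32_t flags)
{
	/* The low 6 bits of the pointer are reserved for control/status,
	 * so the ring segment must be at least 64-byte aligned. */
	return (next_cmd_dma & ~0x3fULL) | flags;
}

int main(void)
{
	uint64_t crcr = build_crcr(0x12345680, CMD_RING_ABORT);

	printf("crcr=%#llx\n", (unsigned long long)crcr);
	return 0;
}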
@@ -4157,12 +4157,9 @@ static void run_state_machine(struct tcpm_port *port)
 					       0);
 			port->debouncing = false;
 		} else {
 			/* Wait for VBUS, but not forever */
 			tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
 			port->debouncing = false;
 		}
 		break;
 
 	case SRC_TRY:
 		port->try_src_count++;
 		tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -370,11 +370,17 @@ static void vgacon_init(struct vc_data *c, int init)
 	struct uni_pagedir *p;
 
 	/*
-	 * We cannot be loaded as a module, therefore init is always 1,
-	 * but vgacon_init can be called more than once, and init will
-	 * not be 1.
+	 * We cannot be loaded as a module, therefore init will be 1
+	 * if we are the default console, however if we are a fallback
+	 * console, for example if fbcon has failed registration, then
+	 * init will be 0, so we need to make sure our boot parameters
+	 * have been copied to the console structure for vgacon_resize
+	 * ultimately called by vc_resize. Any subsequent calls to
+	 * vgacon_init init will have init set to 0 too.
 	 */
 	c->vc_can_do_color = vga_can_do_color;
+	c->vc_scan_lines = vga_scan_lines;
+	c->vc_font.height = c->vc_cell_height = vga_video_font_height;
 
 	/* set dimensions manually if init != 0 since vc_resize() will fail */
 	if (init) {
@@ -383,8 +389,6 @@ static void vgacon_init(struct vc_data *c, int init)
 	} else
 		vc_resize(c, vga_video_num_columns, vga_video_num_lines);
 
-	c->vc_scan_lines = vga_scan_lines;
-	c->vc_font.height = c->vc_cell_height = vga_video_font_height;
 	c->vc_complement_mask = 0x7700;
 	if (vga_512_chars)
 		c->vc_hi_font_mask = 0x0800;
@@ -3692,11 +3692,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  */
 static void write_dev_flush(struct btrfs_device *device)
 {
-	struct request_queue *q = bdev_get_queue(device->bdev);
 	struct bio *bio = device->flush_bio;
 
+#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+	/*
+	 * When a disk has write caching disabled, we skip submission of a bio
+	 * with flush and sync requests before writing the superblock, since
+	 * it's not needed. However when the integrity checker is enabled, this
+	 * results in reports that there are metadata blocks referred by a
+	 * superblock that were not properly flushed. So don't skip the bio
+	 * submission only when the integrity checker is enabled for the sake
+	 * of simplicity, since this is a debug tool and not meant for use in
+	 * non-debug builds.
+	 */
+	struct request_queue *q = bdev_get_queue(device->bdev);
 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		return;
+#endif
 
 	bio_reset(bio);
 	bio->bi_end_io = btrfs_end_empty_barrier;
@@ -834,6 +834,10 @@ static struct file *__fget_files(struct files_struct *files, unsigned int fd,
 			file = NULL;
 		else if (!get_file_rcu_many(file, refs))
 			goto loop;
+		else if (__fcheck_files(files, fd) != file) {
+			fput_many(file, refs);
+			goto loop;
+		}
 	}
 	rcu_read_unlock();
 
@@ -940,7 +940,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
 		else if (height == ip->i_height)
 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 		else
-			iomap->length = size - pos;
+			iomap->length = size - iomap->offset;
 	} else if (flags & IOMAP_WRITE) {
 		u64 alloc_size;
 
@@ -1438,13 +1438,6 @@ static void gfs2_evict_inode(struct inode *inode)
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	if (ip->i_gl) {
-		glock_clear_object(ip->i_gl, ip);
-		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-		gfs2_glock_add_to_lru(ip->i_gl);
-		gfs2_glock_put_eventually(ip->i_gl);
-		ip->i_gl = NULL;
-	}
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
@@ -1457,6 +1450,13 @@ static void gfs2_evict_inode(struct inode *inode)
 		gfs2_holder_uninit(&ip->i_iopen_gh);
 		gfs2_glock_put_eventually(gl);
 	}
+	if (ip->i_gl) {
+		glock_clear_object(ip->i_gl, ip);
+		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+		gfs2_glock_add_to_lru(ip->i_gl);
+		gfs2_glock_put_eventually(ip->i_gl);
+		ip->i_gl = NULL;
+	}
 }
 
 static struct inode *gfs2_alloc_inode(struct super_block *sb)
@@ -362,8 +362,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
 		goto out;
 	}
 
-	truncate_pagecache_range(dst_inode, pos_dst,
-				 pos_dst + res->write_res.count);
+	WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
+					pos_dst >> PAGE_SHIFT,
+					(pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));
 	spin_lock(&dst_inode->i_lock);
 	NFS_I(dst_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
 		NFS_INO_REVAL_FORCED | NFS_INO_INVALID_SIZE |
@@ -410,45 +410,48 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 	return ret;
 }
 
-static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
-			       struct pipe_inode_info *pipe, size_t len,
-			       unsigned int flags)
-{
-	ssize_t ret;
-	struct fd real;
-	const struct cred *old_cred;
-
-	ret = ovl_real_fdget(in, &real);
-	if (ret)
-		return ret;
-
-	old_cred = ovl_override_creds(file_inode(in)->i_sb);
-	ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
-	ovl_revert_creds(file_inode(in)->i_sb, old_cred);
-
-	ovl_file_accessed(in);
-	fdput(real);
-	return ret;
-}
-
-static ssize_t
-ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
-		 loff_t *ppos, size_t len, unsigned int flags)
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+				loff_t *ppos, size_t len, unsigned int flags)
 {
 	struct fd real;
 	const struct cred *old_cred;
+	struct inode *inode = file_inode(out);
+	struct inode *realinode = ovl_inode_real(inode);
 	ssize_t ret;
 
+	inode_lock(inode);
+	/* Update mode */
+	ovl_copyattr(realinode, inode);
+	ret = file_remove_privs(out);
+	if (ret)
+		goto out_unlock;
+
 	ret = ovl_real_fdget(out, &real);
 	if (ret)
-		return ret;
+		goto out_unlock;
+
+	old_cred = ovl_override_creds(inode->i_sb);
+	file_start_write(real.file);
 
-	old_cred = ovl_override_creds(file_inode(out)->i_sb);
 	ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
-	ovl_revert_creds(file_inode(out)->i_sb, old_cred);
-
-	ovl_file_accessed(out);
+	file_end_write(real.file);
+	/* Update size */
+	ovl_copyattr(realinode, inode);
+	ovl_revert_creds(inode->i_sb, old_cred);
 	fdput(real);
+
+out_unlock:
+	inode_unlock(inode);
+
 	return ret;
 }
 
@@ -760,7 +763,7 @@ const struct file_operations ovl_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ovl_compat_ioctl,
 #endif
-	.splice_read = ovl_splice_read,
+	.splice_read = generic_file_splice_read,
 	.splice_write = ovl_splice_write,
 
 	.copy_file_range = ovl_copy_file_range,
@@ -953,6 +953,15 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
 	return NULL;
 }
 
+static inline int acpi_register_wakeup_handler(int wake_irq,
+	bool (*wakeup)(void *context), void *context)
+{
+	return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+	bool (*wakeup)(void *context), void *context) { }
+
 #endif /* !CONFIG_ACPI */
 
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -155,6 +155,8 @@ struct kretprobe {
 	raw_spinlock_t lock;
 };
 
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
 struct kretprobe_instance {
 	union {
 		struct hlist_node hlist;
@@ -4259,7 +4259,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4276,26 +4277,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
-	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+
+	if (likely(ok)) {
+		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
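Aside: the xmit_lock_owner annotations above fix a C-level data race: a lockless reader may otherwise observe a torn or stale value, and the compiler is free to tear or re-read plain accesses. Outside the kernel the same contract is expressed with C11 relaxed atomics; a small sketch (names are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int xmit_lock_owner = -1;

static void set_owner(int cpu)
{
	/* pairs with the relaxed load in owned_by() */
	atomic_store_explicit(&xmit_lock_owner, cpu, memory_order_relaxed);
}

static int owned_by(int cpu)
{
	return atomic_load_explicit(&xmit_lock_owner, memory_order_relaxed) == cpu;
}

int main(void)
{
	set_owner(3);
	printf("%d\n", owned_by(3));	/* 1; no torn read is possible */
	return 0;
}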
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
 }
 
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
 
 u64 siphash_1u64(const u64 a, const siphash_key_t *key);
 u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
 static inline u64 siphash(const void *data, size_t len,
 			  const siphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
 		return __siphash_unaligned(data, len, key);
-#endif
 	return ___siphash_aligned(data, len, key);
 }
 
@@ -96,10 +93,8 @@ typedef struct {
 
 u32 __hsiphash_aligned(const void *data, size_t len,
 		       const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
 			 const hsiphash_key_t *key);
-#endif
 
 u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
 u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
 static inline u32 hsiphash(const void *data, size_t len,
 			   const hsiphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
 		return __hsiphash_unaligned(data, len, key);
-#endif
 	return ___hsiphash_aligned(data, len, key);
 }
 
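Aside: the siphash change replaces an #ifdef with an IS_ENABLED()-style condition, which lets the compiler constant-fold the alignment test away while keeping both branches visible to the type checker. The trick reduces to an ordinary constant-folded condition; a sketch (the macro here is a stand-in for the kernel's IS_ENABLED(CONFIG_...)):

#include <stdint.h>
#include <stdio.h>

#define HAVE_EFFICIENT_UNALIGNED_ACCESS 1	/* stand-in config switch */
#define ALIGNMENT 8

static uint64_t hash_aligned(const void *p)   { (void)p; return 1; }
static uint64_t hash_unaligned(const void *p) { (void)p; return 2; }

static uint64_t hash(const void *data)
{
	/* With the macro set to 1 the condition is constant-true, so the
	 * aligned branch is dead code the compiler can drop entirely. */
	if (HAVE_EFFICIENT_UNALIGNED_ACCESS ||
	    ((uintptr_t)data & (ALIGNMENT - 1)))
		return hash_unaligned(data);
	return hash_aligned(data);
}

int main(void)
{
	char buf[16];

	printf("%llu\n", (unsigned long long)hash(buf + 1));
	return 0;
}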
@@ -79,6 +79,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
 	dst_cache->reset_ts = jiffies;
 }
 
+/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all dst_cache users immediately, rather than waiting for the next
+ * per-cpu usage like dst_cache_reset does. Most callers should use the
+ * higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
 /**
  * dst_cache_init - initialize the cache, allocating the required storage
  * @dst_cache: the cache
@@ -69,7 +69,7 @@ struct fib_rules_ops {
 	int		(*action)(struct fib_rule *,
 				  struct flowi *, int,
 				  struct fib_lookup_arg *);
-	bool		(*suppress)(struct fib_rule *,
+	bool		(*suppress)(struct fib_rule *, int,
 				    struct fib_lookup_arg *);
 	int		(*match)(struct fib_rule *,
 				 struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
 			    struct fib_lookup_arg *arg));
 
 INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+						int flags,
 						struct fib_lookup_arg *arg));
 INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+						int flags,
 						struct fib_lookup_arg *arg));
 #endif
@@ -437,7 +437,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
-	return net->ipv4.fib_num_tclassid_users;
+	return atomic_read(&net->ipv4.fib_num_tclassid_users);
 }
 #else
 static inline int fib_num_tclassid_users(struct net *net)
@@ -62,7 +62,7 @@ struct netns_ipv4 {
 #endif
 	bool fib_has_custom_local_routes;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	int fib_num_tclassid_users;
+	atomic_t fib_num_tclassid_users;
 #endif
 	struct hlist_head *fib_table_hash;
 	bool fib_offload_disabled;
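Aside: the fib_num_tclassid_users conversion is the standard cure for a racy bare-int refcount: every access goes through the atomic API so concurrent increments and decrements cannot be lost. The same contract in C11 terms:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tclassid_users;

static void rule_add(void) { atomic_fetch_add(&tclassid_users, 1); }
static void rule_del(void) { atomic_fetch_sub(&tclassid_users, 1); }
static int  users(void)    { return atomic_load(&tclassid_users); }

int main(void)
{
	rule_add();
	rule_add();
	rule_del();
	printf("%d\n", users());	/* 1, with no lost updates possible */
	return 0;
}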
Some files were not shown because too many files have changed in this diff.