This is the 5.10.46 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmDTLFUACgkQONu9yGCS
aT5eThAApQAh1A++P729NJOTeoewU5YH0/1c+ZVN4nfxxEOApeBpfA4tTDvfHJeI
MYx10AI1UiLPHfLtHI5exvG00/Ll4lb0fs2bpVL2b/SQKCm2G3kZf7xOdJOBtoy4
DEaTORhmZ001weapZN+G4oz+FEnNZEyR/rThqKTA0G/PS1MxNl4ZBhY9BrySpH1V
Cq7OFX18IbTh3/XXmcPotZa2sXE6Z+jjWQb5GLZ+ZjicbzgLiWWcnrm8bzLahVC4
N7TToeGv9zOLKgrE+HVR52UoFB1+2vRUEaRVOiFbDViLjoF5KWw5rAzioTCvfXW+
g/ldoAuDQBNGUrYfVUrSNwj5JuWCI2Cltt//9f/xGfPPn0HNjAxSM7ExpnMNVhVK
1gjTco+0kWzv2BGjgpNAe7+aLka5sQkLEOYlSExI6VVuF5CCcIywWjWZ6zHG0CF1
7kW8CfINV4BFP+IYw5Gnt5K3hUTulDt+alX9WgsdPxpsZ9gbIscO1/awnRrAyDyO
2EeCbZ3WWSuvFL6qAjJERiDbhDPRaZV0cwGPxzLZ7NN8ZPXLxTVv7Nc6QoiNXYkk
E+LYcMua9dxFXjoHA0imKxlxqJD64mh3oUkdpTGOwIxrE5bavnKGrO2B3Nl7zWVn
u8mazeKHWpJ+t+dDZ47CjrNTul0SOvryKmog//DCkvAIYSjRzVc=
=WRWw
-----END PGP SIGNATURE-----

Merge 5.10.46 into android12-5.10-lts

Changes in 5.10.46
    dmaengine: idxd: add missing dsa driver unregister
    dmaengine: fsl-dpaa2-qdma: Fix error return code in two functions
    dmaengine: xilinx: dpdma: initialize registers before request_irq
    dmaengine: ALTERA_MSGDMA depends on HAS_IOMEM
    dmaengine: QCOM_HIDMA_MGMT depends on HAS_IOMEM
    dmaengine: SF_PDMA depends on HAS_IOMEM
    dmaengine: stedma40: add missing iounmap() on error in d40_probe()
    afs: Fix an IS_ERR() vs NULL check
    mm/memory-failure: make sure wait for page writeback in memory_failure
    kvm: LAPIC: Restore guard to prevent illegal APIC register access
    fanotify: fix copy_event_to_user() fid error clean up
    batman-adv: Avoid WARN_ON timing related checks
    mac80211: fix skb length check in ieee80211_scan_rx()
    mlxsw: reg: Spectrum-3: Enforce lowest max-shaper burst size of 11
    mlxsw: core: Set thermal zone polling delay argument to real value at init
    libbpf: Fixes incorrect rx_ring_setup_done
    net: ipv4: fix memory leak in netlbl_cipsov4_add_std
    vrf: fix maximum MTU
    net: rds: fix memory leak in rds_recvmsg
    net: dsa: felix: re-enable TX flow control in ocelot_port_flush()
    net: lantiq: disable interrupt before sheduling NAPI
    netfilter: nft_fib_ipv6: skip ipv6 packets from any to link-local
    ice: add ndo_bpf callback for safe mode netdev ops
    ice: parameterize functions responsible for Tx ring management
    udp: fix race between close() and udp_abort()
    rtnetlink: Fix regression in bridge VLAN configuration
    net/sched: act_ct: handle DNAT tuple collision
    net/mlx5e: Remove dependency in IPsec initialization flows
    net/mlx5e: Fix page reclaim for dead peer hairpin
    net/mlx5: Consider RoCE cap before init RDMA resources
    net/mlx5: DR, Allow SW steering for sw_owner_v2 devices
    net/mlx5: DR, Don't use SW steering when RoCE is not supported
    net/mlx5e: Block offload of outer header csum for UDP tunnels
    netfilter: synproxy: Fix out of bounds when parsing TCP options
    mptcp: Fix out of bounds when parsing TCP options
    sch_cake: Fix out of bounds when parsing TCP options and header
    mptcp: try harder to borrow memory from subflow under pressure
    mptcp: do not warn on bad input from the network
    selftests: mptcp: enable syncookie only in absence of reorders
    alx: Fix an error handling path in 'alx_probe()'
    cxgb4: fix endianness when flashing boot image
    cxgb4: fix sleep in atomic when flashing PHY firmware
    cxgb4: halt chip before flashing PHY firmware image
    net: stmmac: dwmac1000: Fix extended MAC address registers definition
    net: make get_net_ns return error if NET_NS is disabled
    net: qualcomm: rmnet: Update rmnet device MTU based on real device
    net: qualcomm: rmnet: don't over-count statistics
    ethtool: strset: fix message length calculation
    qlcnic: Fix an error handling path in 'qlcnic_probe()'
    netxen_nic: Fix an error handling path in 'netxen_nic_probe()'
    cxgb4: fix wrong ethtool n-tuple rule lookup
    ipv4: Fix device used for dst_alloc with local routes
    net: qrtr: fix OOB Read in qrtr_endpoint_post
    bpf: Fix leakage under speculation on mispredicted branches
    ptp: improve max_adj check against unreasonable values
    net: cdc_ncm: switch to eth%d interface naming
    lantiq: net: fix duplicated skb in rx descriptor ring
    net: usb: fix possible use-after-free in smsc75xx_bind
    net: fec_ptp: fix issue caused by refactor the fec_devtype
    net: ipv4: fix memory leak in ip_mc_add1_src
    net/af_unix: fix a data-race in unix_dgram_sendmsg / unix_release_sock
    net/mlx5: E-Switch, Read PF mac address
    net/mlx5: E-Switch, Allow setting GUID for host PF vport
    net/mlx5: Reset mkey index on creation
    be2net: Fix an error handling path in 'be_probe()'
    net: hamradio: fix memory leak in mkiss_close
    net: cdc_eem: fix tx fixup skb leak
    cxgb4: fix wrong shift.
    bnxt_en: Rediscover PHY capabilities after firmware reset
    bnxt_en: Fix TQM fastpath ring backing store computation
    bnxt_en: Call bnxt_ethtool_free() in bnxt_init_one() error path
    icmp: don't send out ICMP messages with a source address of 0.0.0.0
    net: ethernet: fix potential use-after-free in ec_bhf_remove
    regulator: cros-ec: Fix error code in dev_err message
    regulator: bd70528: Fix off-by-one for buck123 .n_voltages setting
    platform/x86: thinkpad_acpi: Add X1 Carbon Gen 9 second fan support
    ASoC: rt5659: Fix the lost powers for the HDA header
    phy: phy-mtk-tphy: Fix some resource leaks in mtk_phy_init()
    ASoC: fsl-asoc-card: Set .owner attribute when registering card.
    regulator: rtmv20: Fix to make regcache value first reading back from HW
    spi: spi-zynq-qspi: Fix some wrong goto jumps & missing error code
    sched/pelt: Ensure that *_sum is always synced with *_avg
    ASoC: tas2562: Fix TDM_CFG0_SAMPRATE values
    spi: stm32-qspi: Always wait BUSY bit to be cleared in stm32_qspi_wait_cmd()
    regulator: rt4801: Fix NULL pointer dereference if priv->enable_gpios is NULL
    ASoC: rt5682: Fix the fast discharge for headset unplugging in soundwire mode
    pinctrl: ralink: rt2880: avoid to error in calls is pin is already enabled
    drm/sun4i: dw-hdmi: Make HDMI PHY into a platform device
    ASoC: qcom: lpass-cpu: Fix pop noise during audio capture begin
    radeon: use memcpy_to/fromio for UVD fw upload
    hwmon: (scpi-hwmon) shows the negative temperature properly
    mm: relocate 'write_protect_seq' in struct mm_struct
    irqchip/gic-v3: Workaround inconsistent PMR setting on NMI entry
    bpf: Inherit expanded/patched seen count from old aux data
    bpf: Do not mark insn as seen under speculative path verification
    can: bcm: fix infoleak in struct bcm_msg_head
    can: bcm/raw/isotp: use per module netdevice notifier
    can: j1939: fix Use-after-Free, hold skb ref while in use
    can: mcba_usb: fix memory leak in mcba_usb
    usb: core: hub: Disable autosuspend for Cypress CY7C65632
    usb: chipidea: imx: Fix Battery Charger 1.2 CDP detection
    tracing: Do not stop recording cmdlines when tracing is off
    tracing: Do not stop recording comms if the trace file is being read
    tracing: Do no increment trace_clock_global() by one
    PCI: Mark TI C667X to avoid bus reset
    PCI: Mark some NVIDIA GPUs to avoid bus reset
    PCI: aardvark: Fix kernel panic during PIO transfer
    PCI: Add ACS quirk for Broadcom BCM57414 NIC
    PCI: Work around Huawei Intelligent NIC VF FLR erratum
    KVM: x86: Immediately reset the MMU context when the SMM flag is cleared
    KVM: x86/mmu: Calculate and check "full" mmu_role for nested MMU
    KVM: X86: Fix x86_emulator slab cache leak
    s390/mcck: fix calculation of SIE critical section size
    s390/ap: Fix hanging ioctl caused by wrong msg counter
    ARCv2: save ABI registers across signal handling
    x86/mm: Avoid truncating memblocks for SGX memory
    x86/process: Check PF_KTHREAD and not current->mm for kernel threads
    x86/ioremap: Map EFI-reserved memory as encrypted for SEV
    x86/pkru: Write hardware init value to PKRU when xstate is init
    x86/fpu: Prevent state corruption in __fpu__restore_sig()
    x86/fpu: Invalidate FPU state after a failed XRSTOR from a user buffer
    x86/fpu: Reset state for all signal restore failures
    crash_core, vmcoreinfo: append 'SECTION_SIZE_BITS' to vmcoreinfo
    dmaengine: pl330: fix wrong usage of spinlock flags in dma_cyclc
    mac80211: Fix NULL ptr deref for injected rate info
    cfg80211: make certificate generation more robust
    cfg80211: avoid double free of PMSR request
    drm/amdgpu/gfx10: enlarge CP_MEC_DOORBELL_RANGE_UPPER to cover full doorbell.
    drm/amdgpu/gfx9: fix the doorbell missing when in CGPG issue.
    net: ll_temac: Make sure to free skb when it is completely used
    net: ll_temac: Fix TX BD buffer overwrite
    net: bridge: fix vlan tunnel dst null pointer dereference
    net: bridge: fix vlan tunnel dst refcnt when egressing
    mm/swap: fix pte_same_as_swp() not removing uffd-wp bit when compare
    mm/slub: clarify verification reporting
    mm/slub: fix redzoning for small allocations
    mm/slub: actually fix freelist pointer vs redzoning
    mm/slub.c: include swab.h
    net: stmmac: disable clocks in stmmac_remove_config_dt()
    net: fec_ptp: add clock rate zero check
    tools headers UAPI: Sync linux/in.h copy with the kernel sources
    perf beauty: Update copy of linux/socket.h with the kernel sources
    usb: dwc3: debugfs: Add and remove endpoint dirs dynamically
    usb: dwc3: core: fix kernel panic when do reboot
    Linux 5.10.46

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I99f37c9f257f90ccdb091306f3d4cfb7c32e3880
@@ -181,7 +181,7 @@ SLUB Debug output
 Here is a sample of slub debug output::

  ====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
  --------------------------------------------------------------------

  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554

- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35                         1019.005
- Redzone 0xc90f6d28: 00 cc cc cc                                     .
- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35                       1019.005
+ Redzone (0xc90f6d28): 00 cc cc cc                                   .
+ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                       ZZZZZZZZ

  [<c010523d>] dump_trace+0x63/0x1eb
  [<c01053df>] show_trace_log_lvl+0x1a/0x2f

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 45
+SUBLEVEL = 46
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -18,6 +18,7 @@
  */
 struct sigcontext {
     struct user_regs_struct regs;
+    struct user_regs_arcv2 v2abi;
 };

 #endif /* _ASM_ARC_SIGCONTEXT_H */

@@ -61,6 +61,41 @@ struct rt_sigframe {
     unsigned int sigret_magic;
 };

+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+    int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+    struct user_regs_arcv2 v2abi;
+
+    v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+    v2abi.r58 = regs->r58;
+    v2abi.r59 = regs->r59;
+#else
+    v2abi.r58 = v2abi.r59 = 0;
+#endif
+    err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+    return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+    int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+    struct user_regs_arcv2 v2abi;
+
+    err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+    regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+    regs->r58 = v2abi.r58;
+    regs->r59 = v2abi.r59;
+#endif
+#endif
+    return err;
+}
+
 static int
 stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
            sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,

     err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
                  sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+    if (is_isa_arcv2())
+        err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
     err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));

     return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
     err |= __copy_from_user(&uregs.scratch,
                 &(sf->uc.uc_mcontext.regs.scratch),
                 sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+    if (is_isa_arcv2())
+        err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
     if (err)
         return -EFAULT;

@@ -1284,7 +1284,7 @@ ENDPROC(stack_overflow)
     je  1f
     larl    %r13,.Lsie_entry
     slgr    %r9,%r13
-    larl    %r13,.Lsie_skip
+    lghi    %r13,.Lsie_skip - .Lsie_entry
     clgr    %r9,%r13
     jh  1f
     oi  __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST

@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
     * PKRU state is switched eagerly because it needs to be valid before we
     * return to userland e.g. for a copy_to_user() operation.
     */
-    if (current->mm) {
+    if (!(current->flags & PF_KTHREAD)) {
+        /*
+         * If the PKRU bit in xsave.header.xfeatures is not set,
+         * then the PKRU component was in init state, which means
+         * XRSTOR will set PKRU to 0. If the bit is not set then
+         * get_xsave_addr() will return NULL because the PKRU value
+         * in memory is not valid. This means pkru_val has to be
+         * set to 0 and not to init_pkru_value.
+         */
         pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
-        if (pk)
-            pkru_val = pk->pkru;
+        pkru_val = pk ? pk->pkru : 0;
     }
     __write_pkru(pkru_val);
 }

@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
         return 0;
     }

-    if (!access_ok(buf, size))
-        return -EACCES;
+    if (!access_ok(buf, size)) {
+        ret = -EACCES;
+        goto out;
+    }

-    if (!static_cpu_has(X86_FEATURE_FPU))
-        return fpregs_soft_set(current, NULL,
-                       0, sizeof(struct user_i387_ia32_struct),
-                       NULL, buf) != 0;
+    if (!static_cpu_has(X86_FEATURE_FPU)) {
+        ret = fpregs_soft_set(current, NULL, 0,
+                      sizeof(struct user_i387_ia32_struct),
+                      NULL, buf);
+        goto out;
+    }

     if (use_xsave()) {
         struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
             fpregs_unlock();
             return 0;
         }
+
+        /*
+         * The above did an FPU restore operation, restricted to
+         * the user portion of the registers, and failed, but the
+         * microcode might have modified the FPU registers
+         * nevertheless.
+         *
+         * If the FPU registers do not belong to current, then
+         * invalidate the FPU register state otherwise the task might
+         * preempt current and return to user space with corrupted
+         * FPU registers.
+         *
+         * In case current owns the FPU registers then no further
+         * action is required. The fixup below will handle it
+         * correctly.
+         */
+        if (test_thread_flag(TIF_NEED_FPU_LOAD))
+            __cpu_invalidate_fpregs_state();
+
         fpregs_unlock();
     } else {
         /*
@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
         */
         ret = __copy_from_user(&env, buf, sizeof(env));
         if (ret)
-            goto err_out;
+            goto out;
         envp = &env;
     }

@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
     if (use_xsave() && !fx_only) {
         u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;

-        if (using_compacted_format()) {
-            ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
-        } else {
-            ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
-            if (!ret && state_size > offsetof(struct xregs_state, header))
-                ret = validate_user_xstate_header(&fpu->state.xsave.header);
-        }
+        ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
         if (ret)
-            goto err_out;
+            goto out;

         sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
                           fx_only);
@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
         ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
         if (ret) {
             ret = -EFAULT;
-            goto err_out;
+            goto out;
         }

         sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
     } else {
         ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
         if (ret)
-            goto err_out;
+            goto out;

         fpregs_lock();
         ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
         fpregs_deactivate(fpu);
         fpregs_unlock();

-err_out:
+out:
     if (ret)
         fpu__clear_user_states(fpu);
     return ret;

@@ -1405,6 +1405,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
     if (!apic_x2apic_mode(apic))
         valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);

+    if (alignment + len > 4)
+        return 1;
+
     if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
         return 1;

@@ -4705,9 +4705,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
     context->inject_page_fault = kvm_inject_page_fault;
 }

+static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+{
+    union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+    /*
+     * Nested MMUs are used only for walking L2's gva->gpa, they never have
+     * shadow pages of their own and so "direct" has no meaning. Set it
+     * to "true" to try to detect bogus usage of the nested MMU.
+     */
+    role.base.direct = true;
+
+    if (!is_paging(vcpu))
+        role.base.level = 0;
+    else if (is_long_mode(vcpu))
+        role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
+                               PT64_ROOT_4LEVEL;
+    else if (is_pae(vcpu))
+        role.base.level = PT32E_ROOT_LEVEL;
+    else
+        role.base.level = PT32_ROOT_LEVEL;
+
+    return role;
+}
+
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
-    union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+    union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
     struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

     if (new_role.as_u64 == g_context->mmu_role.as_u64)

@@ -6876,7 +6876,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)

 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-    emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+    struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+    vcpu->arch.hflags = emul_flags;
+    kvm_mmu_reset_context(vcpu);
 }

 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -8018,6 +8021,7 @@ void kvm_arch_exit(void)
     kvm_x86_ops.hardware_enable = NULL;
     kvm_mmu_module_exit();
     free_percpu(user_return_msrs);
+    kmem_cache_destroy(x86_emulator_cache);
     kmem_cache_destroy(x86_fpu_cache);
 }

@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
     if (!IS_ENABLED(CONFIG_EFI))
         return;

-    if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+    if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+        (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+         efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
         desc->flags |= IORES_MAP_ENCRYPTED;
 }

@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)

         /* make sure all non-reserved blocks are inside the limits */
         bi->start = max(bi->start, low);
-        bi->end = min(bi->end, high);
+
+        /* preserve info for non-RAM areas above 'max_pfn': */
+        if (bi->end > high) {
+            numa_add_memblk_to(bi->nid, high, bi->end,
+                       &numa_reserved_meminfo);
+            bi->end = high;
+        }

         /* and there's no empty block */
         if (bi->start >= bi->end)

@@ -59,6 +59,7 @@ config DMA_OF
 #devices
 config ALTERA_MSGDMA
     tristate "Altera / Intel mSGDMA Engine"
+    depends on HAS_IOMEM
     select DMA_ENGINE
     help
       Enable support for Altera / Intel mSGDMA controller.

@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
     }

     if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+        err = -EINVAL;
         dev_err(dev, "DPDMAI major version mismatch\n"
             "Found %u.%u, supported version is %u.%u\n",
             priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
     }

     if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+        err = -EINVAL;
         dev_err(dev, "DPDMAI minor version mismatch\n"
             "Found %u.%u, supported version is %u.%u\n",
             priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
         ppriv->store =
             dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
         if (!ppriv->store) {
+            err = -ENOMEM;
             dev_err(dev, "dpaa2_io_store_create() failed\n");
             goto err_store;
         }

@@ -518,6 +518,7 @@ module_init(idxd_init_module);

 static void __exit idxd_exit_module(void)
 {
+    idxd_unregister_driver();
     pci_unregister_driver(&idxd_pci_driver);
     idxd_cdev_remove();
     idxd_unregister_bus_type();

@@ -2696,13 +2696,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
     for (i = 0; i < len / period_len; i++) {
         desc = pl330_get_desc(pch);
         if (!desc) {
+            unsigned long iflags;
+
             dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
                 __func__, __LINE__);

             if (!first)
                 return NULL;

-            spin_lock_irqsave(&pl330->pool_lock, flags);
+            spin_lock_irqsave(&pl330->pool_lock, iflags);

             while (!list_empty(&first->node)) {
                 desc = list_entry(first->node.next,
@@ -2712,7 +2714,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(

             list_move_tail(&first->node, &pl330->desc_pool);

-            spin_unlock_irqrestore(&pl330->pool_lock, flags);
+            spin_unlock_irqrestore(&pl330->pool_lock, iflags);

             return NULL;
         }

@@ -10,6 +10,7 @@ config QCOM_BAM_DMA

 config QCOM_HIDMA_MGMT
     tristate "Qualcomm Technologies HIDMA Management support"
+    depends on HAS_IOMEM
     select DMA_ENGINE
     help
       Enable support for the Qualcomm Technologies HIDMA Management.

@@ -1,5 +1,6 @@
 config SF_PDMA
     tristate "Sifive PDMA controller driver"
+    depends on HAS_IOMEM
     select DMA_ENGINE
     select DMA_VIRTUAL_CHANNELS
     help

@@ -3676,6 +3676,9 @@ static int __init d40_probe(struct platform_device *pdev)

     kfree(base->lcla_pool.base_unaligned);

+    if (base->lcpa_base)
+        iounmap(base->lcpa_base);
+
     if (base->phy_lcpa)
         release_mem_region(base->phy_lcpa,
                    base->lcpa_size);

@@ -1459,7 +1459,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
  */
 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
 {
-    dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+    dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
     dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
 }

@@ -1596,6 +1596,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
     return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
 }

+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+    unsigned int i;
+    void __iomem *reg;
+
+    /* Disable all interrupts */
+    xilinx_dpdma_disable_irq(xdev);
+
+    /* Stop all channels */
+    for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+        reg = xdev->reg + XILINX_DPDMA_CH_BASE
+                + XILINX_DPDMA_CH_OFFSET * i;
+        dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+    }
+
+    /* Clear the interrupt status registers */
+    dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+    dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
 static int xilinx_dpdma_probe(struct platform_device *pdev)
 {
     struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1642,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
     if (IS_ERR(xdev->reg))
         return PTR_ERR(xdev->reg);

+    dpdma_hw_init(xdev);
+
     xdev->irq = platform_get_irq(pdev, 0);
     if (xdev->irq < 0) {
         dev_err(xdev->dev, "failed to get platform irq\n");

@@ -6590,8 +6590,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
     if (ring->use_doorbell) {
         WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
             (adev->doorbell_index.kiq * 2) << 2);
+        /* If GC has entered CGPG, ringing doorbell > first page doesn't
+         * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+         * this issue.
+         */
         WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-            (adev->doorbell_index.userqueue_end * 2) << 2);
+            (adev->doorbell.size - 4));
     }

     WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,

@@ -3619,8 +3619,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
     if (ring->use_doorbell) {
         WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                     (adev->doorbell_index.kiq * 2) << 2);
+        /* If GC has entered CGPG, ringing doorbell > first page doesn't
+         * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+         * this issue.
+         */
         WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                    (adev->doorbell_index.userqueue_end * 2) << 2);
+                    (adev->doorbell.size - 4));
     }

     WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,

@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
     if (rdev->uvd.vcpu_bo == NULL)
         return -EINVAL;

-    memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+    memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

     size = radeon_bo_size(rdev->uvd.vcpu_bo);
     size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
     ptr = rdev->uvd.cpu_addr;
     ptr += rdev->uvd_fw->size;

-    memset(ptr, 0, size);
+    memset_io((void __iomem *)ptr, 0, size);

     return 0;
 }

@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
         goto err_disable_clk_tmds;
     }

-    ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+    ret = sun8i_hdmi_phy_get(hdmi, phy_node);
     of_node_put(phy_node);
     if (ret) {
         dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,

 cleanup_encoder:
     drm_encoder_cleanup(encoder);
-    sun8i_hdmi_phy_remove(hdmi);
 err_disable_clk_tmds:
     clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
     struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);

     dw_hdmi_unbind(hdmi->hdmi);
-    sun8i_hdmi_phy_remove(hdmi);
     clk_disable_unprepare(hdmi->clk_tmds);
     reset_control_assert(hdmi->rst_ctrl);
     gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
         .of_match_table = sun8i_dw_hdmi_dt_ids,
     },
 };
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+    int ret;
+
+    ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+    if (ret)
+        return ret;
+
+    ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+    if (ret) {
+        platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+        return ret;
+    }
+
+    return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+    platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+    platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);

 MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
 MODULE_DESCRIPTION("Allwinner DW HDMI bridge");

@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
     struct gpio_desc        *ddc_en;
 };

+extern struct platform_driver sun8i_hdmi_phy_driver;
+
 static inline struct sun8i_dw_hdmi *
 encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 {
     return container_of(encoder, struct sun8i_dw_hdmi, encoder);
 }

-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);

 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,

@@ -5,6 +5,7 @@

 #include <linux/delay.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>

 #include "sun8i_dw_hdmi.h"

@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
     { /* sentinel */ }
 };

-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+    struct platform_device *pdev = of_find_device_by_node(node);
+    struct sun8i_hdmi_phy *phy;
+
+    if (!pdev)
+        return -EPROBE_DEFER;
+
+    phy = platform_get_drvdata(pdev);
+    if (!phy)
+        return -EPROBE_DEFER;
+
+    hdmi->phy = phy;
+
+    put_device(&pdev->dev);
+
+    return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
 {
     const struct of_device_id *match;
-    struct device *dev = hdmi->dev;
+    struct device *dev = &pdev->dev;
+    struct device_node *node = dev->of_node;
     struct sun8i_hdmi_phy *phy;
     struct resource res;
     void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
         clk_prepare_enable(phy->clk_phy);
     }

-    hdmi->phy = phy;
+    platform_set_drvdata(pdev, phy);

     return 0;

@@ -728,9 +749,9 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
     return ret;
 }

-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
-    struct sun8i_hdmi_phy *phy = hdmi->phy;
+    struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);

     clk_disable_unprepare(phy->clk_mod);
     clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
     clk_put(phy->clk_pll1);
     clk_put(phy->clk_mod);
     clk_put(phy->clk_bus);
+    return 0;
 }
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+    .probe  = sun8i_hdmi_phy_probe,
+    .remove = sun8i_hdmi_phy_remove,
+    .driver = {
+        .name = "sun8i-hdmi-phy",
+        .of_match_table = sun8i_hdmi_phy_of_table,
+    },
+};

@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)

     scpi_scale_reading(&value, sensor);

+    /*
+     * Temperature sensor values are treated as signed values based on
+     * observation even though that is not explicitly specified, and
+     * because an unsigned u64 temperature does not really make practical
+     * sense especially when the temperature is below zero degrees Celsius.
+     */
+    if (sensor->info.class == TEMPERATURE)
+        return sprintf(buf, "%lld\n", (s64)value);
+
     return sprintf(buf, "%llu\n", value);
 }

@@ -648,11 +648,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
         nmi_exit();
 }

+static u32 do_read_iar(struct pt_regs *regs)
+{
+    u32 iar;
+
+    if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
+        u64 pmr;
+
+        /*
+         * We were in a context with IRQs disabled. However, the
+         * entry code has set PMR to a value that allows any
+         * interrupt to be acknowledged, and not just NMIs. This can
+         * lead to surprising effects if the NMI has been retired in
+         * the meantime, and that there is an IRQ pending. The IRQ
+         * would then be taken in NMI context, something that nobody
+         * wants to debug twice.
+         *
+         * Until we sort this, drop PMR again to a level that will
+         * actually only allow NMIs before reading IAR, and then
+         * restore it to what it was.
+         */
+        pmr = gic_read_pmr();
+        gic_pmr_mask_irqs();
+        isb();
+
+        iar = gic_read_iar();
+
+        gic_write_pmr(pmr);
+    } else {
+        iar = gic_read_iar();
+    }
+
+    return iar;
+}
+
 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
     u32 irqnr;

-    irqnr = gic_read_iar();
+    irqnr = do_read_iar(regs);

     /* Check for special IDs first */
     if ((irqnr >= 1020 && irqnr <= 1023))

@@ -82,6 +82,8 @@ struct mcba_priv {
     bool can_ka_first_pass;
     bool can_speed_check;
     atomic_t free_ctx_cnt;
+    void *rxbuf[MCBA_MAX_RX_URBS];
+    dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
 };

 /* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
     for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
         struct urb *urb = NULL;
         u8 *buf;
+        dma_addr_t buf_dma;

         /* create a URB, and a buffer for it */
         urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
         }

         buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                     GFP_KERNEL, &urb->transfer_dma);
+                     GFP_KERNEL, &buf_dma);
         if (!buf) {
             netdev_err(netdev, "No memory left for USB buffer\n");
             usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
         if (err) {
             usb_unanchor_urb(urb);
             usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                      buf, urb->transfer_dma);
+                      buf, buf_dma);
             usb_free_urb(urb);
             break;
         }

+        priv->rxbuf[i] = buf;
+        priv->rxbuf_dma[i] = buf_dma;
+
         /* Drop reference, USB core will take care of freeing it */
         usb_free_urb(urb);
     }
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)

 static void mcba_urb_unlink(struct mcba_priv *priv)
 {
+    int i;
+
     usb_kill_anchored_urbs(&priv->rx_submitted);
+
+    for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+        usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+                  priv->rxbuf[i], priv->rxbuf_dma[i]);
+
     usb_kill_anchored_urbs(&priv->tx_submitted);
 }

@@ -1849,6 +1849,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     free_netdev(netdev);
 out_pci_release:
     pci_release_mem_regions(pdev);
+    pci_disable_pcie_error_reporting(pdev);
 out_pci_disable:
     pci_disable_device(pdev);
     return err;

@@ -7184,7 +7184,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
     entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
              2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
     entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
-    entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+    entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
     entries = roundup(entries, ctx->tqm_entries_multiple);
     entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
     for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -11353,6 +11353,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
     bnxt_hwrm_coal_params_qcaps(bp);
 }

+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
 static int bnxt_fw_init_one(struct bnxt *bp)
 {
     int rc;
@@ -11367,6 +11369,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
         netdev_err(bp->dev, "Firmware init phase 2 failed\n");
         return rc;
     }
+    rc = bnxt_probe_phy(bp, false);
+    if (rc)
+        return rc;
     rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
     if (rc)
         return rc;
@@ -12741,6 +12746,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
     bnxt_hwrm_func_drv_unrgtr(bp);
     bnxt_free_hwrm_short_cmd_req(bp);
     bnxt_free_hwrm_resources(bp);
+    bnxt_ethtool_free(bp);
     kfree(bp->fw_health);
     bp->fw_health = NULL;
     bnxt_cleanup_pci(bp);

@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
         return ret;
     }

-    spin_lock_bh(&adap->win0_lock);
-    ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
-    spin_unlock_bh(&adap->win0_lock);
-    if (ret)
-        dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+    /* We have to RESET the chip/firmware because we need the
+     * chip in uninitialized state for loading new PHY image.
+     * Otherwise, the running firmware will only store the PHY
+     * image in local RAM which will be lost after next reset.
+     */
+    ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+    if (ret < 0) {
+        dev_err(adap->pdev_dev,
+            "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+            ret);
+        return ret;
+    }

-    return ret;
+    ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
+    if (ret < 0) {
+        dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+            ret);
+        return ret;
+    }
+
+    return 0;
 }

 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
                            u32 ftid)
 {
     struct tid_info *t = &adap->tids;
-    struct filter_entry *f;

-    if (ftid < t->nhpftids)
-        f = &adap->tids.hpftid_tab[ftid];
-    else if (ftid < t->nftids)
-        f = &adap->tids.ftid_tab[ftid - t->nhpftids];
-    else
-        f = lookup_tid(&adap->tids, ftid);
+    if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+        return &t->hpftid_tab[ftid - t->hpftid_base];

-    return f;
+    if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+        return &t->ftid_tab[ftid - t->ftid_base];
+
+    return lookup_tid(t, ftid);
 }

 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
     filter_id = filter_info->loc_array[cmd->fs.location];
     f = cxgb4_get_filter_entry(adapter, filter_id);

+    if (f->fs.prio)
+        filter_id -= adapter->tids.hpftid_base;
+    else if (!f->fs.hash)
+        filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
     ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
     if (ret)
         goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,

     filter_info = &adapter->ethtool_filters->port[pi->port_id];

+    if (fs.prio)
+        tid += adapter->tids.hpftid_base;
+    else if (!fs.hash)
+        tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
     filter_info->loc_array[cmd->fs.location] = tid;
     set_bit(cmd->fs.location, filter_info->bmap);
     filter_info->in_use++;

@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                       WORD_MASK, f->fs.nat_lip[3] |
                       f->fs.nat_lip[2] << 8 |
                       f->fs.nat_lip[1] << 16 |
-                      (u64)f->fs.nat_lip[0] << 25, 1);
+                      (u64)f->fs.nat_lip[0] << 24, 1);
     }
 }

@@ -4428,10 +4428,8 @@ static int adap_init0_phy(struct adapter *adap)

     /* Load PHY Firmware onto adapter.
      */
-    spin_lock_bh(&adap->win0_lock);
     ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
                  (u8 *)phyf->data, phyf->size);
-    spin_unlock_bh(&adap->win0_lock);
     if (ret < 0)
         dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
             -ret);

@@ -3067,16 +3067,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
  *  @addr: the start address to write
  *  @n: length of data to write in bytes
  *  @data: the data to write
+ *  @byte_oriented: whether to store data as bytes or as words
  *
  *  Writes up to a page of data (256 bytes) to the serial flash starting
  *  at the given address.  All the data must be written to the same page.
+ *  If @byte_oriented is set the write data is stored as byte stream
+ *  (i.e. matches what on disk), otherwise in big-endian.
  */
 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
-              unsigned int n, const u8 *data)
+              unsigned int n, const u8 *data, bool byte_oriented)
 {
-    int ret;
-    u32 buf[64];
     unsigned int i, c, left, val, offset = addr & 0xff;
+    u32 buf[64];
+    int ret;

     if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
         return -EINVAL;
@@ -3087,10 +3090,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
         (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
         goto unlock;

-    for (left = n; left; left -= c) {
+    for (left = n; left; left -= c, data += c) {
         c = min(left, 4U);
-        for (val = 0, i = 0; i < c; ++i)
-            val = (val << 8) + *data++;
+        for (val = 0, i = 0; i < c; ++i) {
+            if (byte_oriented)
+                val = (val << 8) + data[i];
+            else
+                val = (val << 8) + data[c - i - 1];
+        }

         ret = sf1_write(adapter, c, c != left, 1, val);
         if (ret)
@@ -3103,7 +3110,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
     t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

     /* Read the page to verify the write succeeded */
-    ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+    ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+                byte_oriented);
     if (ret)
         return ret;

@@ -3699,7 +3707,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
      */
     memcpy(first_page, fw_data, SF_PAGE_SIZE);
     ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
-    ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+    ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
     if (ret)
         goto out;

@@ -3707,14 +3715,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
     for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
         addr += SF_PAGE_SIZE;
         fw_data += SF_PAGE_SIZE;
-        ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+        ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
         if (ret)
             goto out;
     }

-    ret = t4_write_flash(adap,
-                 fw_start + offsetof(struct fw_hdr, fw_ver),
-                 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+    ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+                 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+                 true);
 out:
     if (ret)
         dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3819,9 +3827,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
     /* Copy the supplied PHY Firmware image to the adapter memory location
      * allocated by the adapter firmware.
      */
+    spin_lock_bh(&adap->win0_lock);
     ret = t4_memory_rw(adap, win, mtype, maddr,
                phy_fw_size, (__be32 *)phy_fw_data,
                T4_MEMORY_WRITE);
+    spin_unlock_bh(&adap->win0_lock);
     if (ret)
         return ret;

@@ -10215,7 +10225,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
             n = size - i;
         else
             n = SF_PAGE_SIZE;
-        ret = t4_write_flash(adap, addr, n, cfg_data);
+        ret = t4_write_flash(adap, addr, n, cfg_data, true);
         if (ret)
             goto out;

@@ -10684,13 +10694,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
     for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
         addr += SF_PAGE_SIZE;
         boot_data += SF_PAGE_SIZE;
-        ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+        ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+                     false);
         if (ret)
             goto out;
     }

     ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
-                 (const u8 *)header);
+                 (const u8 *)header, false);

 out:
     if (ret)
@@ -10765,7 +10776,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
     for (i = 0; i < size; i += SF_PAGE_SIZE) {
         n = min_t(u32, size - i, SF_PAGE_SIZE);

-        ret = t4_write_flash(adap, addr, n, cfg_data);
+        ret = t4_write_flash(adap, addr, n, cfg_data, false);
         if (ret)
             goto out;

@@ -10777,7 +10788,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
     for (i = 0; i < npad; i++) {
         u8 data = 0;

-        ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+        ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+                     false);
         if (ret)
             goto out;
     }

@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
     struct ec_bhf_priv *priv = netdev_priv(net_dev);

     unregister_netdev(net_dev);
-    free_netdev(net_dev);

     pci_iounmap(dev, priv->dma_io);
     pci_iounmap(dev, priv->io);
+
+    free_netdev(net_dev);
+
     pci_release_regions(dev);
     pci_clear_master(dev);
     pci_disable_device(dev);

@@ -5905,6 +5905,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 unmap_bars:
     be_unmap_pci_bars(adapter);
 free_netdev:
+    pci_disable_pcie_error_reporting(pdev);
     free_netdev(netdev);
 rel_reg:
     pci_release_regions(pdev);

@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
 {
     struct fec_enet_private *fep =
         container_of(cc, struct fec_enet_private, cc);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     u32 tempval;

     tempval = readl(fep->hwp + FEC_ATIME_CTRL);
     tempval |= FEC_T_CTRL_CAPTURE;
     writel(tempval, fep->hwp + FEC_ATIME_CTRL);

-    if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+    if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
         udelay(1);

     return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
     fep->ptp_caps.enable = fec_ptp_enable;

     fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+    if (!fep->cycle_speed) {
+        fep->cycle_speed = NSEC_PER_SEC;
+        dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+    }
     fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

     spin_lock_init(&fep->tmreg_lock);

@@ -1705,12 +1705,13 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
  * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
 static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
 {
     struct ice_aqc_add_tx_qgrp *qg_buf;
     u16 q_idx = 0;
@@ -1722,7 +1723,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)

     qg_buf->num_txqs = 1;

-    for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+    for (q_idx = 0; q_idx < count; q_idx++) {
         err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
         if (err)
             goto err_cfg_txqs;
@@ -1742,7 +1743,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
  */
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
 {
-    return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+    return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
 }

 /**
@@ -1757,7 +1758,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
     int ret;
     int i;

-    ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+    ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
     if (ret)
         return ret;

@@ -1955,17 +1956,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
  * @rst_src: reset source
  * @rel_vmvf_num: Relative ID of VF/VM
  * @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
  */
 static int
 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-              u16 rel_vmvf_num, struct ice_ring **rings)
+              u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
 {
     u16 q_idx;

     if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
         return -EINVAL;

-    for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+    for (q_idx = 0; q_idx < count; q_idx++) {
         struct ice_txq_meta txq_meta = { };
         int status;

@@ -1993,7 +1995,7 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
               u16 rel_vmvf_num)
 {
-    return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+    return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
 }

 /**
@@ -2002,7 +2004,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
  */
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
 {
-    return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+    return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
 }

 /**

@@ -2539,6 +2539,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
     return (ret || xdp_ring_err) ? -ENOMEM : 0;
 }

+/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+                 struct netdev_bpf *xdp)
+{
+    NL_SET_ERR_MSG_MOD(xdp->extack,
+               "Please provide working DDP firmware package in order to use XDP\n"
+               "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+    return -EOPNOTSUPP;
+}
+
 /**
  * ice_xdp - implements XDP handler
  * @dev: netdevice
@@ -6786,6 +6800,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
     .ndo_change_mtu = ice_change_mtu,
     .ndo_get_stats64 = ice_get_stats64,
     .ndo_tx_timeout = ice_tx_timeout,
+    .ndo_bpf = ice_xdp_safe_mode,
 };

 static const struct net_device_ops ice_netdev_ops = {

@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)

 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
+    struct sk_buff *skb = ch->skb[ch->dma.desc];
     dma_addr_t mapping;
     int ret = 0;

@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
                  XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
     if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
         dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+        ch->skb[ch->dma.desc] = skb;
         ret = -ENOMEM;
         goto skip;
     }
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
     ch->dma.desc %= LTQ_DESC_NUM;

     if (ret) {
-        ch->skb[ch->dma.desc] = skb;
         net_dev->stats.rx_dropped++;
         netdev_err(net_dev, "failed to allocate new rx buffer\n");
         return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
     struct xrx200_chan *ch = ptr;

     if (napi_schedule_prep(&ch->napi)) {
-        __napi_schedule(&ch->napi);
         ltq_dma_disable_irq(&ch->dma);
+        __napi_schedule(&ch->napi);
     }

     ltq_dma_ack_irq(&ch->dma);

@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
     struct mlx5_core_dev *mdev = priv->mdev;
     struct net_device *netdev = priv->netdev;

-    if (!priv->ipsec)
-        return;
-
     if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
         !MLX5_CAP_ETH(mdev, swp)) {
         mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");

@@ -4958,13 +4958,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
     }

     if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
-        netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
-                       NETIF_F_GSO_UDP_TUNNEL_CSUM;
-        netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
-                       NETIF_F_GSO_UDP_TUNNEL_CSUM;
-        netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
-        netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
-                     NETIF_F_GSO_UDP_TUNNEL_CSUM;
+        netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+        netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+        netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
     }

     if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {

@@ -5206,7 +5206,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
     list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
         wait_for_completion(&hpe->res_ready);
         if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
-            hpe->hp->pair->peer_gone = true;
+            mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

         mlx5e_hairpin_put(priv, hpe);
     }

@@ -1302,6 +1302,12 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
         (!vport_num && mlx5_core_is_ecpf(esw->dev)))
         vport->info.trusted = true;

+    /* External controller host PF has factory programmed MAC.
+     * Read it from the device.
+     */
+    if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+        mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
     esw_vport_change_handle_locked(vport);

     esw->enabled_vports++;

@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
 	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
 	mkey->size = MLX5_GET64(mkc, mkc, len);
-	mkey->key |= mlx5_idx_to_mkey(mkey_index);
+	mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
 	mkey->pd = MLX5_GET(mkc, mkc, pd);
 
 	mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
 	int err;
 
+	if (!MLX5_CAP_GEN(dev, roce))
+		return;
+
 	err = mlx5_nic_vport_enable_roce(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
@@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
 	caps->uplink_icm_address_tx =
 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
 					 sw_steering_uplink_icm_address_tx);
-	caps->sw_owner =
-		MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
-					   sw_owner);
+	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+	if (!caps->sw_owner_v2)
+		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
 
 	return 0;
 }
@@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 	caps->nic_tx_allow_address =
 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
 
-	caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
-	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
+	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
 
-	caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+	if (!caps->rx_sw_owner_v2)
+		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+	if (!caps->tx_sw_owner_v2)
+		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+
+	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
 
 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
 	caps->hdr_modify_icm_addr =
@@ -4,6 +4,11 @@
 #include <linux/mlx5/eswitch.h>
 #include "dr_types.h"
 
+#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
+	((dmn)->info.caps.dmn_type##_sw_owner ||	\
+	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
+	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+
 static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
 {
 	/* Per vport cached FW FT for checksum recalculation, this
@@ -181,6 +186,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
 		return ret;
 
 	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
 	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
 	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
 
@@ -223,18 +229,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 	if (ret)
 		return ret;
 
-	if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
-		mlx5dr_err(dmn, "SW steering is not supported on this device\n");
-		return -EOPNOTSUPP;
-	}
-
 	ret = dr_domain_query_fdb_caps(mdev, dmn);
 	if (ret)
 		return ret;
 
 	switch (dmn->type) {
 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
-		if (!dmn->info.caps.rx_sw_owner)
+		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
 			return -ENOTSUPP;
 
 		dmn->info.supp_sw_steering = true;
@@ -243,7 +244,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
 		break;
 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
-		if (!dmn->info.caps.tx_sw_owner)
+		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
 			return -ENOTSUPP;
 
 		dmn->info.supp_sw_steering = true;
@@ -255,7 +256,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		if (!dmn->info.caps.eswitch_manager)
 			return -ENOTSUPP;
 
-		if (!dmn->info.caps.fdb_sw_owner)
+		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
 			return -ENOTSUPP;
 
 		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
@@ -597,7 +597,8 @@ struct mlx5dr_esw_caps {
 	u64 drop_icm_address_tx;
 	u64 uplink_icm_address_rx;
 	u64 uplink_icm_address_tx;
-	bool sw_owner;
+	u8 sw_owner:1;
+	u8 sw_owner_v2:1;
 };
 
 struct mlx5dr_cmd_vport_cap {
@@ -630,6 +631,9 @@ struct mlx5dr_cmd_caps {
 	bool rx_sw_owner;
 	bool tx_sw_owner;
 	bool fdb_sw_owner;
+	u8 rx_sw_owner_v2:1;
+	u8 tx_sw_owner_v2:1;
+	u8 fdb_sw_owner_v2:1;
 	u32 num_vports;
 	struct mlx5dr_esw_caps esw_caps;
 	struct mlx5dr_cmd_vport_cap *vports_caps;
@@ -124,7 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
 static inline bool
 mlx5dr_is_supported(struct mlx5_core_dev *dev)
 {
-	return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
+	return MLX5_CAP_GEN(dev, roce) &&
+	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+		(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+		 (MLX5_CAP_GEN(dev, steering_format_version) <=
+		  MLX5_STEERING_FORMAT_CONNECTX_6DX)));
 }
 
 #endif /* _MLX5DR_H_ */
@@ -424,6 +424,15 @@ static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
 	return err;
 }
 
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+	int i;
+
+	for (i = 0; i < hp->num_channels; i++)
+		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+				       MLX5_SQC_STATE_RST, 0, 0);
+}
+
 static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 {
 	int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 	for (i = 0; i < hp->num_channels; i++)
 		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
 				       MLX5_RQC_STATE_RST, 0, 0);
-
 	/* unset peer SQs */
-	if (hp->peer_gone)
-		return;
-	for (i = 0; i < hp->num_channels; i++)
-		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
-				       MLX5_SQC_STATE_RST, 0, 0);
+	if (!hp->peer_gone)
+		mlx5_hairpin_unpair_peer_sq(hp);
 }
 
 struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
 	mlx5_hairpin_destroy_queues(hp);
 	kfree(hp);
 }
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+	int i;
+
+	mlx5_hairpin_unpair_peer_sq(hp);
+
+	/* destroy peer SQ */
+	for (i = 0; i < hp->num_channels; i++)
+		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+	hp->peer_gone = true;
+}
@@ -464,8 +464,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 	void *in;
 	int err;
 
-	if (!vport)
-		return -EINVAL;
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
 
@@ -708,7 +708,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
 							MLXSW_THERMAL_TRIP_MASK,
 							module_tz,
 							&mlxsw_thermal_module_ops,
-							NULL, 0, 0);
+							NULL, 0,
+							module_tz->parent->polling_delay);
 	if (IS_ERR(module_tz->tzdev)) {
 		err = PTR_ERR(module_tz->tzdev);
 		return err;
@@ -830,7 +831,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
 						MLXSW_THERMAL_TRIP_MASK,
 						gearbox_tz,
 						&mlxsw_thermal_gearbox_ops,
-						NULL, 0, 0);
+						NULL, 0,
+						gearbox_tz->parent->polling_delay);
 	if (IS_ERR(gearbox_tz->tzdev))
 		return PTR_ERR(gearbox_tz->tzdev);
@@ -3641,7 +3641,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
 #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS	25
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1	5
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2	11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	11
 
 static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
 				       enum mlxsw_reg_qeec_hr hr, u8 index,
@@ -355,6 +355,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
 
 int ocelot_port_flush(struct ocelot *ocelot, int port)
 {
+	unsigned int pause_ena;
 	int err, val;
 
 	/* Disable dequeuing from the egress queues */
@@ -363,6 +364,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
 			   QSYS_PORT_MODE, port);
 
 	/* Disable flow control */
+	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
 
 	/* Disable priority flow control */
@@ -398,6 +400,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
 	/* Clear flushing again. */
 	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
 
+	/* Re-enable flow control */
+	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
 	return err;
 }
 EXPORT_SYMBOL(ocelot_port_flush);
@@ -1602,6 +1602,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	free_netdev(netdev);
 
 err_out_free_res:
+	if (NX_IS_REVISION_P3(pdev->revision))
+		pci_disable_pcie_error_reporting(pdev);
 	pci_release_regions(pdev);
 
 err_out_disable_pdev:
@@ -2692,6 +2692,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	kfree(ahw);
 
 err_out_free_res:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_regions(pdev);
 
 err_out_disable_pdev:
@@ -26,7 +26,7 @@ static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 }
 
 /* Needs rtnl lock */
-static struct rmnet_port*
+struct rmnet_port*
 rmnet_get_port_rtnl(const struct net_device *real_dev)
 {
 	return rtnl_dereference(real_dev->rx_handler_data);
@@ -253,7 +253,10 @@ static int rmnet_config_notify_cb(struct notifier_block *nb,
 		netdev_dbg(real_dev, "Kernel unregister\n");
 		rmnet_force_unassociate_device(real_dev);
 		break;
-
+	case NETDEV_CHANGEMTU:
+		if (rmnet_vnd_validate_real_dev_mtu(real_dev))
+			return NOTIFY_BAD;
+		break;
 	default:
 		break;
 	}
@@ -329,9 +332,17 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
 
 	if (data[IFLA_RMNET_FLAGS]) {
 		struct ifla_rmnet_flags *flags;
+		u32 old_data_format;
 
+		old_data_format = port->data_format;
 		flags = nla_data(data[IFLA_RMNET_FLAGS]);
 		port->data_format = flags->flags & flags->mask;
+
+		if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
+			port->data_format = old_data_format;
+			NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -73,4 +73,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
 		     struct netlink_ext_ack *extack);
 int rmnet_del_bridge(struct net_device *rmnet_dev,
 		     struct net_device *slave_dev);
+struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev);
 #endif /* _RMNET_CONFIG_H_ */
@@ -58,9 +58,30 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static int rmnet_vnd_headroom(struct rmnet_port *port)
+{
+	u32 headroom;
+
+	headroom = sizeof(struct rmnet_map_header);
+
+	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
+		headroom += sizeof(struct rmnet_map_ul_csum_header);
+
+	return headroom;
+}
+
 static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
 {
-	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct rmnet_port *port;
+	u32 headroom;
+
+	port = rmnet_get_port_rtnl(priv->real_dev);
+
+	headroom = rmnet_vnd_headroom(port);
+
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
+	    new_mtu > (priv->real_dev->mtu - headroom))
 		return -EINVAL;
 
 	rmnet_dev->mtu = new_mtu;
@@ -104,24 +125,24 @@ static void rmnet_get_stats64(struct net_device *dev,
 			      struct rtnl_link_stats64 *s)
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
-	struct rmnet_vnd_stats total_stats;
+	struct rmnet_vnd_stats total_stats = { };
 	struct rmnet_pcpu_stats *pcpu_ptr;
+	struct rmnet_vnd_stats snapshot;
 	unsigned int cpu, start;
 
-	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
 	for_each_possible_cpu(cpu) {
 		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
 
 		do {
 			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
-			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
-			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
-			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
-			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+			snapshot = pcpu_ptr->stats;	/* struct assignment */
 		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
 
-		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+		total_stats.rx_pkts += snapshot.rx_pkts;
+		total_stats.rx_bytes += snapshot.rx_bytes;
+		total_stats.tx_pkts += snapshot.tx_pkts;
+		total_stats.tx_bytes += snapshot.tx_bytes;
+		total_stats.tx_drops += snapshot.tx_drops;
 	}
 
 	s->rx_packets = total_stats.rx_pkts;
@@ -229,6 +250,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 {
 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	u32 headroom;
 	int rc;
 
 	if (rmnet_get_endpoint(port, id)) {
@@ -242,6 +264,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 
 	priv->real_dev = real_dev;
 
+	headroom = rmnet_vnd_headroom(port);
+
+	if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+		return -EINVAL;
+	}
+
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
 		ep->egress_dev = rmnet_dev;
@@ -283,3 +312,45 @@ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
 
 	return 0;
 }
+
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
+{
+	struct hlist_node *tmp_ep;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	unsigned long bkt_ep;
+	u32 headroom;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	headroom = rmnet_vnd_headroom(port);
+
+	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
+			return -1;
+	}
+
+	return 0;
+}
+
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+			     struct net_device *real_dev)
+{
+	struct hlist_node *tmp_ep;
+	struct rmnet_endpoint *ep;
+	unsigned long bkt_ep;
+	u32 headroom;
+
+	headroom = rmnet_vnd_headroom(port);
+
+	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+		if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
+			continue;
+
+		if (rmnet_vnd_change_mtu(ep->egress_dev,
+					 real_dev->mtu - headroom))
+			return -1;
+	}
+
+	return 0;
+}
@@ -18,4 +18,7 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
 void rmnet_vnd_setup(struct net_device *dev);
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+			     struct net_device *real_dev);
 #endif /* _RMNET_VND_H_ */
@@ -76,10 +76,10 @@ enum power_event {
 #define LPI_CTRL_STATUS_TLPIEN	0x00000001	/* Transmit LPI Entry */
 
 /* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
-				(reg * 8))
-#define GMAC_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
-				(reg * 8))
+#define GMAC_ADDR_HIGH(reg)	((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+				 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg)	((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+				 0x00000044 + (reg * 8))
 #define GMAC_MAX_PERFECT_ADDRESSES	1
 
 #define GMAC_PCS_BASE		0x000000c0	/* PCS register base */
@@ -626,6 +626,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 void stmmac_remove_config_dt(struct platform_device *pdev,
 			     struct plat_stmmacenet_data *plat)
 {
+	clk_disable_unprepare(plat->stmmac_clk);
+	clk_disable_unprepare(plat->pclk);
 	of_node_put(plat->phy_node);
 	of_node_put(plat->mdio_node);
 }
@@ -849,7 +849,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		smp_mb();
 
 		/* Space might have just been freed - check again */
-		if (temac_check_tx_bd_space(lp, num_frag))
+		if (temac_check_tx_bd_space(lp, num_frag + 1))
 			return NETDEV_TX_BUSY;
 
 		netif_wake_queue(ndev);
@@ -876,7 +876,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 	cur_p->phys = cpu_to_be32(skb_dma_addr);
-	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +914,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
+	/* Mark last fragment with skb address, so it can be consumed
+	 * in temac_start_xmit_done()
+	 */
+	ptr_to_txbd((void *)skb, cur_p);
+
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	lp->tx_bd_tail++;
 	if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
 	ax->tty = NULL;
 
 	unregister_netdev(ax->dev);
+	free_netdev(ax->dev);
 }
 
 /* Perform I/O control on an active ax25 channel. */
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 	}
 
 	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+	dev_kfree_skb_any(skb);
 	if (!skb2)
 		return NULL;
 
-	dev_kfree_skb_any(skb);
 	skb = skb2;
 
 done:
@@ -1890,7 +1890,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 static const struct driver_info cdc_ncm_info = {
 	.description = "CDC NCM",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_LINK_INTR,
+			| FLAG_LINK_INTR | FLAG_ETHER,
 	.bind = cdc_ncm_bind,
 	.unbind = cdc_ncm_unbind,
 	.manage_power = usbnet_manage_power,
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_wait_ready(dev, 0);
 	if (ret < 0) {
 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-		goto err;
+		goto free_pdata;
 	}
 
 	smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_reset(dev);
 	if (ret < 0) {
 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-		goto err;
+		goto cancel_work;
 	}
 
 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
 	return 0;
 
-err:
+cancel_work:
+	cancel_work_sync(&pdata->set_multicast);
+free_pdata:
 	kfree(pdata);
+	dev->data[0] = 0;
 	return ret;
 }
 
@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 		cancel_work_sync(&pdata->set_multicast);
 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
-		pdata = NULL;
 		dev->data[0] = 0;
 	}
 }
@@ -1184,9 +1184,6 @@ static int vrf_dev_init(struct net_device *dev)
 
 	dev->flags = IFF_MASTER | IFF_NOARP;
 
-	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
-	dev->mtu = 64 * 1024;
-
 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
 	dev->operstate = IF_OPER_UP;
 	netdev_lockdep_set_classes(dev);
@@ -1620,7 +1617,8 @@ static void vrf_setup(struct net_device *dev)
 	 * which breaks networking.
 	 */
 	dev->min_mtu = IPV6_MIN_MTU;
-	dev->max_mtu = ETH_MAX_MTU;
+	dev->max_mtu = IP6_MAX_MTU;
+	dev->mtu = dev->max_mtu;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 		udelay(PIO_RETRY_DELAY);
 	}
 
-	dev_err(dev, "config read/write timed out\n");
+	dev_err(dev, "PIO read/write transfer time out\n");
 	return -ETIMEDOUT;
 }
 
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
 	return true;
 }
 
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+
+	/*
+	 * Trying to start a new PIO transfer when previous has not completed
+	 * cause External Abort on CPU which results in kernel panic:
+	 *
+	 *	SError Interrupt on CPU0, code 0xbf000002 -- SError
+	 *	Kernel panic - not syncing: Asynchronous SError Interrupt
+	 *
+	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+	 * concurrent calls at the same time. But because PIO transfer may take
+	 * about 1.5s when link is down or card is disconnected, it means that
+	 * advk_pcie_wait_pio() does not always have to wait for completion.
+	 *
+	 * Some versions of ARM Trusted Firmware handles this External Abort at
+	 * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
+	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+	 */
+	if (advk_readl(pcie, PIO_START)) {
+		dev_err(dev, "Previous PIO read/write transfer is still running\n");
+		return true;
+	}
+
+	return false;
+}
+
 static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 			     int where, int size, u32 *val)
 {
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 		return pci_bridge_emul_conf_read(&pcie->bridge, where,
 						 size, val);
 
-	/* Start PIO */
-	advk_writel(pcie, 0, PIO_START);
-	advk_writel(pcie, 1, PIO_ISR);
+	if (advk_pcie_pio_is_running(pcie)) {
+		*val = 0xffffffff;
+		return PCIBIOS_SET_FAILED;
+	}
 
 	/* Program the control register */
 	reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 	/* Program the data strobe */
 	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
 
-	/* Start the transfer */
+	/* Clear PIO DONE ISR and start the transfer */
+	advk_writel(pcie, 1, PIO_ISR);
 	advk_writel(pcie, 1, PIO_START);
 
 	ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	if (where % size)
 		return PCIBIOS_SET_FAILED;
 
-	/* Start PIO */
-	advk_writel(pcie, 0, PIO_START);
-	advk_writel(pcie, 1, PIO_ISR);
+	if (advk_pcie_pio_is_running(pcie))
+		return PCIBIOS_SET_FAILED;
 
 	/* Program the control register */
 	reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	/* Program the data strobe */
 	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
 
-	/* Start the transfer */
+	/* Clear PIO DONE ISR and start the transfer */
+	advk_writel(pcie, 1, PIO_ISR);
 	advk_writel(pcie, 1, PIO_START);
 
 	ret = advk_pcie_wait_pio(pcie);
@@ -3557,6 +3557,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
 	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
 }
 
+/*
+ * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
+ * prevented for those affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+	if ((dev->device & 0xffc0) == 0x2340)
+		quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+			 quirk_nvidia_no_bus_reset);
+
 /*
  * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
  * The device will throw a Link Down error on AER-capable systems and
@@ -3577,6 +3589,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
 
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
 	/*
@@ -3912,6 +3934,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
 	return 0;
 }
 
+#define PCI_DEVICE_ID_HINIC_VF	0x375E
+#define HINIC_VF_FLR_TYPE	0x1000
+#define HINIC_VF_FLR_CAP_BIT	(1UL << 30)
+#define HINIC_VF_OP		0xE80
+#define HINIC_VF_FLR_PROC_BIT	(1UL << 18)
+#define HINIC_OPERATION_TIMEOUT	15000	/* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+	unsigned long timeout;
+	void __iomem *bar;
+	u32 val;
+
+	if (probe)
+		return 0;
+
+	bar = pci_iomap(pdev, 0, 0);
+	if (!bar)
+		return -ENOTTY;
+
+	/* Get and check firmware capabilities */
+	val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+	if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+		pci_iounmap(pdev, bar);
+		return -ENOTTY;
+	}
+
+	/* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+	val = ioread32be(bar + HINIC_VF_OP);
+	val = val | HINIC_VF_FLR_PROC_BIT;
+	iowrite32be(val, bar + HINIC_VF_OP);
+
+	pcie_flr(pdev);
+
+	/*
+	 * The device must recapture its Bus and Device Numbers after FLR
+	 * in order generate Completions.  Issue a config write to let the
+	 * device capture this information.
+	 */
+	pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+	/* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+	timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+	do {
+		val = ioread32be(bar + HINIC_VF_OP);
+		if (!(val & HINIC_VF_FLR_PROC_BIT))
+			goto reset_complete;
+		msleep(20);
+	} while (time_before(jiffies, timeout));
+
+	val = ioread32be(bar + HINIC_VF_OP);
+	if (!(val & HINIC_VF_FLR_PROC_BIT))
+		goto reset_complete;
+
+	pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+	pci_iounmap(pdev, bar);
+
+	return 0;
+}
+
 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
 		 reset_intel_82599_sfp_virtfn },
@@ -3923,6 +4008,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
 	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
 		reset_chelsio_generic_dev },
+	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+		reset_hinic_vf_dev },
 	{ 0 }
 };
 
@@ -4763,6 +4850,8 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
 	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
 	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+	/* Broadcom multi-function device */
+	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
 	/* Amazon Annapurna Labs */
 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
 		break;
 	default:
 		dev_err(tphy->dev, "incompatible PHY type\n");
+		clk_disable_unprepare(instance->ref_clk);
+		clk_disable_unprepare(instance->da_ref_clk);
 		return -EINVAL;
 	}
 
@@ -8806,6 +8806,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
 	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3nd gen) */
 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
+	TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),	/* X1 Carbon (9th gen) */
 };
 
 static int __init fan_init(struct ibm_init_struct *iibm)
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
 	spin_unlock_irqrestore(&queue->lock, flags);
 }
 
-s32 scaled_ppm_to_ppb(long ppm)
+long scaled_ppm_to_ppb(long ppm)
 {
 	/*
 	 * The 'freq' field in the 'struct timex' is in parts per
@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
 	s64 ppb = 1 + ppm;
 	ppb *= 125;
 	ppb >>= 13;
-	return (s32) ppb;
+	return (long) ppb;
 }
 EXPORT_SYMBOL(scaled_ppm_to_ppb);
 
@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
 		delta = ktime_to_ns(kt);
 		err = ops->adjtime(ops, delta);
 	} else if (tx->modes & ADJ_FREQUENCY) {
-		s32 ppb = scaled_ppm_to_ppb(tx->freq);
+		long ppb = scaled_ppm_to_ppb(tx->freq);
 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
 			return -ERANGE;
 		if (ops->adjfine)
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
 
 	drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
 	if (IS_ERR(drvdata->dev)) {
+		ret = PTR_ERR(drvdata->dev);
 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-		return PTR_ERR(drvdata->dev);
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, drvdata);
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
 	struct gpio_descs *gpios = priv->enable_gpios;
 	int id = rdev_get_id(rdev), ret;
 
-	if (gpios->ndescs <= id) {
+	if (!gpios || gpios->ndescs <= id) {
 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
 		goto bypass_gpio;
 	}
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
 	struct gpio_descs *gpios = priv->enable_gpios;
 	int id = rdev_get_id(rdev);
 
-	if (gpios->ndescs <= id) {
+	if (!gpios || gpios->ndescs <= id) {
 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
 		goto bypass_gpio;
 	}
@@ -27,6 +27,7 @@
 #define RTMV20_REG_LDIRQ	0x30
 #define RTMV20_REG_LDSTAT	0x40
 #define RTMV20_REG_LDMASK	0x50
+#define RTMV20_MAX_REGS		(RTMV20_REG_LDMASK + 1)
 
 #define RTMV20_VID_MASK		GENMASK(7, 4)
 #define RICHTEK_VID		0x80
@@ -313,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
 	.val_bits = 8,
 	.cache_type = REGCACHE_RBTREE,
 	.max_register = RTMV20_REG_LDMASK,
+	.num_reg_defaults_raw = RTMV20_MAX_REGS,
 
 	.writeable_reg = rtmv20_is_accessible_reg,
 	.readable_reg = rtmv20_is_accessible_reg,
@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
+	bool found = false;
 
 	status = ap_dqap(aq->qid, &aq->reply->psmid,
 			 aq->reply->msg, aq->reply->len);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count--;
+		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
 		if (aq->queue_count > 0)
 			mod_timer(&aq->timeout,
 				  jiffies + aq->request_timeout);
@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 			list_del_init(&ap_msg->list);
 			aq->pendingq_count--;
 			ap_msg->receive(aq, ap_msg, aq->reply);
+			found = true;
 			break;
 		}
+		if (!found) {
+			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+				    __func__, aq->reply->psmid,
+				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		}
 		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (!status.queue_empty || aq->queue_count <= 0)
@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 				     ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count++;
+		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
 		if (aq->queue_count == 1)
 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
 		list_move_tail(&ap_msg->list, &aq->pendingq);
@@ -293,7 +293,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
 	int err = 0;
 
 	if (!op->data.nbytes)
-		return stm32_qspi_wait_nobusy(qspi);
+		goto wait_nobusy;
 
 	if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
 		goto out;
@@ -314,6 +314,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
 out:
 	/* clear flags */
 	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+	if (!err)
+		err = stm32_qspi_wait_nobusy(qspi);
 
 	return err;
 }
@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
 	xqspi->irq = platform_get_irq(pdev, 0);
 	if (xqspi->irq <= 0) {
 		ret = -ENXIO;
-		goto remove_master;
+		goto clk_dis_all;
 	}
 	ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
 			       0, pdev->name, xqspi);
 	if (ret != 0) {
 		ret = -ENXIO;
 		dev_err(&pdev->dev, "request_irq failed\n");
-		goto remove_master;
+		goto clk_dis_all;
 	}
 
 	ret = of_property_read_u32(np, "num-cs",
@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
 	if (ret < 0) {
 		ctlr->num_chipselect = 1;
 	} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+		ret = -EINVAL;
 		dev_err(&pdev->dev, "only 2 chip selects are available\n");
-		goto remove_master;
+		goto clk_dis_all;
 	} else {
 		ctlr->num_chipselect = num_cs;
 	}
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
 	if (p->groups[group].enabled) {
 		dev_err(p->dev, "%s is already enabled\n",
 			p->groups[group].name);
-		return -EBUSY;
+		return 0;
 	}
 
 	p->groups[group].enabled = 1;
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
 	int val;
 	unsigned long flags;
 
+	/* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
+	spin_lock_irqsave(&usbmisc->lock, flags);
+	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+	val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+	spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+	/* TVDMSRC_DIS */
+	msleep(20);
+
 	/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
 	spin_lock_irqsave(&usbmisc->lock, flags);
 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
 	       usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
 	spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-	usleep_range(1000, 2000);
+	/* TVDMSRC_ON */
+	msleep(40);
 
 	/*
 	 * Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
 	       usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
 	spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-	usleep_range(1000, 2000);
+	/* TVDPSRC_ON */
+	msleep(40);
 
 	/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
@@ -40,6 +40,8 @@
 #define USB_VENDOR_GENESYS_LOGIC		0x05e3
 #define USB_VENDOR_SMSC				0x0424
 #define USB_PRODUCT_USB5534B			0x5534
+#define USB_VENDOR_CYPRESS			0x04b4
+#define USB_PRODUCT_CY7C65632			0x6570
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
 
@@ -5643,6 +5645,11 @@ static const struct usb_device_id hub_id_table[] = {
       .idProduct = USB_PRODUCT_USB5534B,
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                   | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_CYPRESS,
+      .idProduct = USB_PRODUCT_CY7C65632,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
                    | USB_DEVICE_ID_MATCH_INT_CLASS,
       .idVendor = USB_VENDOR_GENESYS_LOGIC,
@@ -1672,8 +1672,8 @@ static int dwc3_remove(struct platform_device *pdev)
 
 	pm_runtime_get_sync(&pdev->dev);
 
-	dwc3_debugfs_exit(dwc);
 	dwc3_core_exit_mode(dwc);
+	dwc3_debugfs_exit(dwc);
 
 	dwc3_core_exit(dwc);
 	dwc3_ulpi_exit(dwc);
@@ -203,8 +203,8 @@ static int __init afs_init(void)
 		goto error_fs;
 
 	afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
-	if (IS_ERR(afs_proc_symlink)) {
-		ret = PTR_ERR(afs_proc_symlink);
+	if (!afs_proc_symlink) {
+		ret = -ENOMEM;
 		goto error_proc;
 	}
 
@@ -378,7 +378,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 					  info_type, fanotify_info_name(info),
 					  info->name_len, buf, count);
 		if (ret < 0)
-			return ret;
+			goto out_close_fd;
 
 		buf += ret;
 		count -= ret;
@@ -426,7 +426,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 					  fanotify_event_object_fh(event),
 					  info_type, dot, dot_len, buf, count);
 		if (ret < 0)
-			return ret;
+			goto out_close_fd;
 
 		buf += ret;
 		count -= ret;
@@ -26,9 +26,7 @@ struct bd70528_data {
 	struct mutex rtc_timer_lock;
 };
 
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
+#define BD70528_LDO_VOLTS 0x20
 
 #define BD70528_REG_BUCK1_EN	0x0F
 
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
 			 struct mlx5_hairpin_params *params);
 
 void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
 #endif /* __TRANSOBJ_H__ */
@@ -468,13 +468,6 @@ struct mm_struct {
 		 */
 		atomic_t has_pinned;
 
-		/**
-		 * @write_protect_seq: Locked when any thread is write
-		 * protecting pages mapped by this mm to enforce a later COW,
-		 * for instance during page table copying for fork().
-		 */
-		seqcount_t write_protect_seq;
-
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
@@ -483,6 +476,18 @@ struct mm_struct {
 		spinlock_t page_table_lock; /* Protects page tables and some
 					     * counters
 					     */
+		/*
+		 * With some kernel config, the current mmap_lock's offset
+		 * inside 'mm_struct' is at 0x120, which is very optimal, as
+		 * its two hot fields 'count' and 'owner' sit in 2 different
+		 * cachelines, and when mmap_lock is highly contended, both
+		 * of the 2 fields will be accessed frequently, current layout
+		 * will help to reduce cache bouncing.
+		 *
+		 * So please be careful with adding new fields before
+		 * mmap_lock, which can easily push the 2 fields into one
+		 * cacheline.
+		 */
 		struct rw_semaphore mmap_lock;
 
 		struct list_head mmlist; /* List of maybe swapped mm's.	These
@@ -503,7 +508,15 @@ struct mm_struct {
 		unsigned long stack_vm;	   /* VM_STACK */
 		unsigned long def_flags;
 
+		/**
+		 * @write_protect_seq: Locked when any thread is write
+		 * protecting pages mapped by this mm to enforce a later COW,
+		 * for instance during page table copying for fork().
+		 */
+		seqcount_t write_protect_seq;
+
 		spinlock_t arg_lock; /* protect the below fields */
+
 		unsigned long start_code, end_code, start_data, end_data;
 		unsigned long start_brk, brk, start_stack;
 		unsigned long arg_start, arg_end, env_start, env_end;
@@ -222,7 +222,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
  * @ppm:    Parts per million, but with a 16 bit binary fractional field
  */
 
-extern s32 scaled_ppm_to_ppb(long ppm);
+extern long scaled_ppm_to_ppb(long ppm);
 
 /**
  * ptp_find_pin() - obtain the pin index of a given auxiliary function
@@ -437,6 +437,4 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
 extern int __sys_socketpair(int family, int type, int protocol,
 			    int __user *usockvec);
 extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
 #endif /* _LINUX_SOCKET_H */
@@ -23,6 +23,16 @@
 #define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
 #define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
 
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+	if (pte_swp_soft_dirty(pte))
+		pte = pte_swp_clear_soft_dirty(pte);
+	if (pte_swp_uffd_wp(pte))
+		pte = pte_swp_clear_uffd_wp(pte);
+	return pte;
+}
+
 /*
  * Store a type+offset into a swp_entry_t in an arch-independent format
  */
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 {
 	swp_entry_t arch_entry;
 
-	if (pte_swp_soft_dirty(pte))
-		pte = pte_swp_clear_soft_dirty(pte);
-	if (pte_swp_uffd_wp(pte))
-		pte = pte_swp_clear_uffd_wp(pte);
+	pte = pte_swp_clear_flags(pte);
 	arch_entry = __pte_to_swp_entry(pte);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
@@ -6335,7 +6335,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
 
 /**
  * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- *				 of injected frames
+ *				 of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
  * @skb: packet injected by userspace
  * @dev: the &struct device of this 802.11 device
  */
@@ -203,6 +203,8 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
 
 void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
@@ -222,6 +224,11 @@ static inline void net_ns_get_ownership(const struct net *net,
 }
 
 static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif /* CONFIG_NET_NS */
 
 
@@ -289,6 +289,9 @@ struct sockaddr_in {
 /* Address indicating an error return. */
 #define	INADDR_NONE		((unsigned long int) 0xffffffff)
 
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
+
 /* Network number for local host loopback. */
 #define	IN_LOOPBACKNET		127
 
@@ -5740,6 +5740,27 @@ struct bpf_sanitize_info {
 	bool mask_to_left;
 };
 
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+			  const struct bpf_insn *insn,
+			  u32 next_idx, u32 curr_idx)
+{
+	struct bpf_verifier_state *branch;
+	struct bpf_reg_state *regs;
+
+	branch = push_stack(env, next_idx, curr_idx, true);
+	if (branch && insn) {
+		regs = branch->frame[branch->curframe]->regs;
+		if (BPF_SRC(insn->code) == BPF_K) {
+			mark_reg_unknown(env, regs, insn->dst_reg);
+		} else if (BPF_SRC(insn->code) == BPF_X) {
+			mark_reg_unknown(env, regs, insn->dst_reg);
+			mark_reg_unknown(env, regs, insn->src_reg);
+		}
+	}
+	return branch;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 			    struct bpf_insn *insn,
 			    const struct bpf_reg_state *ptr_reg,
@@ -5823,12 +5844,26 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		tmp = *dst_reg;
 		*dst_reg = *ptr_reg;
 	}
-	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+					env->insn_idx);
 	if (!ptr_is_dst_reg && ret)
 		*dst_reg = tmp;
 	return !ret ? REASON_STACK : 0;
 }
 
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+
+	/* If we simulate paths under speculation, we don't update the
+	 * insn as 'seen' such that when we verify unreachable paths in
+	 * the non-speculative domain, sanitize_dead_code() can still
+	 * rewrite/sanitize them.
+	 */
+	if (!vstate->speculative)
+		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
 static int sanitize_err(struct bpf_verifier_env *env,
 			const struct bpf_insn *insn, int reason,
 			const struct bpf_reg_state *off_reg,
@@ -7974,14 +8009,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		if (err)
 			return err;
 	}
+
 	if (pred == 1) {
-		/* only follow the goto, ignore fall-through */
+		/* Only follow the goto, ignore fall-through. If needed, push
+		 * the fall-through branch for simulation under speculative
+		 * execution.
+		 */
+		if (!env->bypass_spec_v1 &&
+		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
+					       *insn_idx))
+			return -EFAULT;
 		*insn_idx += insn->off;
 		return 0;
 	} else if (pred == 0) {
-		/* only follow fall-through branch, since
-		 * that's where the program will go
+		/* Only follow the fall-through branch, since that's where the
+		 * program will go. If needed, push the goto branch for
+		 * simulation under speculative execution.
 		 */
+		if (!env->bypass_spec_v1 &&
+		    !sanitize_speculative_path(env, insn,
+					       *insn_idx + insn->off + 1,
+					       *insn_idx))
+			return -EFAULT;
 		return 0;
 	}
 
@@ -9811,7 +9860,7 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+		sanitize_mark_insn_seen(env);
 		prev_insn_idx = env->insn_idx;
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10031,7 +10080,7 @@ static int do_check(struct bpf_verifier_env *env)
 				return err;
 
 			env->insn_idx++;
-			env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+			sanitize_mark_insn_seen(env);
 		} else {
 			verbose(env, "invalid BPF_LD mode\n");
 			return -EINVAL;
@@ -10439,6 +10488,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 {
 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
 	struct bpf_insn *insn = new_prog->insnsi;
+	u32 old_seen = old_data[off].seen;
 	u32 prog_len;
 	int i;
 
@@ -10459,7 +10509,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
 	for (i = off; i < off + cnt - 1; i++) {
-		new_data[i].seen = env->pass_cnt;
+		/* Expand insni[off]'s seen count to the patched range. */
+		new_data[i].seen = old_seen;
 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
 	}
 	env->insn_aux_data = new_data;
@@ -11703,6 +11754,9 @@ static void free_states(struct bpf_verifier_env *env)
 * insn_aux_data was touched. These variables are compared to clear temporary
 * data from failed pass. For testing and experiments do_check_common() can be
 * run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
 */
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
{
@@ -463,6 +463,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
 	VMCOREINFO_STRUCT_SIZE(mem_section);
 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
+	VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
 	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
 #endif
 	VMCOREINFO_STRUCT_SIZE(page);
@@ -3776,11 +3776,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	u32 divider = get_pelt_divider(&cfs_rq->avg);
+
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -2196,9 +2196,6 @@ struct saved_cmdlines_buffer {
 };
 static struct saved_cmdlines_buffer *savedcmd;
 
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
 static inline char *get_saved_cmdlines(int idx)
 {
 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2484,8 +2481,6 @@ static bool tracing_record_taskinfo_skip(int flags)
 {
 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
 		return true;
-	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
-		return true;
 	if (!__this_cpu_read(trace_taskinfo_save))
 		return true;
 	return false;
@@ -3686,9 +3681,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		return ERR_PTR(-EBUSY);
 #endif
 
-	if (!iter->snapshot)
-		atomic_inc(&trace_record_taskinfo_disabled);
-
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
 		iter->cpu = 0;
@@ -3731,9 +3723,6 @@ static void s_stop(struct seq_file *m, void *p)
 		return;
 #endif
 
-	if (!iter->snapshot)
-		atomic_dec(&trace_record_taskinfo_disabled);
-
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }