Merge 5.10.7 into android12-5.10

Changes in 5.10.7
	i40e: Fix Error I40E_AQ_RC_EINVAL when removing VFs
	iavf: fix double-release of rtnl_lock
	net/sched: sch_taprio: ensure to reset/destroy all child qdiscs
	net: mvpp2: Add TCAM entry to drop flow control pause frames
	net: mvpp2: prs: fix PPPoE with ipv6 packet parse
	net: systemport: set dev->max_mtu to UMAC_MAX_MTU_SIZE
	ethernet: ucc_geth: fix use-after-free in ucc_geth_remove()
	ethernet: ucc_geth: set dev->max_mtu to 1518
	ionic: account for vlan tag len in rx buffer len
	atm: idt77252: call pci_disable_device() on error path
	net: mvpp2: Fix GoP port 3 Networking Complex Control configurations
	net: stmmac: dwmac-meson8b: ignore the second clock input
	ibmvnic: fix login buffer memory leak
	ibmvnic: continue fatal error reset after passive init
	net: ethernet: mvneta: Fix error handling in mvneta_probe
	qede: fix offload for IPIP tunnel packets
	virtio_net: Fix recursive call to cpus_read_lock()
	net/ncsi: Use real net-device for response handler
	net: ethernet: Fix memleak in ethoc_probe
	net-sysfs: take the rtnl lock when storing xps_cpus
	net-sysfs: take the rtnl lock when accessing xps_cpus_map and num_tc
	net-sysfs: take the rtnl lock when storing xps_rxqs
	net-sysfs: take the rtnl lock when accessing xps_rxqs_map and num_tc
	net: ethernet: ti: cpts: fix ethtool output when no ptp_clock registered
	tun: fix return value when the number of iovs exceeds MAX_SKB_FRAGS
	e1000e: Only run S0ix flows if shutdown succeeded
	e1000e: bump up timeout to wait when ME un-configures ULP mode
	Revert "e1000e: disable s0ix entry and exit flows for ME systems"
	e1000e: Export S0ix flags to ethtool
	bnxt_en: Check TQM rings for maximum supported value.
	net: mvpp2: fix pkt coalescing int-threshold configuration
	bnxt_en: Fix AER recovery.
	ipv4: Ignore ECN bits for fib lookups in fib_compute_spec_dst()
	net: sched: prevent invalid Scell_log shift count
	net: hns: fix return value check in __lb_other_process()
	erspan: fix version 1 check in gre_parse_header()
	net: hdlc_ppp: Fix issues when mod_timer is called while timer is running
	bareudp: set NETIF_F_LLTX flag
	bareudp: Fix use of incorrect min_headroom size
	vhost_net: fix ubuf refcount incorrectly when sendmsg fails
	r8169: work around power-saving bug on some chip versions
	net: dsa: lantiq_gswip: Enable GSWIP_MII_CFG_EN also for internal PHYs
	net: dsa: lantiq_gswip: Fix GSWIP_MII_CFG(p) register access
	CDC-NCM: remove "connected" log message
	ibmvnic: fix: NULL pointer dereference.
	net: usb: qmi_wwan: add Quectel EM160R-GL
	selftests: mlxsw: Set headroom size of correct port
	stmmac: intel: Add PCI IDs for TGL-H platform
	selftests/vm: fix building protection keys test
	block: add debugfs stanza for QUEUE_FLAG_NOWAIT
	workqueue: Kick a worker based on the actual activation of delayed works
	scsi: ufs: Fix wrong print message in dev_err()
	scsi: ufs-pci: Fix restore from S4 for Intel controllers
	scsi: ufs-pci: Ensure UFS device is in PowerDown mode for suspend-to-disk ->poweroff()
	scsi: ufs-pci: Fix recovery from hibernate exit errors for Intel controllers
	scsi: ufs-pci: Enable UFSHCD_CAP_RPM_AUTOSUSPEND for Intel controllers
	scsi: block: Introduce BLK_MQ_REQ_PM
	scsi: ide: Do not set the RQF_PREEMPT flag for sense requests
	scsi: ide: Mark power management requests with RQF_PM instead of RQF_PREEMPT
	scsi: scsi_transport_spi: Set RQF_PM for domain validation commands
	scsi: core: Only process PM requests if rpm_status != RPM_ACTIVE
	local64.h: make <asm/local64.h> mandatory
	lib/genalloc: fix the overflow when size is too big
	depmod: handle the case of /sbin/depmod without /sbin in PATH
	scsi: ufs: Clear UAC for FFU and RPMB LUNs
	kbuild: don't hardcode depmod path
	Bluetooth: revert: hci_h5: close serdev device and free hu in h5_close
	scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
	scsi: block: Do not accept any requests while suspended
	crypto: ecdh - avoid buffer overflow in ecdh_set_secret()
	crypto: asym_tpm: correct zero out potential secrets
	powerpc: Handle .text.{hot,unlikely}.* in linker script
	Staging: comedi: Return -EFAULT if copy_to_user() fails
	staging: mt7621-dma: Fix a resource leak in an error handling path
	usb: gadget: enable super speed plus
	USB: cdc-acm: blacklist another IR Droid device
	USB: cdc-wdm: Fix use after free in service_outstanding_interrupt().
	usb: typec: intel_pmc_mux: Configure HPD first for HPD+IRQ request
	usb: dwc3: meson-g12a: disable clk on error handling path in probe
	usb: dwc3: gadget: Restart DWC3 gadget when enabling pullup
	usb: dwc3: gadget: Clear wait flag on dequeue
	usb: dwc3: ulpi: Use VStsDone to detect PHY regs access completion
	usb: dwc3: ulpi: Replace CPU-based busyloop with Protocol-based one
	usb: dwc3: ulpi: Fix USB2.0 HS/FS/LS PHY suspend regression
	usb: chipidea: ci_hdrc_imx: add missing put_device() call in usbmisc_get_init_data()
	USB: xhci: fix U1/U2 handling for hardware with XHCI_INTEL_HOST quirk set
	usb: usbip: vhci_hcd: protect shift size
	usb: uas: Add PNY USB Portable SSD to unusual_uas
	USB: serial: iuu_phoenix: fix DMA from stack
	USB: serial: option: add LongSung M5710 module support
	USB: serial: option: add Quectel EM160R-GL
	USB: yurex: fix control-URB timeout handling
	USB: usblp: fix DMA to stack
	ALSA: usb-audio: Fix UBSAN warnings for MIDI jacks
	usb: gadget: select CONFIG_CRC32
	USB: Gadget: dummy-hcd: Fix shift-out-of-bounds bug
	usb: gadget: f_uac2: reset wMaxPacketSize
	usb: gadget: function: printer: Fix a memory leak for interface descriptor
	usb: gadget: u_ether: Fix MTU size mismatch with RX packet size
	USB: gadget: legacy: fix return error code in acm_ms_bind()
	usb: gadget: Fix spinlock lockup on usb_function_deactivate
	usb: gadget: configfs: Preserve function ordering after bind failure
	usb: gadget: configfs: Fix use-after-free issue with udc_name
	USB: serial: keyspan_pda: remove unused variable
	hwmon: (amd_energy) fix allocation of hwmon_channel_info config
	mm: make wait_on_page_writeback() wait for multiple pending writebacks
	x86/mm: Fix leak of pmd ptlock
	KVM: x86/mmu: Use -1 to flag an undefined spte in get_mmio_spte()
	KVM: x86/mmu: Get root level from walkers when retrieving MMIO SPTE
	kvm: check tlbs_dirty directly
	KVM: x86/mmu: Ensure TDP MMU roots are freed after yield
	x86/resctrl: Use an IPI instead of task_work_add() to update PQR_ASSOC MSR
	x86/resctrl: Don't move a task to the same resource group
	blk-iocost: fix NULL iocg deref from racing against initialization
	ALSA: hda/via: Fix runtime PM for Clevo W35xSS
	ALSA: hda/conexant: add a new hda codec CX11970
	ALSA: hda/realtek - Fix speaker volume control on Lenovo C940
	ALSA: hda/realtek: Add mute LED quirk for more HP laptops
	ALSA: hda/realtek: Enable mute and micmute LED on HP EliteBook 850 G7
	ALSA: hda/realtek: Add two "Intel Reference board" SSID in the ALC256.
	iommu/vt-d: Move intel_iommu info from struct intel_svm to struct intel_svm_dev
	btrfs: qgroup: don't try to wait flushing if we're already holding a transaction
	btrfs: send: fix wrong file path when there is an inode with a pending rmdir
	Revert "device property: Keep secondary firmware node secondary by type"
	dmabuf: fix use-after-free of dmabuf's file->f_inode
	arm64: link with -z norelro for LLD or aarch64-elf
	drm/i915: clear the shadow batch
	drm/i915: clear the gpu reloc batch
	bcache: fix typo from SUUP to SUPP in features.h
	bcache: check unsupported feature sets for bcache register
	bcache: introduce BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE for large bucket
	net/mlx5e: Fix SWP offsets when vlan inserted by driver
	ARM: dts: OMAP3: disable AES on N950/N9
	netfilter: x_tables: Update remaining dereference to RCU
	netfilter: ipset: fix shift-out-of-bounds in htable_bits()
	netfilter: xt_RATEEST: reject non-null terminated string from userspace
	netfilter: nft_dynset: report EOPNOTSUPP on missing set feature
	dmaengine: idxd: off by one in cleanup code
	x86/mtrr: Correct the range check before performing MTRR type lookups
	KVM: x86: fix shift out of bounds reported by UBSAN
	xsk: Fix memory leak for failed bind
	rtlwifi: rise completion at the last step of firmware callback
	scsi: target: Fix XCOPY NAA identifier lookup
	Linux 5.10.7

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I1a7c195af35831fe362b027fe013c0c7e4dc20ea
commit 7eadb0006a
Author: Greg Kroah-Hartman
Date:   2021-01-12 20:55:56 +01:00
159 changed files with 1153 additions and 667 deletions

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 6
SUBLEVEL = 7
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@ -451,7 +451,7 @@ LEX = flex
YACC = bison
AWK = awk
INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
DEPMOD = depmod
PERL = perl
PYTHON = python
PYTHON3 = python3

@ -1 +0,0 @@
#include <asm-generic/local64.h>

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h

@ -494,3 +494,11 @@ bluetooth {
clock-names = "sysclk";
};
};
&aes1_target {
status = "disabled";
};
&aes2_target {
status = "disabled";
};

@ -2,7 +2,6 @@
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += local64.h
generic-y += parport.h
generated-y += mach-types.h

@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X -z norelro
LDFLAGS_vmlinux :=--no-undefined -X
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@ -110,16 +110,20 @@ KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE := aarch64
endif
ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS += -z norelro
endif
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)

@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h

@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += vmlinux.lds.h

@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h

@ -2,5 +2,4 @@
generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@ -1 +0,0 @@
#include <asm-generic/local64.h>

@ -2,6 +2,5 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h

@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += syscalls.h

@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += qrwlock.h

@ -4,6 +4,5 @@ generic-y += cmpxchg.h
generic-y += export.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += parport.h
generic-y += user.h

@ -3,6 +3,5 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += user.h

@ -5,7 +5,6 @@ generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += vtime.h

@ -85,7 +85,7 @@ SECTIONS
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
*(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
*(.tramp.ftrace.text);
#endif

@ -3,6 +3,5 @@ generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += user.h
generic-y += vmlinux.lds.h

@ -7,5 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h

@ -6,5 +6,4 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@ -1 +0,0 @@
#include <asm-generic/local64.h>

@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
*repeat = 0;
*uniform = 1;
/* Make end inclusive instead of exclusive */
end--;
prev_match = MTRR_TYPE_INVALID;
for (i = 0; i < num_var_ranges; ++i) {
unsigned short start_state, end_state, inclusive;
@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
int repeat;
u64 partial_end;
/* Make end inclusive instead of exclusive */
end--;
if (!mtrr_state_set)
return MTRR_TYPE_INVALID;

@ -525,89 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
kfree(rdtgrp);
}
struct task_move_callback {
struct callback_head work;
struct rdtgroup *rdtgrp;
};
static void move_myself(struct callback_head *head)
static void _update_task_closid_rmid(void *task)
{
struct task_move_callback *callback;
struct rdtgroup *rdtgrp;
callback = container_of(head, struct task_move_callback, work);
rdtgrp = callback->rdtgrp;
/*
* If resource group was deleted before this task work callback
* was invoked, then assign the task to root group and free the
* resource group.
* If the task is still current on this CPU, update PQR_ASSOC MSR.
* Otherwise, the MSR is updated when the task is scheduled in.
*/
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
current->rmid = 0;
rdtgroup_remove(rdtgrp);
}
if (task == current)
resctrl_sched_in();
}
if (unlikely(current->flags & PF_EXITING))
goto out;
preempt_disable();
/* update PQR_ASSOC MSR to make resource group go into effect */
resctrl_sched_in();
preempt_enable();
out:
kfree(callback);
static void update_task_closid_rmid(struct task_struct *t)
{
if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
else
_update_task_closid_rmid(t);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
struct rdtgroup *rdtgrp)
{
struct task_move_callback *callback;
int ret;
callback = kzalloc(sizeof(*callback), GFP_KERNEL);
if (!callback)
return -ENOMEM;
callback->work.func = move_myself;
callback->rdtgrp = rdtgrp;
/* If the task is already in rdtgrp, no need to move the task. */
if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
tsk->rmid == rdtgrp->mon.rmid) ||
(rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
tsk->closid == rdtgrp->mon.parent->closid))
return 0;
/*
* Take a refcount, so rdtgrp cannot be freed before the
* callback has been invoked.
* Set the task's closid/rmid before the PQR_ASSOC MSR can be
* updated by them.
*
* For ctrl_mon groups, move both closid and rmid.
* For monitor groups, can move the tasks only from
* their parent CTRL group.
*/
atomic_inc(&rdtgrp->waitcount);
ret = task_work_add(tsk, &callback->work, TWA_RESUME);
if (ret) {
/*
* Task is exiting. Drop the refcount and free the callback.
* No need to check the refcount as the group cannot be
* deleted before the write function unlocks rdtgroup_mutex.
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
rdt_last_cmd_puts("Task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
* For monitor groups, can move the tasks only from
* their parent CTRL group.
*/
if (rdtgrp->type == RDTCTRL_GROUP) {
tsk->closid = rdtgrp->closid;
if (rdtgrp->type == RDTCTRL_GROUP) {
tsk->closid = rdtgrp->closid;
tsk->rmid = rdtgrp->mon.rmid;
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
ret = -EINVAL;
}
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
return -EINVAL;
}
}
return ret;
/*
* Ensure the task's closid and rmid are written before determining if
* the task is current that will decide if it will be interrupted.
*/
barrier();
/*
* By now, the task's closid and rmid are set. If the task is current
* on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
* group go into effect. If the task is not current, the MSR will be
* updated when the task is scheduled in.
*/
update_task_closid_rmid(tsk);
return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)

@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
if (e < s)
return 0;
return ((1ULL << (e - s + 1)) - 1) << s;
return ((2ULL << (e - s)) - 1) << s;
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

@ -3485,16 +3485,16 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
struct kvm_shadow_walk_iterator iterator;
int leaf = vcpu->arch.mmu->root_level;
int leaf = -1;
u64 spte;
walk_shadow_page_lockless_begin(vcpu);
for (shadow_walk_init(&iterator, vcpu, addr);
for (shadow_walk_init(&iterator, vcpu, addr),
*root_level = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
leaf = iterator.level;
@ -3504,7 +3504,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
if (!is_shadow_present_pte(spte))
break;
}
walk_shadow_page_lockless_end(vcpu);
@ -3517,9 +3516,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
u64 sptes[PT64_ROOT_MAX_LEVEL];
struct rsvd_bits_validate *rsvd_check;
int root = vcpu->arch.mmu->shadow_root_level;
int leaf;
int level;
int root, leaf, level;
bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@ -3528,9 +3525,14 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
}
if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes);
leaf = get_walk(vcpu, addr, sptes, &root);
if (unlikely(leaf < 0)) {
*sptep = 0ull;
return reserved;
}
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

@ -42,7 +42,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
#define for_each_tdp_mmu_root(_kvm, _root) \
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
if (kvm_mmu_put_root(kvm, root))
kvm_tdp_mmu_free_root(kvm, root);
}
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
struct kvm_mmu_page *root)
{
lockdep_assert_held(&kvm->mmu_lock);
if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
return false;
kvm_mmu_get_root(kvm, root);
return true;
}
static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *root)
{
struct kvm_mmu_page *next_root;
next_root = list_next_entry(root, link);
tdp_mmu_put_root(kvm, root);
return next_root;
}
/*
* Note: this iterator gets and puts references to the roots it iterates over.
* This makes it safe to release the MMU lock and yield within the loop, but
* if exiting the loop early, the caller must drop the reference to the most
* recent root. (Unless keeping a live reference is desirable.)
*/
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
typeof(*_root), link); \
tdp_mmu_next_root_valid(_kvm, _root); \
_root = tdp_mmu_next_root(_kvm, _root))
#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
@ -439,18 +480,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
struct kvm_mmu_page *root;
bool flush = false;
for_each_tdp_mmu_root(kvm, root) {
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
for_each_tdp_mmu_root_yield_safe(kvm, root)
flush |= zap_gfn_range(kvm, root, start, end, true);
kvm_mmu_put_root(kvm, root);
}
return flush;
}
@ -609,13 +641,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
int ret = 0;
int as_id;
for_each_tdp_mmu_root(kvm, root) {
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
for_each_tdp_mmu_root_yield_safe(kvm, root) {
as_id = kvm_mmu_page_as_id(root);
slots = __kvm_memslots(kvm, as_id);
kvm_for_each_memslot(memslot, slots) {
@ -637,8 +663,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
ret |= handler(kvm, memslot, root, gfn_start,
gfn_end, data);
}
kvm_mmu_put_root(kvm, root);
}
return ret;
@ -826,21 +850,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
@ -894,21 +910,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
@ -1017,21 +1025,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
}
@ -1077,21 +1077,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
}
@ -1148,12 +1140,15 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
struct tdp_iter iter;
struct kvm_mmu *mmu = vcpu->arch.mmu;
int leaf = vcpu->arch.mmu->shadow_root_level;
gfn_t gfn = addr >> PAGE_SHIFT;
int leaf = -1;
*root_level = vcpu->arch.mmu->shadow_root_level;
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
leaf = iter.level;

@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#endif /* __KVM_X86_MMU_TDP_MMU_H */

@ -829,6 +829,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
}
free_page((unsigned long)pmd_sv);
pgtable_pmd_page_dtor(virt_to_page(pmd));
free_page((unsigned long)pmd);
return 1;

@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h

@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@ -424,11 +425,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
const bool pm = flags & BLK_MQ_REQ_PREEMPT;
const bool pm = flags & BLK_MQ_REQ_PM;
while (true) {
bool success = false;
@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
* responsible for ensuring that that counter is
* globally visible before the queue is unfrozen.
*/
if (pm || !blk_queue_pm_only(q)) {
if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
!blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
(pm || (blk_pm_request_resume(q),
!blk_queue_pm_only(q)))) ||
blk_pm_resume_queue(pm, q)) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@ -630,7 +631,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
struct request *req;
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)

@ -2525,8 +2525,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
bool use_debt, ioc_locked;
unsigned long flags;
/* bypass IOs if disabled or for root cgroup */
if (!ioc->enabled || !iocg->level)
/* bypass IOs if disabled, still initializing, or for root cgroup */
if (!ioc->enabled || !iocg || !iocg->level)
return;
/* calculate the absolute vtime cost */
@ -2653,14 +2653,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
struct bio *bio)
{
struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
struct ioc *ioc = iocg->ioc;
struct ioc *ioc = rqos_to_ioc(rqos);
sector_t bio_end = bio_end_sector(bio);
struct ioc_now now;
u64 vtime, abs_cost, cost;
unsigned long flags;
/* bypass if disabled or for root cgroup */
if (!ioc->enabled || !iocg->level)
/* bypass if disabled, still initializing, or for root cgroup */
if (!ioc->enabled || !iocg || !iocg->level)
return;
abs_cost = calc_vtime_cost(bio, iocg, true);
@ -2837,6 +2837,12 @@ static int blk_iocost_init(struct request_queue *q)
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
/*
* rqos must be added before activation to allow iocg_pd_init() to
* lookup the ioc from q. This means that the rqos methods may get
* called before policy activation completion, can't assume that the
* target bio has an iocg associated and need to test for NULL iocg.
*/
rq_qos_add(q, rqos);
ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
if (ret) {

@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
@ -297,7 +298,6 @@ static const char *const rqf_name[] = {
RQF_NAME(MIXED_MERGE),
RQF_NAME(MQ_INFLIGHT),
RQF_NAME(DONTPREP),
RQF_NAME(PREEMPT),
RQF_NAME(FAILED),
RQF_NAME(QUIET),
RQF_NAME(ELVPRIV),

@ -292,8 +292,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->mq_hctx = data->hctx;
rq->rq_flags = 0;
rq->cmd_flags = data->cmd_flags;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
if (data->flags & BLK_MQ_REQ_PM)
rq->rq_flags |= RQF_PM;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
INIT_LIST_HEAD(&rq->queuelist);

@ -6,11 +6,14 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
static inline void blk_pm_request_resume(struct request_queue *q)
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
if (!q->dev || !blk_queue_pm_only(q))
return 1; /* Nothing to do */
if (pm && q->rpm_status != RPM_SUSPENDED)
return 1; /* Request allowed */
pm_request_resume(q->dev);
return 0;
}
static inline void blk_pm_mark_last_busy(struct request *rq)
@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
--rq->q->nr_pending;
}
#else
static inline void blk_pm_request_resume(struct request_queue *q)
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
return 1;
}
static inline void blk_pm_mark_last_busy(struct request *rq)

@ -354,7 +354,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
memcpy(cur, e, sizeof(e));
cur += sizeof(e);
/* Zero parameters to satisfy set_pub_key ABI. */
memset(cur, 0, SETKEY_PARAMS_SIZE);
memzero_explicit(cur, SETKEY_PARAMS_SIZE);
return cur - buf;
}

@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0)
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(ctx->private_key))
return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);

@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
return err;
goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);

@ -4276,7 +4276,7 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
if (!(parent && fn == parent->fwnode))
fn->secondary = ERR_PTR(-ENODEV);
fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}

@ -251,12 +251,8 @@ static int h5_close(struct hci_uart *hu)
if (h5->vnd && h5->vnd->close)
h5->vnd->close(h5);
if (hu->serdev)
serdev_device_close(hu->serdev);
kfree_skb(h5->rx_skb);
kfree(h5);
h5 = NULL;
if (!hu->serdev)
kfree(h5);
return 0;
}

@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
return 0;
}
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
.release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,

@ -379,7 +379,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
for (; i > 0; i--)
while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@ -1639,7 +1639,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
for (; i > 0; i--)
while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}

@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);

@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
unsigned long x, n;
unsigned long x, n, remain;
void *ptr;
/*
@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
length = round_up(length,
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
for (n = offset >> PAGE_SHIFT; length; n++) {
int len = min(length, PAGE_SIZE - x);
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
length -= len;
remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
return dst;
}
@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@ -1539,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
if (shadow_needs_clflush(shadow->obj))
drm_clflush_virt_range(batch_end, 8);
}
if (shadow_needs_clflush(shadow->obj)) {
void *ptr = page_mask_bits(shadow->obj->mm.mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);

@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
s_config = devm_kcalloc(dev, cpus + sockets,
s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
s_config[i] = 0;
return 0;
}

@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;

@ -515,15 +515,10 @@ blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
*
* We let requests forced at head of queue with ide-preempt
* though. I hope that doesn't happen too much, hopefully not
* unless the subdriver triggers such a thing in its own PM
* state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
(rq->rq_flags & RQF_PREEMPT) == 0) {
(rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;

@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;

@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(svm->iommu, &desc, 1, 0);
qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(svm->iommu, &desc, 1, 0);
qi_submit_sync(sdev->iommu, &desc, 1, 0);
}
}
@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@ -363,6 +363,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@ -546,6 +547,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@ -575,7 +577,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;

@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};

@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
/* 32bit bucket size, obsoleted */
#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
/* real bucket size is (1 << bucket_size) */
#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
#define BCH_FEATURE_COMPAT_SUUP 0
#define BCH_FEATURE_RO_COMPAT_SUUP 0
#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
#define BCH_FEATURE_COMPAT_SUPP 0
#define BCH_FEATURE_RO_COMPAT_SUPP 0
#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
{
return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
}
static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
{
return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
}
static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
{
return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);

@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
bch_has_feature_large_bucket(sb))
bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
if (bch_has_feature_large_bucket(sb)) {
unsigned int max, order;
max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
order = le16_to_cpu(s->bucket_size);
/*
* bcache tool will make sure the overflow won't
* happen, an error message here is enough.
*/
if (order > max)
pr_err("Bucket size (1 << %u) overflows\n",
order);
bucket_size = 1 << order;
} else if (bch_has_feature_obso_large_bucket(sb)) {
bucket_size +=
le16_to_cpu(s->obso_bucket_size_hi) << 16;
}
}
return bucket_size;
}
@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
/* Check incompatible features */
err = "Unsupported compatible feature found";
if (bch_has_unknown_compat_features(sb))
goto err;
err = "Unsupported read-only compatible feature found";
if (bch_has_unknown_ro_compat_features(sb))
goto err;
err = "Unsupported incompatible feature found";
if (bch_has_unknown_incompat_features(sb))
goto err;
err = read_super_common(sb, bdev, s);
if (err)
goto err;

@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;

@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
#define GSWIP_MII_CFG0 0x00
#define GSWIP_MII_CFG1 0x02
#define GSWIP_MII_CFG5 0x04
#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
switch (port) {
case 0:
gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
break;
case 1:
gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
break;
case 5:
gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
break;
}
/* There's no MII_CFG register for the CPU port */
if (!dsa_is_cpu_port(priv->ds, port))
gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
for (i = 0; i < priv->hw_info->max_ports; i++)
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@ -1541,9 +1530,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
/* Enable the xMII interface only for the external PHY */
if (interface != PHY_INTERFACE_MODE_INTERNAL)
gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,

@ -2577,6 +2577,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;

@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
tqm_rings = ctx->tqm_fp_rings_count + 1;
tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
i < BNXT_MAX_TQM_RINGS;
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
if (!err) {
err = bnxt_hwrm_func_qcaps(bp);
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
}
bnxt_ulp_start(bp, err);
if (!err) {
bnxt_reenable_sriov(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
}
}
if (result != PCI_ERS_RESULT_RECOVERED) {
if (netif_running(netdev))
dev_close(netdev);
pci_disable_device(pdev);
}
rtnl_unlock();
@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
netif_device_attach(netdev);
err = bnxt_hwrm_func_qcaps(bp);
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
bnxt_ulp_start(bp, err);
if (!err) {
bnxt_reenable_sriov(bp);
netif_device_attach(netdev);
}
rtnl_unlock();
}

@ -1435,6 +1435,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
#define BNXT_MAX_TQM_SP_RINGS 1
#define BNXT_MAX_TQM_FP_RINGS 8
#define BNXT_MAX_TQM_RINGS \
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@ -1473,7 +1478,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9];
struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {

@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
goto free2;
goto free3;
}
ret = ethoc_mdio_probe(netdev);
@ -1243,6 +1243,7 @@ static int ethoc_probe(struct platform_device *pdev)
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);

@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
free_netdev(dev);
return 0;
}

@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
if (!new_skb) {
netdev_err(ndev, "skb alloc failed\n");
return;
}
skb = new_skb;
check_ok = 0;

@ -932,6 +932,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@ -2247,8 +2248,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
adapter->from_passive_init)) {
} else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@ -2869,9 +2869,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
netdev_dbg(adapter->netdev,
"Invalid scrq reset. irq (%d) or msgs (%p).\n",
scrq->irq, scrq->msgs);
netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@ -3768,7 +3766,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
client_data_len = vnic_client_data_len(adapter);
buffer_size =

@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))

View File

@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
"s0ix-enabled",
};
#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
case ETH_SS_PRIV_FLAGS:
return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, e1000e_priv_flags_strings,
E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
}
}
@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
static u32 e1000e_get_priv_flags(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
return priv_flags;
}
static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
unsigned int flags2 = adapter->flags2;
flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
struct e1000_hw *hw = &adapter->hw;
if (hw->mac.type < e1000_pch_cnp)
return -EINVAL;
flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
}
if (flags2 != adapter->flags2)
adapter->flags2 = flags2;
return 0;
}
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
.get_priv_flags = e1000e_get_priv_flags,
.set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)

@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
struct e1000_adapter *adapter = hw->adapter;
bool firmware_bug = false;
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
/* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
* If this takes more than 1 second, show a warning indicating a
* firmware bug
*/
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
if (i++ == 30) {
if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
if (i > 100 && !firmware_bug)
firmware_bug = true;
usleep_range(10000, 11000);
}
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (firmware_bug)
e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
else
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);

@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
struct e1000e_me_supported {
u16 device_id; /* supported device ID */
};
static const struct e1000e_me_supported me_supported[] = {
{E1000_DEV_ID_PCH_LPT_I217_LM},
{E1000_DEV_ID_PCH_LPTLP_I218_LM},
{E1000_DEV_ID_PCH_I218_LM2},
{E1000_DEV_ID_PCH_I218_LM3},
{E1000_DEV_ID_PCH_SPT_I219_LM},
{E1000_DEV_ID_PCH_SPT_I219_LM2},
{E1000_DEV_ID_PCH_LBG_I219_LM3},
{E1000_DEV_ID_PCH_SPT_I219_LM4},
{E1000_DEV_ID_PCH_SPT_I219_LM5},
{E1000_DEV_ID_PCH_CNP_I219_LM6},
{E1000_DEV_ID_PCH_CNP_I219_LM7},
{E1000_DEV_ID_PCH_ICP_I219_LM8},
{E1000_DEV_ID_PCH_ICP_I219_LM9},
{E1000_DEV_ID_PCH_CMP_I219_LM10},
{E1000_DEV_ID_PCH_CMP_I219_LM11},
{E1000_DEV_ID_PCH_CMP_I219_LM12},
{E1000_DEV_ID_PCH_TGP_I219_LM13},
{E1000_DEV_ID_PCH_TGP_I219_LM14},
{E1000_DEV_ID_PCH_TGP_I219_LM15},
{0}
};
static bool e1000e_check_me(u16 device_id)
{
struct e1000e_me_supported *id;
for (id = (struct e1000e_me_supported *)me_supported;
id->device_id; id++)
if (device_id == id->device_id)
return true;
return false;
}
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
if (rc)
if (rc) {
e1000e_pm_thaw(dev);
/* Introduce S0ix implementation */
if (hw->mac.type >= e1000_pch_cnp &&
!e1000e_check_me(hw->adapter->pdev->device))
e1000e_s0ix_entry_flow(adapter);
} else {
/* Introduce S0ix implementation */
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_entry_flow(adapter);
}
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
if (hw->mac.type >= e1000_pch_cnp &&
!e1000e_check_me(hw->adapter->pdev->device))
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)


@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
__I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
#define I40E_PF_RESET_AND_REBUILD_FLAG \
BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {


@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
/* Request a PF Reset
*
* Resets PF and reinitializes PFs VSI.
*/
i40e_prep_for_reset(pf, lock_acquired);
i40e_reset_and_rebuild(pf, true, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;


@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;


@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
if (err) {
rtnl_unlock();
if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
}
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)


@@ -5232,7 +5232,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
return err;
goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu


@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu();
/* PKT-coalescing registers are per-queue + per-thread */
for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
}
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)


@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
/* Drop flow control pause frames */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
struct mvpp2_prs_entry pe;
unsigned int len;
memset(&pe, 0, sizeof(pe));
/* For all ports - drop flow control frames */
pe.index = MVPP2_PE_FC_DROP;
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
/* Set match on DA */
len = ETH_ALEN;
while (len--)
mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
MVPP2_PRS_RI_DROP_MASK);
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
mvpp2_prs_hw_write(priv, &pe);
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
/* Skip eth_type + 4 bytes of IPv6 header */
mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
/* Jump to DIP of IPV6 header */
mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,


@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
/* reserved */
#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)


@@ -366,6 +366,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
/* SWP offsets are in 2-bytes words */
eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)


@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
if (skb_vlan_tag_present(skb) && ihs)
mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
mlx5e_tx_tunnel_accel(skb, eseg);
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;


@@ -615,9 +615,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -647,7 +647,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -664,7 +665,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());


@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
len = netdev->mtu + ETH_HLEN;
len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {


@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
} else if (l4_proto == IPPROTO_IPIP) {
/* IPIP tunnels are unknown to the device or at least unsupported natively,
* offloads for them can't be done trivially, so disable them for such skb.
*/
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}


@@ -2243,7 +2243,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
}
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -2269,7 +2270,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:


@@ -725,6 +725,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -739,6 +741,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);


@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
{ .fw_name = "clkin1", },
{ .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },


@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;


@@ -1401,7 +1401,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
return ERR_PTR(-ENOMEM);
return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);


@@ -1863,9 +1863,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
netif_info(dev, link, dev->net,
"network connection: %sconnected\n",
!!event->wValue ? "" : "dis");
usbnet_link_change(dev, !!event->wValue, 0);
break;


@@ -1036,6 +1036,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */


@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
virtnet_set_affinity(vi);
if (err) {
put_online_cpus();
goto err;
}
virtnet_set_affinity(vi);
put_online_cpus();
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
err:
return err;
}


@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
/* mod_timer could be called after we entered this function but
* before we got the lock.
*/
if (timer_pending(&proto->timer)) {
spin_unlock_irqrestore(&ppp->lock, flags);
return;
}
switch (proto->state) {
case STOPPING:
case REQ_SENT:


@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
return;
goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
exit:
complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)


@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -1203,6 +1204,8 @@ static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
case SDEV_CREATED:
return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1229,18 +1232,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
* If the devices is blocked we defer normal commands.
* If the device is blocked we only accept power management
* commands.
*/
if (req && !(req->rq_flags & RQF_PREEMPT))
if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
* special commands. In particular any user initiated
* command is not allowed.
* power management commands.
*/
if (req && !(req->rq_flags & RQF_PREEMPT))
if (req && !(req->rq_flags & RQF_PM))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
@@ -2508,15 +2511,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
* scsi_device_quiesce - Block user issued commands.
* scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
* state, only special requests will be accepted, all others will
* be deferred. Since special requests may also be requeued requests,
* a successful return doesn't guarantee the device will be
* totally quiescent.
* state, only power management requests will be accepted, all others will
* be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2578,12 +2579,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);


@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
/*
* The purpose of the RQF_PM flag below is to bypass the
* SDEV_QUIESCE state.
*/
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
0, NULL);
RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
if (scsi_autopm_get_device(sdev))
goto unlock_system_sleep;
if (unlikely(spi_dv_in_progress(starget)))
goto unlock;
goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
goto unlock;
goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
goto out_put;
goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
goto out_free;
goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
out_free:
free_buffer:
kfree(buffer);
out_put:
put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
unlock:
put_autopm:
scsi_autopm_put_device(sdev);
unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);


@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
intel_ltr_hide(hba->dev);
}
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
/*
* To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
* address registers must be restored because the restore kernel can
* have used different addresses.
*/
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_H);
ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_H);
if (ufshcd_is_link_hibern8(hba)) {
int ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
ufshcd_set_link_active(hba);
} else {
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
/*
* Force reset and restore. Any other actions can lead
* to an unrecoverable state.
*/
ufshcd_set_link_off(hba);
}
}
return 0;
}
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
};
#ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
/**
* ufshcd_pci_poweroff - suspend-to-disk poweroff function
* @dev: pointer to PCI device handle
*
* Returns 0 if successful
* Returns non-zero otherwise
*/
static int ufshcd_pci_poweroff(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int spm_lvl = hba->spm_lvl;
int ret;
/*
* For poweroff we need to set the UFS device to PowerDown mode.
* Force spm_lvl to ensure that.
*/
hba->spm_lvl = 5;
ret = ufshcd_system_suspend(hba);
hba->spm_lvl = spm_lvl;
return ret;
}
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
ufshcd_pci_resume)
#ifdef CONFIG_PM_SLEEP
.suspend = ufshcd_pci_suspend,
.resume = ufshcd_pci_resume,
.freeze = ufshcd_pci_suspend,
.thaw = ufshcd_pci_resume,
.poweroff = ufshcd_pci_poweroff,
.restore = ufshcd_pci_resume,
#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)


@@ -3661,7 +3661,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
"dme-reset: error code %d\n", ret);
"dme-enable: error code %d\n", ret);
return ret;
}
@@ -7726,13 +7726,12 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
int ret = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
sdp = hba->sdev_ufs_device;
else if (wlun == UFS_UPIU_RPMB_WLUN)
else if (wlun == UFS_UPIU_RPMB_WLUN)
sdp = hba->sdev_rpmb;
else
return -EINVAL;
BUG_ON(1);
if (sdp) {
ret = scsi_device_get(sdp);
if (!ret && !scsi_device_online(sdp)) {
@@ -7895,6 +7894,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
pm_runtime_put_sync(hba->dev);
ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
} else {
ufshcd_clear_ua_wluns(hba);
}
}


@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
return copy_to_user(cmd32, &v32, sizeof(v32));
if (copy_to_user(cmd32, &v32, sizeof(v32)))
return -EFAULT;
return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */


@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
return ret;
goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
err_uninit_hsdma:
mtk_hsdma_uninit(hsdma);
return ret;
}


@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
struct xcopy_dev_search_info {
const unsigned char *dev_wwn;
struct se_device *found_dev;
};
/**
* target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
*
* @se_dev: device being considered for match
* @dev_wwn: XCOPY requested NAA dev_wwn
* @return: 1 on match, 0 on no-match
*/
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
void *data)
const unsigned char *dev_wwn)
{
struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
if (!se_dev->dev_attrib.emulate_3pc)
if (!se_dev->dev_attrib.emulate_3pc) {
pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
}
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0)
rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
pr_debug("XCOPY: skip non-matching: %*ph\n",
XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
return 0;
info->found_dev = se_dev;
}
pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
rc = target_depend_item(&se_dev->dev_group.cg_item);
if (rc != 0) {
pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
rc, se_dev);
return rc;
}
pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
se_dev, &se_dev->dev_group);
return 1;
}
static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
struct se_device **found_dev)
static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
const unsigned char *dev_wwn,
struct se_device **_found_dev,
struct percpu_ref **_found_lun_ref)
{
struct xcopy_dev_search_info info;
int ret;
struct se_dev_entry *deve;
struct se_node_acl *nacl;
struct se_lun *this_lun = NULL;
struct se_device *found_dev = NULL;
memset(&info, 0, sizeof(info));
info.dev_wwn = dev_wwn;
/* cmd with NULL sess indicates no associated $FABRIC_MOD */
if (!sess)
goto err_out;
ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
if (ret == 1) {
*found_dev = info.found_dev;
return 0;
} else {
pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
pr_debug("XCOPY 0xe4: searching for: %*ph\n",
XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
nacl = sess->se_node_acl;
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_device *this_dev;
int rc;
this_lun = rcu_dereference(deve->se_lun);
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
if (rc) {
if (percpu_ref_tryget_live(&this_lun->lun_ref))
found_dev = this_dev;
break;
}
}
rcu_read_unlock();
if (found_dev == NULL)
goto err_out;
pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
found_dev, &found_dev->dev_group);
*_found_dev = found_dev;
*_found_lun_ref = &this_lun->lun_ref;
return 0;
err_out:
pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
&xop->dst_dev);
rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
xop->dst_tid_wwn,
&xop->dst_dev,
&xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
&xop->src_dev);
rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
xop->src_tid_wwn,
&xop->src_dev,
&xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
struct se_device *remote_dev;
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
remote_dev = xop->dst_dev;
pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
remote_dev = xop->src_dev;
pr_debug("putting src lun_ref for %p\n", xop->src_dev);
pr_debug("Calling configfs_undepend_item for"
" remote_dev: %p remote_dev->dev_group: %p\n",
remote_dev, &remote_dev->dev_group.cg_item);
target_undepend_item(&remote_dev->dev_group.cg_item);
percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)


@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;


@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!misc_pdev || !platform_get_drvdata(misc_pdev))
if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
if (!platform_get_drvdata(misc_pdev)) {
put_device(&misc_pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
data->dev = &misc_pdev->dev;
/*


@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
{ USB_DEVICE(0x04d8, 0xf58b),
.driver_info = IGNORE_DEVICE,
},
#endif
/*Samsung phone in firmware update mode */


@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
rv = -ENODEV;
goto out;
}
if (test_bit(WDM_RESETTING, &desc->flags)) {
rv = -EIO;
goto out;
}
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
dev_err(&desc->intf->dev,
"usb_submit_urb failed with result %d\n", rv);
if (!test_bit(WDM_DISCONNECTING, &desc->flags))
dev_err(&desc->intf->dev,
"usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
