This is the 6.1.23 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmQumsIACgkQONu9yGCS
 aT4yfBAAwaDPXomEa+DY6pkQEE7WPVtIkeO+sQIo7bWHunTDilTLRFeDUJ4THydT
 CnhhlGsBUt8KGeWgSR6hHeTl/c+b+AcBan5k5BBufUGrsDn/XV8QIEyKWhbLIEja
 qWehpogs7BJLg2dFRqTfHQEOhLht1jCmC99tfEozEG4zRudmdS3Z2DbRypfEHshc
 oGOC1Jzg4MLPfB+lCwKNrVMBlR2n/73P7mTUCu/Dc9+DUbm+GtqvsPuGT2LxVyY7
 kkNgGzvdxQQCqtK5X6zyoU61gepsobf6c6kHjBucn8mhaYURT5ndfV9VqLWkDYE7
 71iH0oY5fg2NgbMtQpbA10MokjijFp46I4QxzG/RVl2ZN2pbCFNm5aNIBCwBbF2k
 lN6hwJc1nbTi696o29o1osm+yju3347HCAWC8s+DAszXiquihiUeJBwuCfa1c+Gy
 GhdATa3nNQ/8D0gWULr/kl7DvlgpSpYrbEQGVG2gH6tdsAZt2iKYUtGLFjvDN+fw
 CoMpq2OZTX5afM7AxTX00f5lGmbXhD+T9a+pS9AXhPqKcGv1tt0Gso8dn7cpWpj5
 LxhIE9dK5F1/tI+wPE+8t80CukqQHfoCQ24YO8mfUKmlInwjGd1Hque+ihKJo7ZW
 W5CXlZJJVvpVk9BxMNaYHKfSE+U6G7hYabEAzJXR3fz9vGfoTII=
 =rz/i
 -----END PGP SIGNATURE-----

Merge 6.1.23 into android14-6.1

Changes in 6.1.23
	thunderbolt: Limit USB3 bandwidth of certain Intel USB4 host routers
	cifs: update ip_addr for ses only for primary chan setup
	cifs: prevent data race in cifs_reconnect_tcon()
	cifs: avoid race conditions with parallel reconnects
	zonefs: Reorganize code
	zonefs: Simplify IO error handling
	zonefs: Reduce struct zonefs_inode_info size
	zonefs: Separate zone information from inode information
	zonefs: Fix error message in zonefs_file_dio_append()
	fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
	kernel: kcsan: kcsan_test: build without structleak plugin
	kcsan: avoid passing -g for test
	btrfs: rename BTRFS_FS_NO_OVERCOMMIT to BTRFS_FS_ACTIVE_ZONE_TRACKING
	btrfs: zoned: count fresh BG region as zone unusable
	net: ethernet: ti: am65-cpsw/cpts: Fix CPTS release action
	riscv: ftrace: Fixup panic by disabling preemption
	ARM: dts: aspeed: p10bmc: Update battery node name
	drm/msm/dpu: Refactor sc7280_pp location
	drm/msm/dpu: correct sm8250 and sm8350 scaler
	drm/msm/disp/dpu: fix sc7280_pp base offset
	tty: serial: fsl_lpuart: switch to new dmaengine_terminate_* API
	tty: serial: fsl_lpuart: fix race on RX DMA shutdown
	tracing: Add .percent suffix option to histogram values
	tracing: Add .graph suffix option to histogram value
	tracing: Do not let histogram values have some modifiers
	net: mscc: ocelot: fix stats region batching
	arm64: efi: Set NX compat flag in PE/COFF header
	cifs: fix missing unload_nls() in smb2_reconnect()
	xfrm: Zero padding when dumping algos and encap
	ASoC: codecs: tx-macro: Fix for KASAN: slab-out-of-bounds
	ASoC: Intel: avs: max98357a: Explicitly define codec format
	ASoC: Intel: avs: da7219: Explicitly define codec format
	ASoC: Intel: avs: ssm4567: Remove nau8825 bits
	ASoC: Intel: avs: nau8825: Adjust clock control
	zstd: Fix definition of assert()
	ACPI: video: Add backlight=native DMI quirk for Dell Vostro 15 3535
	ASoC: SOF: ipc3: Check for upper size limit for the received message
	ASoC: SOF: ipc4-topology: Fix incorrect sample rate print unit
	ASoC: SOF: Intel: pci-tng: revert invalid bar size setting
	ASoC: SOF: IPC4: update gain ipc msg definition to align with fw
	md: avoid signed overflow in slot_store()
	x86/PVH: obtain VGA console info in Dom0
	drm/amdkfd: Fix BO offset for multi-VMA page migration
	drm/amdkfd: fix a potential double free in pqm_create_queue
	drm/amdkfd: fix potential kgd_mem UAFs
	net: hsr: Don't log netdev_err message on unknown prp dst node
	ALSA: asihpi: check pao in control_message()
	ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set()
	fbdev: tgafb: Fix potential divide by zero
	ACPI: tools: pfrut: Check if the input of level and type is in the right numeric range
	sched_getaffinity: don't assume 'cpumask_size()' is fully initialized
	nvme-pci: add NVME_QUIRK_BOGUS_NID for Lexar NM620
	drm/amdkfd: Fixed kfd_process cleanup on module exit.
	net/mlx5e: Lower maximum allowed MTU in XSK to match XDP prerequisites
	fbdev: nvidia: Fix potential divide by zero
	fbdev: intelfb: Fix potential divide by zero
	fbdev: lxfb: Fix potential divide by zero
	fbdev: au1200fb: Fix potential divide by zero
	tools/power turbostat: Fix /dev/cpu_dma_latency warnings
	tools/power turbostat: fix decoding of HWP_STATUS
	tracing: Fix wrong return in kprobe_event_gen_test.c
	btrfs: fix uninitialized variable warning in btrfs_update_block_group
	btrfs: use temporary variable for space_info in btrfs_update_block_group
	mtd: rawnand: meson: initialize struct with zeroes
	mtd: nand: mxic-ecc: Fix mxic_ecc_data_xfer_wait_for_completion() when irq is used
	ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx()
	riscv/kvm: Fix VM hang in case of timer delta being zero.
	mips: bmips: BCM6358: disable RAC flush for TP1
	ALSA: usb-audio: Fix recursive locking at XRUN during syncing
	PCI: dwc: Fix PORT_LINK_CONTROL update when CDM check enabled
	platform/x86: think-lmi: add missing type attribute
	platform/x86: think-lmi: use correct possible_values delimiters
	platform/x86: think-lmi: only display possible_values if available
	platform/x86: think-lmi: Add possible_values for ThinkStation
	platform/surface: aggregator: Add missing fwnode_handle_put()
	mtd: rawnand: meson: invalidate cache on polling ECC bit
	SUNRPC: fix shutdown of NFS TCP client socket
	sfc: ef10: don't overwrite offload features at NIC reset
	scsi: megaraid_sas: Fix crash after a double completion
	scsi: mpt3sas: Don't print sense pool info twice
	net: dsa: realtek: fix out-of-bounds access
	ptp_qoriq: fix memory leak in probe()
	net: dsa: microchip: ksz8: fix ksz8_fdb_dump()
	net: dsa: microchip: ksz8: fix ksz8_fdb_dump() to extract all 1024 entries
	net: dsa: microchip: ksz8: fix offset for the timestamp filed
	net: dsa: microchip: ksz8: ksz8_fdb_dump: avoid extracting ghost entry from empty dynamic MAC table.
	net: dsa: microchip: ksz8863_smi: fix bulk access
	net: dsa: microchip: ksz8: fix MDB configuration with non-zero VID
	r8169: fix RTL8168H and RTL8107E rx crc error
	regulator: Handle deferred clk
	net/net_failover: fix txq exceeding warning
	net: stmmac: don't reject VLANs when IFF_PROMISC is set
	drm/i915/tc: Fix the ICL PHY ownership check in TC-cold state
	platform/x86/intel/pmc: Alder Lake PCH slp_s0_residency fix
	can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write
	s390/vfio-ap: fix memory leak in vfio_ap device driver
	ACPI: bus: Rework system-level device notification handling
	loop: LOOP_CONFIGURE: send uevents for partitions
	net: mvpp2: classifier flow fix fragmentation flags
	net: mvpp2: parser fix QinQ
	net: mvpp2: parser fix PPPoE
	smsc911x: avoid PHY being resumed when interface is not up
	ice: Fix ice_cfg_rdma_fltr() to only update relevant fields
	ice: add profile conflict check for AVF FDIR
	ice: fix invalid check for empty list in ice_sched_assoc_vsi_to_agg()
	ALSA: ymfpci: Create card with device-managed snd_devm_card_new()
	ALSA: ymfpci: Fix BUG_ON in probe function
	net: ipa: compute DMA pool size properly
	i40e: fix registers dump after run ethtool adapter self test
	bnxt_en: Fix reporting of test result in ethtool selftest
	bnxt_en: Fix typo in PCI id to device description string mapping
	bnxt_en: Add missing 200G link speed reporting
	net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only
	net: ethernet: mtk_eth_soc: fix flow block refcounting logic
	net: ethernet: mtk_eth_soc: add missing ppe cache flush when deleting a flow
	pinctrl: ocelot: Fix alt mode for ocelot
	Input: xpad - fix incorrectly applied patch for MAP_PROFILE_BUTTON
	iommu/vt-d: Allow zero SAGAW if second-stage not supported
	Input: i8042 - add TUXEDO devices to i8042 quirk tables for partial fix
	Input: alps - fix compatibility with -funsigned-char
	Input: focaltech - use explicitly signed char type
	cifs: prevent infinite recursion in CIFSGetDFSRefer()
	cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL
	Input: i8042 - add quirk for Fujitsu Lifebook A574/H
	Input: goodix - add Lenovo Yoga Book X90F to nine_bytes_report DMI table
	btrfs: fix deadlock when aborting transaction during relocation with scrub
	btrfs: fix race between quota disable and quota assign ioctls
	btrfs: scan device in non-exclusive mode
	zonefs: Do not propagate iomap_dio_rw() ENOTBLK error to user space
	block/io_uring: pass in issue_flags for uring_cmd task_work handling
	io_uring/poll: clear single/double poll flags on poll arming
	io_uring/rsrc: fix rogue rsrc node grabbing
	io_uring: fix poll/netmsg alloc caches
	vmxnet3: use gro callback when UPT is enabled
	zonefs: Always invalidate last cached page on append write
	dm: fix __send_duplicate_bios() to always allow for splitting IO
	can: j1939: prevent deadlock by moving j1939_sk_errqueue()
	xen/netback: don't do grant copy across page boundary
	net: phy: dp83869: fix default value for tx-/rx-internal-delay
	modpost: Fix processing of CRCs on 32-bit build machines
	pinctrl: amd: Disable and mask interrupts on resume
	pinctrl: at91-pio4: fix domain name assignment
	platform/x86: ideapad-laptop: Stop sending KEY_TOUCHPAD_TOGGLE
	powerpc: Don't try to copy PPR for task with NULL pt_regs
	powerpc/pseries/vas: Ignore VAS update for DLPAR if copy/paste is not enabled
	powerpc/64s: Fix __pte_needs_flush() false positive warning
	NFSv4: Fix hangs when recovering open state after a server reboot
	ALSA: hda/conexant: Partial revert of a quirk for Lenovo
	ALSA: usb-audio: Fix regression on detection of Roland VS-100
	ALSA: hda/realtek: Add quirks for some Clevo laptops
	ALSA: hda/realtek: Add quirk for Lenovo ZhaoYang CF4620Z
	xtensa: fix KASAN report for show_stack
	rcu: Fix rcu_torture_read ftrace event
	dt-bindings: mtd: jedec,spi-nor: Document CPOL/CPHA support
	s390/uaccess: add missing earlyclobber annotations to __clear_user()
	s390: reintroduce expoline dependence to scripts
	drm/etnaviv: fix reference leak when mmaping imported buffer
	drm/amdgpu: allow more APUs to do mode2 reset when go to S4
	drm/amd/display: Add DSC Support for Synaptics Cascaded MST Hub
	drm/amd/display: Take FEC Overhead into Timeslot Calculation
	drm/i915/gem: Flush lmem contents after construction
	drm/i915/dpt: Treat the DPT BO as a framebuffer
	drm/i915: Disable DC states for all commits
	drm/i915: Move CSC load back into .color_commit_arm() when PSR is enabled on skl/glk
	KVM: arm64: PMU: Fix GET_ONE_REG for vPMC regs to return the current value
	KVM: arm64: Disable interrupts while walking userspace PTs
	net: dsa: mv88e6xxx: read FID when handling ATU violations
	net: dsa: mv88e6xxx: replace ATU violation prints with trace points
	net: dsa: mv88e6xxx: replace VTU violation prints with trace points
	selftests/bpf: Test btf dump for struct with padding only fields
	libbpf: Fix BTF-to-C converter's padding logic
	selftests/bpf: Add few corner cases to test padding handling of btf_dump
	libbpf: Fix btf_dump's packed struct determination
	usb: ucsi: Fix ucsi->connector race
	drm/amdkfd: Get prange->offset after svm_range_vram_node_new
	hsr: ratelimit only when errors are printed
	x86/PVH: avoid 32-bit build warning when obtaining VGA console info
	Revert "cpuidle, intel_idle: Fix CPUIDLE_FLAG_IRQ_ENABLE *again*"
	Linux 6.1.23

Change-Id: I15af3697170567c4678bcc9c2380d80e7cef5bc9
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit a0f3313ef9
Greg Kroah-Hartman, 2023-04-06 14:14:07 +00:00
191 changed files with 3598 additions and 2099 deletions


@ -84,6 +84,13 @@ patternProperties:
"^otp(-[0-9]+)?$":
type: object
spi-cpol: true
spi-cpha: true
dependencies:
spi-cpol: [ spi-cpha ]
spi-cpha: [ spi-cpol ]
unevaluatedProperties: false
examples:


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 22
SUBLEVEL = 23
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -244,7 +244,7 @@ led-pcieslot-power {
};
};
iio-hwmon-battery {
iio-hwmon {
compatible = "iio-hwmon";
io-channels = <&adc1 7>;
};


@ -220,7 +220,7 @@ event-fan5-presence {
};
};
iio-hwmon-battery {
iio-hwmon {
compatible = "iio-hwmon";
io-channels = <&adc1 7>;
};


@ -66,7 +66,7 @@
.long .Lefi_header_end - .L_head // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
.short 0 // DllCharacteristics
.short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve


@ -668,14 +668,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
CONFIG_PGTABLE_LEVELS),
.mm_ops = &kvm_user_mm_ops,
};
unsigned long flags;
kvm_pte_t pte = 0; /* Keep GCC quiet... */
u32 level = ~0;
int ret;
/*
* Disable IRQs so that we hazard against a concurrent
* teardown of the userspace page tables (which relies on
* IPI-ing threads).
*/
local_irq_save(flags);
ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
VM_BUG_ON(ret);
VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
VM_BUG_ON(!(pte & PTE_VALID));
local_irq_restore(flags);
if (ret)
return ret;
/*
* Not seeing an error, but not updating level? Something went
* deeply wrong...
*/
if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
return -EFAULT;
/* Oops, the userspace PTs are gone... Replay the fault */
if (!kvm_pte_valid(pte))
return -EAGAIN;
return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}
@ -1167,7 +1186,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
*
* Returns the size of the mapping.
*/
static unsigned long
static long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long hva, kvm_pfn_t *pfnp,
phys_addr_t *ipap)
@ -1179,8 +1198,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* sure that the HVA and IPA are sufficiently aligned and that the
* block map is contained within the memslot.
*/
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
int sz = get_user_mapping_size(kvm, hva);
if (sz < 0)
return sz;
if (sz < PMD_SIZE)
return PAGE_SIZE;
/*
* The address we faulted on is backed by a transparent huge
* page. However, because we map the compound huge page and
@ -1411,7 +1437,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool logging_active = memslot_is_logging(memslot);
bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
@ -1568,6 +1594,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
hva, &pfn,
&fault_ipa);
if (vma_pagesize < 0) {
ret = vma_pagesize;
goto out_unlock;
}
}
if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {


@ -735,6 +735,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
return true;
}
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
u64 idx;
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
/* PMCCNTR_EL0 */
idx = ARMV8_PMU_CYCLE_IDX;
else
/* PMEVCNTRn_EL0 */
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
*val = kvm_pmu_get_counter_value(vcpu, idx);
return 0;
}
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@ -951,7 +967,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
.reset = reset_pmevcntr, \
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
@ -1600,7 +1616,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(SYS_PMCEID1_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
.access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
.access = access_pmu_evcntr, .reset = reset_unknown,
.reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),


@ -5,6 +5,8 @@
#include <asm/bmips.h>
#include <asm/io.h>
bool bmips_rac_flush_disable;
void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
boot_cpu_type() != CPU_BMIPS4380)
return;
if (unlikely(bmips_rac_flush_disable))
return;
/* Flush stale data out of the readahead cache */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);


@ -35,6 +35,8 @@
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
extern bool bmips_rac_flush_disable;
static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
struct bmips_quirk {
@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
* disable SMP for now
*/
bmips_smp_enabled = 0;
/*
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
}
static void bcm6368_quirks(void)


@ -163,6 +163,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
*/
}
static inline bool __pte_protnone(unsigned long pte)
{
return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
}
static inline bool __pte_flags_need_flush(unsigned long oldval,
unsigned long newval)
{
@ -179,8 +184,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
/*
* We do not expect kernel mappings or non-PTEs or not-present PTEs.
*/
VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));


@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
static int ppr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!target->thread.regs)
return -EINVAL;
return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
}
@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
if (!target->thread.regs)
return -EINVAL;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->ppr, 0, sizeof(u64));
}


@ -857,6 +857,13 @@ int pseries_vas_dlpar_cpu(void)
{
int new_nr_creds, rc;
/*
* NX-GZIP is not enabled. Nothing to do for DLPAR event
*/
if (!copypaste_feat)
return 0;
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
(u64)virt_to_phys(&hv_cop_caps));
@ -1013,6 +1020,7 @@ static int __init pseries_vas_init(void)
* Linux supports user space COPY/PASTE only with Radix
*/
if (!radix_enabled()) {
copypaste_feat = false;
pr_err("API is supported only with radix page tables\n");
return -ENOTSUPP;
}


@ -278,7 +278,7 @@ config ARCH_RV64I
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select SWIOTLB if MMU
endchoice


@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
return;
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
if (delta_ns) {
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
}
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
}
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)


@ -162,7 +162,7 @@ vdso_prepare: prepare0
ifdef CONFIG_EXPOLINE_EXTERN
modules_prepare: expoline_prepare
expoline_prepare:
expoline_prepare: scripts
$(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
endif
endif


@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
: "a" (empty_zero_page), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;


@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_XEN_PV_DOM0) += vga.o
obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_XEN_EFI) += efi.o


@ -1389,7 +1389,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
x86_platform.set_legacy_features =
xen_dom0_set_legacy_features;
xen_init_vga(info, xen_start_info->console.dom0.info_size);
xen_init_vga(info, xen_start_info->console.dom0.info_size,
&boot_params.screen_info);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;


@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
int ret = HYPERVISOR_platform_op(&op);
if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)


@ -9,10 +9,9 @@
#include "xen-ops.h"
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
struct screen_info *screen_info)
{
struct screen_info *screen_info = &boot_params.screen_info;
/* This is drawn from a dump from vgacon:startup in
* standard Linux. */
screen_info->orig_video_mode = 3;


@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
struct dom0_vga_console_info;
#ifdef CONFIG_XEN_PV_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
struct screen_info *);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
size_t size, struct screen_info *si)
{
}
#endif


@ -541,7 +541,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
size_t len;
size_t len, off = 0;
if (!sp)
sp = stack_pointer(task);
@ -550,9 +550,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
printk("%sStack:\n", loglvl);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
while (off < len) {
u8 line[STACK_DUMP_LINE_SIZE];
size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
STACK_DUMP_LINE_SIZE : len - off;
__memcpy(line, (u8 *)sp + off, line_len);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
line, line_len, false);
off += STACK_DUMP_LINE_SIZE;
}
show_trace(task, sp, loglvl);
}


@ -456,85 +456,67 @@ static void acpi_bus_osc_negotiate_usb_control(void)
Notification Handling
-------------------------------------------------------------------------- */
/*
* acpi_bus_notify
* ---------------
* Callback for all 'system-level' device notifications (values 0x00-0x7F).
/**
* acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
* @handle: Target ACPI object.
* @type: Notification type.
* @data: Ignored.
*
* This only handles notifications related to device hotplug.
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
bool hotplug_event = false;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
break;
return;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
break;
return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
break;
return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
break;
return;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
break;
return;
default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
break;
}
adev = acpi_get_acpi_dev(handle);
if (!adev)
goto err;
if (adev->dev.driver) {
struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
if (driver && driver->ops.notify &&
(driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
driver->ops.notify(adev, type);
}
if (!hotplug_event) {
acpi_put_acpi_dev(adev);
return;
}
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
adev = acpi_get_acpi_dev(handle);
if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
acpi_put_acpi_dev(adev);
err:
acpi_evaluate_ost(handle, type, ost_code, NULL);
acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@ -559,42 +541,51 @@ static u32 acpi_device_fixed_event(void *data)
return ACPI_INTERRUPT_HANDLED;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
static int acpi_device_install_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
else
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
} else {
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device,
device);
}
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device)
static void acpi_device_remove_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
else
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
} else {
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
}
}
/* Handle events targeting \_SB device (at present only graceful shutdown) */
@ -1036,7 +1027,7 @@ static int acpi_device_probe(struct device *dev)
acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
@ -1059,7 +1050,7 @@ static void acpi_device_remove(struct device *dev)
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);


@ -714,6 +714,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
{
.callback = video_detect_force_native,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics


@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
}
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
error = 0;
done:
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
return error;
return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
@ -1130,7 +1130,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
goto done;
return error;
}
static void __loop_clr_fd(struct loop_device *lo, bool release)


@ -656,7 +656,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
}
}
static void ubq_complete_io_cmd(struct ublk_io *io, int res)
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@ -668,7 +669,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
io_uring_cmd_done(io->cmd, res, 0);
io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@ -685,7 +686,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
static inline void __ublk_rq_task_work(struct request *req)
static inline void __ublk_rq_task_work(struct request *req,
unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
@ -723,7 +725,7 @@ static inline void __ublk_rq_task_work(struct request *req)
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
@ -761,17 +763,18 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
@ -783,12 +786,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
ublk_forward_io_cmds(ubq);
ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
@ -797,8 +800,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
struct ublk_queue *ubq = req->mq_hctx->driver_data;
unsigned issue_flags = IO_URING_F_UNLOCKED;
ublk_forward_io_cmds(ubq);
ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@ -1052,7 +1056,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
IO_URING_F_UNLOCKED);
}
/* all io commands are canceled */
@ -1295,7 +1300,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
out:
io_uring_cmd_done(cmd, ret, 0);
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
@ -2053,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
break;
}
out:
io_uring_cmd_done(cmd, ret, 0);
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;


@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
*/
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
if ((adev->flags & AMD_IS_APU) &&
adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU enabled APUs */
return false;
if ((adev->flags & AMD_IS_APU) &&
amdgpu_acpi_is_s3_active(adev))
return false;
if (amdgpu_sriov_vf(adev))


@ -1298,14 +1298,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
mutex_unlock(&p->mutex);
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@ -1321,9 +1321,9 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
kfree(devices_arr);
return err;
@ -1337,6 +1337,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@ -1389,16 +1390,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
mutex_unlock(&p->mutex);
if (kfd_flush_tlb_after_unmap(pdd->dev)) {
flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
}
mutex_unlock(&p->mutex);
if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@ -1414,9 +1418,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
kfree(devices_arr);
return err;
}


@ -295,7 +295,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch)
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
@ -305,19 +305,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
goto out;
}
amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
@ -397,14 +391,14 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate->dst[i + 3] = 0;
}
#endif
out:
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger)
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@ -457,7 +451,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@ -505,6 +499,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
@ -526,6 +521,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
ttm_res_offset = prange->offset << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@ -534,18 +536,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
else
svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}


@ -77,6 +77,7 @@ static int kfd_init(void)
static void kfd_exit(void)
{
kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();


@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);


@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
mmu_notifier_put(&p->mmu_notifier);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
/*
* Do early return if table is empty.
*
* This could potentially happen if this function is called concurrently
* by mmu_notifier and by kfd_cleanup_processes.
*
*/
if (hash_empty(kfd_processes_table)) {
mutex_unlock(&kfd_processes_mutex);
return;
}
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
mmu_notifier_put(&p->mmu_notifier);
kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
/*
* This code handles the case when driver is being unloaded before all
* mm_struct are released. We need to safely free the kfd_process and
* avoid race conditions with mmu_notifier that might try to free them.
*
*/
void kfd_cleanup_processes(void)
{
struct kfd_process *p;
struct hlist_node *p_temp;
unsigned int temp;
HLIST_HEAD(cleanup_list);
/*
* Move all remaining kfd_process from the process table to a
* temp list for processing. Once done, callback from mmu_notifier
* release will not see the kfd_process in the table and do early return,
* avoiding double free issues.
*/
mutex_lock(&kfd_processes_mutex);
hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
hash_del_rcu(&p->kfd_processes);
synchronize_srcu(&kfd_processes_srcu);
hlist_add_head(&p->kfd_processes, &cleanup_list);
}
mutex_unlock(&kfd_processes_mutex);
hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
kfd_process_notifier_release_internal(p);
/*
* Ensures that all outstanding free_notifier get called, triggering
* the release of the kfd_process struct.
*/
mmu_notifier_synchronize();
}
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;


@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
cleanup:
if (dev->shared_resources.enable_mes)
uninit_queue(*q);
uninit_queue(*q);
*q = NULL;
return retval;
}


@ -208,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
DRM_INFO("Synaptics Cascaded MST hub\n");
return true;
}
}
return false;
}
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@ -231,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
return false;
@ -627,12 +646,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
static int kbps_to_peak_pbn(int kbps)
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
{
u8 link_coding_cap;
uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
if (link_coding_cap == DP_128b_132b_ENCODING)
fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
return fec_overhead_multiplier_x1000;
}
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
peak_kbps = div_u64(peak_kbps, 1000);
peak_kbps *= fec_overhead_multiplier_x1000;
peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
@ -719,11 +751,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@ -819,6 +852,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@ -848,7 +882,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@ -861,7 +895,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@ -890,6 +924,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@ -951,7 +986,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@ -970,7 +1005,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@ -978,7 +1013,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,


@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
/**
* Panamera MST Hub detection
* Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
* Check from beginning of branch device vendor specific field (050Ch)
*/
#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
struct amdgpu_display_manager;
struct amdgpu_dm_connector;


@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
int ret;
ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
if (!ret) {
/* Drop the reference acquired by drm_gem_mmap_obj(). */
drm_gem_object_put(&etnaviv_obj->base);
}
return ret;
}
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {


@ -499,6 +499,22 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
icl_load_csc_matrix(crtc_state);
}
static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
/*
* Possibly related to display WA #1184, SKL CSC loses the latched
* CSC coeff/offset register values if the CSC registers are disarmed
* between DC5 exit and PSR exit. This will cause the plane(s) to
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
* register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
}
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@ -541,6 +557,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
if (crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@ -2171,7 +2190,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
@ -2179,7 +2198,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = NULL,


@ -7124,6 +7124,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@ -7500,8 +7502,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
/*
* During full modesets we write a lot of registers, wait
* for PLLs, etc. Doing that while DC states are enabled
* is not a good idea.
*
* During fastsets and other updates we also need to
* disable DC states due to the following scenario:
* 1. DC5 exit and PSR exit happen
* 2. Some or all _noarm() registers are written
* 3. Due to some long delay PSR is re-entered
* 4. DC5 entry -> DMC saves the already written new
* _noarm() registers and the old not yet written
* _arm() registers
* 5. DC5 exit -> DMC restores a mixture of old and
* new register values and arms the update
* 6. PSR exit -> hardware latches a mixture of old and
* new register values -> corrupted frame, or worse
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
@ -7640,8 +7662,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*


@ -300,6 +300,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->pte_encode = gen8_ggtt_pte_encode;
dpt->obj = dpt_obj;
dpt->obj->is_dpt = true;
return &dpt->vm;
}
@ -308,5 +309,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
dpt->obj->is_dpt = false;
i915_vm_put(&dpt->vm);
}


@ -440,9 +440,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assume safe mode\n",
"Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name);
return true;
return false;
}
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);


@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
memcpy(map, data, size);
i915_gem_object_unpin_map(obj);
i915_gem_object_flush_map(obj);
__i915_gem_object_release_map(obj);
return obj;
}

View File

@ -319,7 +319,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->frontbuffer);
return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}
static inline unsigned int


@ -491,6 +491,9 @@ struct drm_i915_gem_object {
*/
unsigned int cache_dirty:1;
/* @is_dpt: Object houses a display page table (DPT) */
unsigned int is_dpt:1;
/**
* @read_domains: Read memory domains.
*

View File

@ -356,7 +356,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@ -855,22 +855,22 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
};
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sm8250_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
@ -1180,6 +1180,13 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
-1),
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
static struct dpu_pingpong_cfg qcm2290_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@ -1203,13 +1210,6 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
/*************************************************************
* DSC sub blocks config
*************************************************************/


@ -168,7 +168,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
raw_local_irq_disable();
/*
* The lockdep hardirqs state may be changed to 'on' with timer
* tick interrupt followed by __do_softirq(). Use local_irq_disable()
* to keep the hardirqs state correct.
*/
local_irq_disable();
return ret;
}


@ -779,9 +779,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
input_report_key(dev, BTN_C, data[8]);
input_report_key(dev, BTN_Z, data[9]);
/* Profile button has a value of 0-3, so it is reported as an axis */
if (xpad->mapping & MAP_PROFILE_BUTTON)
input_report_abs(dev, ABS_PROFILE, data[34]);
input_sync(dev);
}
@ -1059,6 +1056,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
(__u16) le16_to_cpup((__le16 *)(data + 8)));
}
/* Profile button has a value of 0-3, so it is reported as an axis */
if (xpad->mapping & MAP_PROFILE_BUTTON)
input_report_abs(dev, ABS_PROFILE, data[34]);
/* paddle handling */
/* based on SDL's SDL_hidapi_xboxone.c */
if (xpad->mapping & MAP_PADDLES) {


@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
x = y = z = 0;
/* Divide 4 since trackpoint's speed is too fast */
input_report_rel(dev2, REL_X, (char)x / 4);
input_report_rel(dev2, REL_Y, -((char)y / 4));
input_report_rel(dev2, REL_X, (s8)x / 4);
input_report_rel(dev2, REL_Y, -((s8)y / 4));
psmouse_report_standard_buttons(dev2, packet[3]);
@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
((packet[3] & 0x20) << 1);
z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
input_report_rel(dev2, REL_X, (char)x);
input_report_rel(dev2, REL_Y, -((char)y));
input_report_rel(dev2, REL_X, (s8)x);
input_report_rel(dev2, REL_Y, -((s8)y));
input_report_abs(dev2, ABS_PRESSURE, z);
psmouse_report_standard_buttons(dev2, packet[1]);
@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
if (reg < 0)
return reg;
x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
if (reg < 0)
return reg;
x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = 17 + x_electrode;
y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
y_electrode = 13 + y_electrode;
x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
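The (s8) conversions matter because the C standard leaves the signedness of plain char to the ABI: on arm64, s390 and several other architectures char is unsigned, so the old casts could never yield the negative deltas these protocols encode. A minimal userspace sketch of the nibble sign extension, with int8_t standing in for the kernel's s8 and an invented register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xf9;	/* invented register read */

	/* sign extend lower 4 bits: 0x9 becomes -7 */
	int x_pitch = (int8_t)(reg << 4) >> 4;
	/* sign extend upper 4 bits: 0xf becomes -1 */
	int y_pitch = (int8_t)reg >> 4;

	printf("x_pitch=%d y_pitch=%d\n", x_pitch, y_pitch);	/* -7 -1 */
	/* on an unsigned-char ABI the old casts stay non-negative */
	printf("unsigned char: %d\n", (unsigned char)reg >> 4);	/* 15 */
	return 0;
}

(Arithmetic right shift of negative values is assumed here, as the kernel assumes of its supported compilers.)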


@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
state->pressed = packet[0] >> 7;
finger1 = ((packet[0] >> 4) & 0x7) - 1;
if (finger1 < FOC_MAX_FINGERS) {
state->fingers[finger1].x += (char)packet[1];
state->fingers[finger1].y += (char)packet[2];
state->fingers[finger1].x += (s8)packet[1];
state->fingers[finger1].y += (s8)packet[2];
} else {
psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
finger1);
@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
*/
finger2 = ((packet[3] >> 4) & 0x7) - 1;
if (finger2 < FOC_MAX_FINGERS) {
state->fingers[finger2].x += (char)packet[4];
state->fingers[finger2].y += (char)packet[5];
state->fingers[finger2].x += (s8)packet[4];
state->fingers[finger2].y += (s8)packet[5];
}
}


@ -610,6 +610,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Fujitsu Lifebook A574/H */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Gigabyte M912 */
.matches = {
@ -1116,6 +1124,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
/*
* Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
* the keyboard very laggy for ~5 seconds after boot and
* sometimes also after resume.
* However both are required for the keyboard to not fail
* completely sometimes after boot or resume.
*/
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
@ -1123,6 +1145,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
/*
* Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
* the keyboard very laggy for ~5 seconds after boot and
* sometimes also after resume.
* However both are required for the keyboard to not fail
* completely sometimes after boot or resume.
*/
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),


@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
.ident = "Lenovo YogaBook",
/* YB1-X91L/F and YB1-X90L/F */
/* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
}
},
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
}
},
#endif


@ -1057,7 +1057,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
if (cap_sagaw(iommu->cap) == 0) {
if (!cap_sagaw(iommu->cap) &&
(!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
iommu->name);
drhd->ignored = 1;


@ -1522,6 +1522,8 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
ret = 1;
break;
default:
if (len)
setup_split_accounting(ci, *len);
/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
alloc_multiple_bios(&blist, ci, ti, num_bios);
while ((clone = bio_list_pop(&blist))) {


@ -3152,6 +3152,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
if (slot < 0)
/* overflow */
return -ENOSPC;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
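The added check closes a parsing hole rather than a syntax error: kstrtouint() accepts any value up to UINT_MAX, but slot is then read back through a signed int, so a huge input turns negative and would collide with the driver's slot == -1 ("no slot") convention. A userspace sketch of the aliasing, with strtoul standing in for kstrtouint:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int slot = (int)(unsigned int)strtoul("4294967295", NULL, 10);

	if (slot < 0)	/* 4294967295 aliases to -1: reject as overflow */
		printf("rejected: %d\n", slot);
	return 0;
}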


@ -429,6 +429,7 @@ static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
mxic_ecc_enable_int(mxic);
ret = wait_for_completion_timeout(&mxic->complete,
msecs_to_jiffies(1000));
ret = ret ? 0 : -ETIMEDOUT;
mxic_ecc_disable_int(mxic);
} else {
ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,


@ -176,6 +176,7 @@ struct meson_nfc {
dma_addr_t daddr;
dma_addr_t iaddr;
u32 info_bytes;
unsigned long assigned_cs;
};
@ -503,6 +504,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
nfc->daddr, datalen, dir);
return ret;
}
nfc->info_bytes = infolen;
cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
@ -520,8 +522,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
struct meson_nfc *nfc = nand_get_controller_data(nand);
dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
if (infolen)
if (infolen) {
dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
nfc->info_bytes = 0;
}
}
static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
@ -710,6 +714,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
usleep_range(10, 15);
/* info is updated by nfc dma engine */
smp_rmb();
dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
DMA_FROM_DEVICE);
ret = *info & ECC_COMPLETE;
} while (!ret);
}
@ -991,7 +997,7 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
static int meson_nfc_clk_init(struct meson_nfc *nfc)
{
struct clk_parent_data nfc_divider_parent_data[1];
struct clk_parent_data nfc_divider_parent_data[1] = {0};
struct clk_init_data init = {0};
int ret;


@ -907,15 +907,14 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
u16 entries = 0;
u8 timestamp = 0;
u8 fid;
u8 member;
struct alu_struct alu;
u8 src_port;
u8 mac[ETH_ALEN];
do {
alu.is_static = false;
ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
&timestamp, &entries);
if (!ret && (member & BIT(port))) {
ret = cb(alu.mac, alu.fid, alu.is_static, data);
if (!ret && port == src_port) {
ret = cb(mac, fid, false, data);
if (ret)
break;
}


@ -82,22 +82,16 @@ static const struct regmap_bus regmap_smi[] = {
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.max_raw_read = 1,
.max_raw_write = 1,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
.max_raw_read = 2,
.max_raw_write = 2,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
.max_raw_read = 4,
.max_raw_write = 4,
}
};
@ -108,7 +102,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
.use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
},
@ -118,7 +111,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 16,
.cache_type = REGCACHE_NONE,
.use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
},
@ -128,7 +120,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 32,
.cache_type = REGCACHE_NONE,
.use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
}


@ -357,13 +357,13 @@ static const u32 ksz8863_masks[] = {
[VLAN_TABLE_VALID] = BIT(19),
[STATIC_MAC_TABLE_VALID] = BIT(19),
[STATIC_MAC_TABLE_USE_FID] = BIT(21),
[STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
[STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
[STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
[STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
[DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
[DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
[DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
[DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
[DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
[DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
[DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
[DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
@ -373,10 +373,10 @@ static u8 ksz8863_shifts[] = {
[VLAN_TABLE_MEMBERSHIP_S] = 16,
[STATIC_MAC_FWD_PORTS] = 16,
[STATIC_MAC_FID] = 22,
[DYNAMIC_MAC_ENTRIES_H] = 3,
[DYNAMIC_MAC_ENTRIES_H] = 8,
[DYNAMIC_MAC_ENTRIES] = 24,
[DYNAMIC_MAC_FID] = 16,
[DYNAMIC_MAC_TIMESTAMP] = 24,
[DYNAMIC_MAC_TIMESTAMP] = 22,
[DYNAMIC_MAC_SRC_PORT] = 20,
};
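The corrected field positions can be sanity-checked by decoding a register word by hand. A compilable sketch with local stand-ins for the kernel's GENMASK()/FIELD_GET() and an invented table word:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & ~((m) << 1)))

int main(void)
{
	uint32_t w = 0x5a000000;	/* invented dynamic MAC table word */

	/* the entry count now occupies bits 31:24, not 31:28 */
	printf("entries=%u\n", FIELD_GET(GENMASK(31, 24), w));	/* 90 */
	return 0;
}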


@ -15,3 +15,7 @@ mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
mv88e6xxx-objs += trace.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)


@ -3354,9 +3354,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
/* Forward any IPv4 IGMP or IPv6 MLD frames received
* by a USER port to the CPU port to allow snooping.
*/
if (dsa_is_user_port(ds, port))
reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;


@ -12,6 +12,7 @@
#include "chip.h"
#include "global1.h"
#include "trace.h"
/* Offset 0x01: ATU FID Register */
@ -114,6 +115,19 @@ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
MV88E6XXX_G1_ATU_OP_BUSY |
MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
if (err)
return err;
return mv88e6xxx_g1_atu_op_wait(chip);
}
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
{
u16 val;
@ -159,6 +173,41 @@ int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
}
static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
{
u16 val = 0, upper = 0, op = 0;
int err = -EOPNOTSUPP;
if (mv88e6xxx_num_databases(chip) > 256) {
err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
val &= 0xfff;
if (err)
return err;
} else {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
if (err)
return err;
if (mv88e6xxx_num_databases(chip) > 64) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
&upper);
if (err)
return err;
upper = (upper >> 8) & 0x00f0;
} else if (mv88e6xxx_num_databases(chip) > 16) {
/* ATU DBNum[5:4] are located in ATU Operation 9:8 */
upper = (op >> 4) & 0x30;
}
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
val = (op & 0xf) | upper;
}
*fid = val;
return err;
}
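For a chip with more than 64 but at most 256 databases, the helper stitches the FID together from two registers. Worked numbers (invented) for that middle case: DBNum[3:0] from ATU Op bits 3:0, DBNum[7:4] from ATU Control bits 15:12.

#include <stdio.h>

int main(void)
{
	unsigned int op = 0x000a;	/* ATU Op: DBNum[3:0] = 0xa */
	unsigned int ctl = 0x3000;	/* ATU Ctl: DBNum[7:4] = 0x3 */
	unsigned int upper = (ctl >> 8) & 0x00f0;	/* 0x30 */
	unsigned int fid = (op & 0xf) | upper;

	printf("fid=0x%x\n", fid);	/* 0x3a */
	return 0;
}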
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
@ -353,14 +402,12 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
int spid;
int err;
u16 val;
int err, spid;
u16 val, fid;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_op(chip, 0,
MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
goto out;
@ -368,6 +415,10 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
if (err)
goto out;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
goto out;
@ -385,23 +436,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
}
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU member violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU miss violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_miss_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU full violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_full_violation++;
}
mv88e6xxx_reg_unlock(chip);


@ -13,6 +13,7 @@
#include "chip.h"
#include "global1.h"
#include "trace.h"
/* Offset 0x02: VTU FID Register */
@ -628,14 +629,12 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
vid, spid);
trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
vid, spid);
trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_miss_violation++;
}


@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2022 NXP
*/
#define CREATE_TRACE_POINTS
#include "trace.h"


@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright 2022 NXP
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mv88e6xxx
#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _MV88E6XXX_TRACE_H
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
TP_PROTO(const struct device *dev, int spid, u16 portvec,
const unsigned char *addr, u16 fid),
TP_ARGS(dev, spid, portvec, addr, fid),
TP_STRUCT__entry(
__string(name, dev_name(dev))
__field(int, spid)
__field(u16, portvec)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, fid)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->spid = spid;
__entry->portvec = portvec;
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->fid = fid;
),
TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
__get_str(name), __entry->spid, __entry->portvec,
__entry->addr, __entry->fid)
);
DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
TP_PROTO(const struct device *dev, int spid, u16 portvec,
const unsigned char *addr, u16 fid),
TP_ARGS(dev, spid, portvec, addr, fid));
DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
TP_PROTO(const struct device *dev, int spid, u16 portvec,
const unsigned char *addr, u16 fid),
TP_ARGS(dev, spid, portvec, addr, fid));
DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
TP_PROTO(const struct device *dev, int spid, u16 portvec,
const unsigned char *addr, u16 fid),
TP_ARGS(dev, spid, portvec, addr, fid));
DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
TP_PROTO(const struct device *dev, int spid, u16 vid),
TP_ARGS(dev, spid, vid),
TP_STRUCT__entry(
__string(name, dev_name(dev))
__field(int, spid)
__field(u16, vid)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->spid = spid;
__entry->vid = vid;
),
TP_printk("dev %s spid %d vid %u",
__get_str(name), __entry->spid, __entry->vid)
);
DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_member_violation,
TP_PROTO(const struct device *dev, int spid, u16 vid),
TP_ARGS(dev, spid, vid));
DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_miss_violation,
TP_PROTO(const struct device *dev, int spid, u16 vid),
TP_ARGS(dev, spid, vid));
#endif /* _MV88E6XXX_TRACE_H */
/* We don't want to use include/trace/events */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
#include "realtek.h"
@ -152,7 +153,9 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev)
if (!var)
return -EINVAL;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
priv = devm_kzalloc(&mdiodev->dev,
size_add(sizeof(*priv), var->chip_data_sz),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
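size_add() is used instead of a plain + because a wrapped size_t would let devm_kzalloc() succeed with an undersized buffer. A userspace stand-in with the same saturating contract (the kernel's version is built on check_add_overflow()):

#include <stdint.h>
#include <stdio.h>

static size_t size_add_sat(size_t a, size_t b)
{
	size_t s = a + b;

	return s < a ? SIZE_MAX : s;	/* saturate instead of wrapping */
}

int main(void)
{
	/* a SIZE_MAX request always fails, so the overflow cannot hide */
	printf("%zu\n", size_add_sat(16, SIZE_MAX - 8));
	return 0;
}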


@ -175,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV


@ -1225,6 +1225,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
u16 support_speeds;
u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */


@ -1712,6 +1712,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
return SPEED_100000;
case BNXT_LINK_SPEED_200GB:
return SPEED_200000;
default:
return SPEED_UNKNOWN;
}
@ -3634,6 +3636,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_ulp_stop(bp);
rc = bnxt_close_nic(bp, true, false);
if (rc) {
etest->flags |= ETH_TEST_FL_FAILED;
bnxt_ulp_start(bp, rc);
return;
}


@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return 0;
}
struct i40e_diag_reg_test_info i40e_reg_list[] = {
const struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg, mask;
u32 elements;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
elements = i40e_reg_list[i].elements;
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
i40e_reg_list[i].elements =
hw->func_caps.num_msix_vectors - 1;
elements = hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
for (j = 0; j < elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);


@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
extern struct i40e_diag_reg_test_info i40e_reg_list[];
extern const struct i40e_diag_reg_test_info i40e_reg_list[];
i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);


@ -2756,7 +2756,7 @@ static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
struct ice_hw *hw = pi->hw;
int status = 0;
@ -2774,11 +2774,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
if (old_agg_info && old_agg_info != agg_info) {
struct ice_sched_agg_vsi_info *vtmp;
list_for_each_entry_safe(old_agg_vsi_info, vtmp,
list_for_each_entry_safe(iter, vtmp,
&old_agg_info->agg_vsi_list,
list_entry)
if (old_agg_vsi_info->vsi_handle == vsi_handle)
if (iter->vsi_handle == vsi_handle) {
old_agg_vsi_info = iter;
break;
}
}
/* check if entry already exist */
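The extra iter variable follows the kernel idiom adopted after the 2022 list-iterator discussions: the list_for_each_entry cursor is only meaningful after a break, since on a no-match exit it points at the list head disguised as an entry. A plain-C illustration of latching the match instead (types invented):

#include <stdio.h>

struct item { int handle; struct item *next; };

static struct item *find(struct item *head, int handle)
{
	struct item *iter, *found = NULL;

	for (iter = head; iter; iter = iter->next)
		if (iter->handle == handle) {
			found = iter;	/* latch the match */
			break;
		}
	return found;	/* NULL on no match, never a stale cursor */
}

int main(void)
{
	struct item b = { 2, NULL }, a = { 1, &b };

	printf("%s\n", find(&a, 3) ? "found" : "not found");
	return 0;
}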


@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
struct ice_vsi_ctx *ctx;
struct ice_vsi_ctx *ctx, *cached_ctx;
int status;
ctx = ice_get_vsi_ctx(hw, vsi_handle);
cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!cached_ctx)
return -ENOENT;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -EIO;
return -ENOMEM;
ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
if (enable)
ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
return ice_update_vsi(hw, vsi_handle, ctx, NULL);
status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
if (!status) {
cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
cached_ctx->info.valid_sections |= ctx->info.valid_sections;
}
kfree(ctx);
return status;
}
/**


@ -541,6 +541,72 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
}
}
/**
* ice_vc_fdir_has_prof_conflict
* @vf: pointer to the VF structure
* @conf: FDIR configuration for each filter
*
* Check whether @conf conflicts with a profile that is already installed
*
* Return: true if a conflict exists, false otherwise.
*/
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf)
{
struct ice_fdir_fltr *desc;
list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
struct virtchnl_fdir_fltr_conf *existing_conf;
enum ice_fltr_ptype flow_type_a, flow_type_b;
struct ice_fdir_fltr *a, *b;
existing_conf = to_fltr_conf_from_desc(desc);
a = &existing_conf->input;
b = &conf->input;
flow_type_a = a->flow_type;
flow_type_b = b->flow_type;
/* No need to compare two rules with different tunnel types or
* with the same protocol type.
*/
if (existing_conf->ttype != conf->ttype ||
flow_type_a == flow_type_b)
continue;
switch (flow_type_a) {
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
return true;
break;
default:
break;
}
}
return false;
}
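Stripped of the virtchnl plumbing, the rule the switch encodes is symmetric: two rules with the same tunnel type conflict exactly when one names a specific L4 protocol and the other is the catch-all OTHER type of the same IP version. A compact restatement (IPv4 shown, enum names invented):

#include <stdbool.h>
#include <stdio.h>

enum flow { V4_UDP, V4_TCP, V4_SCTP, V4_OTHER };

static bool conflicts(enum flow a, enum flow b)
{
	if (a == b)
		return false;	/* same protocol type: never compared */
	return a == V4_OTHER || b == V4_OTHER;
}

int main(void)
{
	printf("%d %d\n", conflicts(V4_UDP, V4_OTHER),	/* 1 */
	       conflicts(V4_UDP, V4_TCP));		/* 0 */
	return 0;
}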
/**
* ice_vc_fdir_write_flow_prof
* @vf: pointer to the VF structure
@ -677,6 +743,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
enum ice_fltr_ptype flow;
int ret;
ret = ice_vc_fdir_has_prof_conflict(vf, conf);
if (ret) {
dev_dbg(dev, "Found flow profile conflict for VF %d\n",
vf->vf_id);
return ret;
}
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow);
if (ret) {


@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */


@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
if (!priv->prs_double_vlans)
return -ENOMEM;
/* Double VLAN: 0x8100, 0x88A8 */
err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
/* Double VLAN: 0x88A8, 0x8100 */
err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
if (err)
return err;
@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
int tid;
int tid, ihl;
/* IPv4 over PPPoE with options */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
return tid;
/* IPv4 over PPPoE with header length >= 5 */
for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
return tid;
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
mvpp2_prs_match_etype(&pe, 0, PPP_IP);
mvpp2_prs_match_etype(&pe, 0, PPP_IP);
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD | ihl,
MVPP2_PRS_IPV4_HEAD_MASK |
MVPP2_PRS_IPV4_IHL_MASK);
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
MVPP2_ETH_TYPE_LEN + (ihl * 4),
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
mvpp2_prs_hw_write(priv, &pe);
/* IPv4 over PPPoE without options */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
return tid;
pe.index = tid;
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD |
MVPP2_PRS_IPV4_IHL_MIN,
MVPP2_PRS_IPV4_HEAD_MASK |
MVPP2_PRS_IPV4_IHL_MASK);
/* Clear ri before updating */
pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
mvpp2_prs_hw_write(priv, &pe);
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
mvpp2_prs_hw_write(priv, &pe);
}
/* IPv6 over PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,


@ -438,6 +438,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
mtk_ppe_cache_clear(ppe);
}
entry->hash = 0xffff;


@ -570,6 +570,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
flow_block_cb_incref(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &block_cb_list);
return 0;
@ -578,7 +579,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (!block_cb)
return -ENOENT;
if (flow_block_cb_decref(block_cb)) {
if (!flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}


@ -4104,13 +4104,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
int max_xdp_mtu;
if (!xsk_pool)
continue;
mlx5e_build_xsk_param(xsk_pool, &xsk);
max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
/* Validate XSK params and XDP MTU in advance */
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
new_params->sw_mtu > max_xdp_mtu) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
int max_mtu_frame, max_mtu_page, max_mtu;
@ -4120,9 +4124,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
max_mtu = min(max_mtu_frame, max_mtu_page);
max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}


@ -392,7 +392,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!ocelot->stats_layout[i].reg)
continue;
if (region && ocelot->stats_layout[i].reg == last + 4) {
if (region && ocelot->map[SYS][ocelot->stats_layout[i].reg & REG_MASK] ==
ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),


@ -826,6 +826,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
/* disable phy pfm mode */
phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
/* disable 10m pll off */
phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0);
rtl8168g_disable_aldps(phydev);
rtl8168g_config_eee_phy(phydev);
}


@ -1304,7 +1304,8 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
static int efx_ef10_init_nic(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
netdev_features_t hw_enc_features = 0;
struct net_device *net_dev = efx->net_dev;
netdev_features_t tun_feats, tso_feats;
int rc;
if (nic_data->must_check_datapath_caps) {
@ -1349,20 +1350,30 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
/* add encapsulated checksum offload features */
/* encap features might change during reset if fw variant changed */
if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
/* add encapsulated TSO features */
net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
else
net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
tso_feats = NETIF_F_TSO | NETIF_F_TSO6;
if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
netdev_features_t encap_tso_features;
encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
hw_enc_features |= encap_tso_features | NETIF_F_TSO;
efx->net_dev->features |= encap_tso_features;
/* If this is first nic_init, or if it is a reset and a new fw
* variant has added new features, enable them by default.
* If the features are not new, maintain their current value.
*/
if (!(net_dev->hw_features & tun_feats))
net_dev->features |= tun_feats;
net_dev->hw_enc_features |= tun_feats | tso_feats;
net_dev->hw_features |= tun_feats;
} else {
net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
net_dev->hw_features &= ~tun_feats;
net_dev->features &= ~tun_feats;
}
efx->net_dev->hw_enc_features = hw_enc_features;
/* don't fail init if RSS setup doesn't work */
rc = efx->type->rx_push_rss_config(efx, false,
@ -4021,7 +4032,10 @@ static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_IPV6_CSUM | \
NETIF_F_RXHASH | \
NETIF_F_NTUPLE)
NETIF_F_NTUPLE | \
NETIF_F_SG | \
NETIF_F_RXCSUM | \
NETIF_F_RXALL)
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,


@ -1001,21 +1001,18 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
}
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
net_dev->features |= NETIF_F_TSO6;
if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
net_dev->hw_enc_features |= NETIF_F_TSO6;
}
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
net_dev->features |= efx->type->offload_features;
/* Add TSO features */
if (efx->type->tso_versions && efx->type->tso_versions(efx))
net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
/* Determine user configurable features */
net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
/* Disable receiving frames with bad FCS, by default. */


@ -1037,8 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
/* Indicate that the MAC is responsible for managing PHY PM */
phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@ -1066,6 +1064,7 @@ static int smsc911x_mii_init(struct platform_device *pdev,
struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
struct phy_device *phydev;
int err = -ENXIO;
pdata->mii_bus = mdiobus_alloc();
@ -1108,6 +1107,10 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
phydev = phy_find_first(pdata->mii_bus);
if (phydev)
phydev->mac_managed_pm = true;
return 0;
err_out_free_bus_2:


@ -532,7 +532,6 @@ struct mac_device_info {
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
unsigned int promisc;
bool vlan_fail_q_en;
u8 vlan_fail_q;
};


@ -481,12 +481,6 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
if (vid > 4095)
return -EINVAL;
if (hw->promisc) {
netdev_err(dev,
"Adding VLAN in promisc mode not supported\n");
return -EPERM;
}
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
/* For single VLAN filter, VID 0 means VLAN promiscuous */
@ -536,12 +530,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
{
int i, ret = 0;
if (hw->promisc) {
netdev_err(dev,
"Deleting VLAN in promisc mode not supported\n");
return -EPERM;
}
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@ -566,39 +554,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
return ret;
}
static void dwmac4_vlan_promisc_enable(struct net_device *dev,
struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
u32 hash;
u32 val;
int i;
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
dwmac4_write_single_vlan(dev, 0);
return;
}
/* Extended Rx VLAN Filter Enable */
for (i = 0; i < hw->num_vlan; i++) {
if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
dwmac4_write_vlan_filter(dev, hw, i, val);
}
}
hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
if (hash & GMAC_VLAN_VLHT) {
value = readl(ioaddr + GMAC_VLAN_TAG);
if (value & GMAC_VLAN_VTHM) {
value &= ~GMAC_VLAN_VTHM;
writel(value, ioaddr + GMAC_VLAN_TAG);
}
}
}
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
struct mac_device_info *hw)
{
@ -718,22 +673,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
/* VLAN filtering */
if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
value &= ~GMAC_PACKET_FILTER_VTFE;
else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
value |= GMAC_PACKET_FILTER_VTFE;
writel(value, ioaddr + GMAC_PACKET_FILTER);
if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
if (!hw->promisc) {
hw->promisc = 1;
dwmac4_vlan_promisc_enable(dev, hw);
}
} else {
if (hw->promisc) {
hw->promisc = 0;
dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
}
}
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,


@ -2817,6 +2817,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
err_free_phylink:
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@ -2845,6 +2846,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
of_platform_device_destroy(common->mdio_dev, NULL);


@ -918,14 +918,13 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
return cpts_of_mux_clk_setup(cpts, node);
}
static void am65_cpts_release(void *data)
void am65_cpts_release(struct am65_cpts *cpts)
{
struct am65_cpts *cpts = data;
ptp_clock_unregister(cpts->ptp_clock);
am65_cpts_disable(cpts);
clk_disable_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(am65_cpts_release);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node)
@ -1003,18 +1002,12 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
if (ret) {
dev_err(dev, "failed to add ptpclk reset action %d", ret);
return ERR_PTR(ret);
}
ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
am65_cpts_interrupt,
IRQF_ONESHOT, dev_name(dev), cpts);
if (ret < 0) {
dev_err(cpts->dev, "error attaching irq %d\n", ret);
return ERR_PTR(ret);
goto reset_ptpclk;
}
dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
@ -1023,6 +1016,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return cpts;
reset_ptpclk:
am65_cpts_release(cpts);
refclk_disable:
clk_disable_unprepare(cpts->refclk);
return ERR_PTR(ret);


@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
};
#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
@ -29,6 +30,10 @@ int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
#else
static inline void am65_cpts_release(struct am65_cpts *cpts)
{
}
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
struct device_node *node)


@ -1902,10 +1902,9 @@ static int ca8210_skb_tx(
struct ca8210_priv *priv
)
{
int status;
struct ieee802154_hdr header = { };
struct secspec secspec;
unsigned int mac_len;
int mac_len, status;
dev_dbg(&priv->spi->dev, "%s called\n", __func__);


@ -153,7 +153,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
* gsi_trans_pool_exit_dma() can assume the total allocated
* size is exactly (count * size).
*/
total_size = get_order(total_size) << PAGE_SHIFT;
total_size = PAGE_SIZE << get_order(total_size);
virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
if (!virt)
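The one-liner fixes an operand-order bug: get_order() returns a power-of-two exponent, not a byte count, so shifting the order left by PAGE_SHIFT produced a size unrelated to the request, and for a 20 KiB pool it even undersized the buffer. Worked numbers with the get_order() result hardcoded:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	int order = 3;	/* get_order(20 KiB) on a 4 KiB-page system */

	printf("broken: %lu\n", (unsigned long)order << PAGE_SHIFT);	/* 12288 */
	printf("fixed:  %lu\n", PAGE_SIZE << order);			/* 32768 */
	return 0;
}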


@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
txq = netdev_pick_tx(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
return txq;
} else {
txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
}
txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
/* Save the original txq to restore before passing to the driver */
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;


@ -588,15 +588,13 @@ static int dp83869_of_init(struct phy_device *phydev)
&dp83869_internal_delay[0],
delay_size, true);
if (dp83869->rx_int_delay < 0)
dp83869->rx_int_delay =
dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
&dp83869_internal_delay[0],
delay_size, false);
if (dp83869->tx_int_delay < 0)
dp83869->tx_int_delay =
dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
dp83869->tx_int_delay = DP83869_CLK_DELAY_DEF;
return ret;
}


@ -1688,7 +1688,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (unlikely(rcd->ts))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
if (adapter->netdev->features & NETIF_F_LRO)
/* Use GRO callback if UPT is enabled */
if ((adapter->netdev->features & NETIF_F_LRO) &&
!rq->shared->updateRxProd)
netif_receive_skb(skb);
else
napi_gro_receive(&rq->napi, skb);


@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
/* passed to gnttab_[un]map_refs with pages under (un)mapping */


@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
u32 split_mask;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
struct sk_buff *skb =
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
if (unlikely(skb == NULL))
return NULL;
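The new BUILD_BUG_ON guards the 48-byte skb->cb scratch area, which xenvif_tx_cb now fills further with split_mask. A userspace equivalent using _Static_assert; the array bound assumes the legacy slot count of 18 and is only illustrative:

#include <stdint.h>

struct tx_cb_like {
	uint16_t copy_pending_idx[18 + 1];	/* XEN_NETBK_LEGACY_SLOTS_MAX + 1 */
	uint8_t copy_count;
	uint32_t split_mask;
};

/* trips at compile time if the struct ever outgrows skb->cb */
_Static_assert(sizeof(struct tx_cb_like) <= 48, "must fit in skb->cb");

int main(void) { return 0; }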
@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
nr_slots = shinfo->nr_frags + 1;
copy_count(skb) = 0;
XENVIF_TX_CB(skb)->split_mask = 0;
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
while (data_len > 0) {
int amount = data_len > txp->size ? txp->size : data_len;
bool split = false;
cop->source.u.ref = txp->gref;
cop->source.domid = queue->vif->domid;
@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
- data_len);
/* Don't cross local page boundary! */
if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
amount = XEN_PAGE_SIZE - cop->dest.offset;
XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
split = true;
}
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
pending_idx = queue->pending_ring[index];
callback_param(queue, pending_idx).ctx = NULL;
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
copy_count(skb)++;
if (!split)
copy_count(skb)++;
cop++;
data_len -= amount;
@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
* The remainder will be mapped.
* The remainder will be mapped or copied in the next
* iteration.
*/
txp->offset += amount;
txp->size -= amount;
@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
/* Split copies need to be handled together. */
if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
(*gopp_copy)++;
if (!newerr)
newerr = (*gopp_copy)->status;
}
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
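The boundary clamp earlier in this file is the heart of the fix: a grant-copy destination must not cross a Xen page, so an op that would is shortened and the remainder becomes a second op sharing the same pending_idx, flagged in split_mask (which is why the completion path above folds two statuses together). A worked example with invented numbers:

#include <stdio.h>

#define XEN_PAGE_SIZE	4096u

int main(void)
{
	unsigned int dest_offset = 3500, amount = 1000;
	unsigned int split = 0;

	if (dest_offset + amount > XEN_PAGE_SIZE) {
		amount = XEN_PAGE_SIZE - dest_offset;	/* first op: 596 bytes */
		split = 1;	/* second op covers the remaining 404 */
	}
	printf("first op: %u bytes, split=%u\n", amount, split);
	return 0;
}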


@ -387,7 +387,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
@ -408,17 +409,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
io_uring_cmd_done(ioucmd, status, result);
io_uring_cmd_done(ioucmd, status, result, issue_flags);
}
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
unsigned issue_flags)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@ -440,7 +442,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
nvme_uring_task_cb(ioucmd);
nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
@ -462,7 +464,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
nvme_uring_task_meta_cb(ioucmd);
nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);


@ -3548,6 +3548,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),


@ -645,11 +645,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_FAST_LINK_MODE;
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
if (of_property_read_bool(np, "snps,enable-cdm-check")) {
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
@ -657,6 +652,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
}
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_FAST_LINK_MODE;
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");


@ -865,32 +865,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
const struct pin_desc *pd;
unsigned long flags;
u32 pin_reg, mask;
int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
BIT(WAKE_CNTRL_OFF_S4);
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
pd = pin_desc_get(gpio_dev->pctrl, pin);
if (!pd)
return;
if (!pd)
continue;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + pin * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + pin * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
int i;
pin_reg = readl(gpio_dev->base + i * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
for (i = 0; i < desc->npins; i++)
amd_gpio_irq_init_pin(gpio_dev, i);
}
#ifdef CONFIG_PM_SLEEP
@ -943,8 +945,10 @@ static int amd_gpio_resume(struct device *dev)
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
if (!amd_gpio_should_save(gpio_dev, pin))
if (!amd_gpio_should_save(gpio_dev, pin)) {
amd_gpio_irq_init_pin(gpio_dev, pin);
continue;
}
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;


@ -1176,7 +1176,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
dev_err(dev, "can't add the irq domain\n");
return -ENODEV;
}
atmel_pioctrl->irq_domain->name = "atmel gpio";
for (i = 0; i < atmel_pioctrl->npins; i++) {
int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);

Some files were not shown because too many files have changed in this diff.