Merge 5.10.216 into android12-5.10-lts

Changes in 5.10.216
	batman-adv: Avoid infinite loop trying to resize local TT
	Bluetooth: Fix memory leak in hci_req_sync_complete()
	media: cec: core: remove length check of Timer Status
	nouveau: fix function cast warning
	net: openvswitch: fix unwanted error log on timeout policy probing
	u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file
	xsk: validate user input for XDP_{UMEM|COMPLETION}_FILL_RING
	geneve: fix header validation in geneve[6]_xmit_skb
	octeontx2-af: Fix NIX SQ mode and BP config
	ipv6: fib: hide unused 'pn' variable
	ipv4/route: avoid unused-but-set-variable warning
	ipv6: fix race condition between ipv6_get_ifaddr and ipv6_del_addr
	Bluetooth: SCO: Fix not validating setsockopt user input
	netfilter: complete validation of user input
	net/mlx5: Properly link new fs rules into the tree
	af_unix: Do not use atomic ops for unix_sk(sk)->inflight.
	af_unix: Fix garbage collector racing against connect()
	net: ena: Fix potential sign extension issue
	net: ena: Wrong missing IO completions check order
	net: ena: Fix incorrect descriptor free behavior
	iommu/vt-d: Allocate local memory for page request queue
	mailbox: imx: fix suspend failure
	btrfs: qgroup: correctly model root qgroup rsv in convert
	drm/client: Fully protect modes[] with dev->mode_config.mutex
	vhost: Add smp_rmb() in vhost_vq_avail_empty()
	x86/cpu: Actually turn off mitigations by default for SPECULATION_MITIGATIONS=n
	selftests: timers: Fix abs() warning in posix_timers test
	x86/apic: Force native_apic_mem_read() to use the MOV instruction
	irqflags: Explicitly ignore lockdep_hrtimer_exit() argument
	btrfs: record delayed inode root in transaction
	riscv: Enable per-task stack canaries
	riscv: process: Fix kernel gp leakage
	selftests/ftrace: Limit length in subsystem-enable tests
	kprobes: Fix possible use-after-free issue on kprobe registration
	Revert "tracing/trigger: Fix to return error if failed to alloc snapshot"
	netfilter: nf_tables: Fix potential data-race in __nft_expr_type_get()
	netfilter: nft_set_pipapo: do not free live element
	tun: limit printing rate when illegal packet received by tun dev
	RDMA/rxe: Fix the problem "mutex_destroy missing"
	RDMA/cm: Print the old state when cm_destroy_id gets timeout
	RDMA/mlx5: Fix port number for counter query in multi-port configuration
	drm: nv04: Fix out of bounds access
	drm/panel: visionox-rm69299: don't unregister DSI device
	clk: Remove prepare_lock hold assertion in __clk_release()
	clk: Mark 'all_lists' as const
	clk: remove extra empty line
	clk: Print an info line before disabling unused clocks
	clk: Initialize struct clk_core kref earlier
	clk: Get runtime PM before walking tree during disable_unused
	x86/cpufeatures: Fix dependencies for GFNI, VAES, and VPCLMULQDQ
	binder: check offset alignment in binder_get_object()
	thunderbolt: Avoid notify PM core about runtime PM resume
	thunderbolt: Fix wake configurations after device unplug
	comedi: vmk80xx: fix incomplete endpoint checking
	serial/pmac_zilog: Remove flawed mitigation for rx irq flood
	USB: serial: option: add Fibocom FM135-GL variants
	USB: serial: option: add support for Fibocom FM650/FG650
	USB: serial: option: add Lonsung U8300/U9300 product
	USB: serial: option: support Quectel EM060K sub-models
	USB: serial: option: add Rolling RW101-GL and RW135-GL support
	USB: serial: option: add Telit FN920C04 rmnet compositions
	Revert "usb: cdc-wdm: close race between read and workqueue"
	usb: dwc2: host: Fix dereference issue in DDMA completion flow.
	usb: Disable USB3 LPM at shutdown
	mei: me: disable RPL-S on SPS and IGN firmwares
	speakup: Avoid crash on very long word
	fs: sysfs: Fix reference leak in sysfs_break_active_protection()
	init/main.c: Fix potential static_command_line memory overflow
	drm/amdgpu: validate the parameters of bo mapping operations more clearly
	nouveau: fix instmem race condition around ptr stores
	nilfs2: fix OOB in nilfs_set_de_type
	arm64: dts: rockchip: fix alphabetical ordering RK3399 puma
	arm64: dts: rockchip: enable internal pull-up on PCIE_WAKE# for RK3399 Puma
	arm64: dts: rockchip: Remove unsupported node from the Pinebook Pro dts
	arm64: dts: mediatek: mt7622: add support for coherent DMA
	arm64: dts: mediatek: mt7622: introduce nodes for Wireless Ethernet Dispatch
	arm64: dts: mediatek: mt7622: fix clock controllers
	arm64: dts: mediatek: mt7622: fix IR nodename
	arm64: dts: mediatek: mt7622: fix ethernet controller "compatible"
	arm64: dts: mediatek: mt7622: drop "reset-names" from thermal block
	arm64: dts: mediatek: mt2712: fix validation errors
	ARC: [plat-hsdk]: Remove misplaced interrupt-cells property
	wifi: iwlwifi: mvm: remove old PASN station when adding a new one
	vxlan: drop packets from invalid src-address
	mlxsw: core: Unregister EMAD trap using FORWARD action
	NFC: trf7970a: disable all regulators on removal
	ipv4: check for NULL idev in ip_route_use_hint()
	net: usb: ax88179_178a: stop lying about skb->truesize
	net: gtp: Fix Use-After-Free in gtp_dellink
	ipvs: Fix checksumming on GSO of SCTP packets
	net: openvswitch: Fix Use-After-Free in ovs_ct_exit
	mlxsw: spectrum_acl_tcam: Fix race during rehash delayed work
	mlxsw: spectrum_acl_tcam: Fix possible use-after-free during activity update
	mlxsw: spectrum_acl_tcam: Fix possible use-after-free during rehash
	mlxsw: spectrum_acl_tcam: Rate limit error message
	mlxsw: spectrum_acl_tcam: Fix memory leak during rehash
	mlxsw: spectrum_acl_tcam: Fix warning during rehash
	mlxsw: spectrum_acl_tcam: Fix incorrect list API usage
	mlxsw: spectrum_acl_tcam: Fix memory leak when canceling rehash work
	netfilter: nf_tables: honor table dormant flag from netdev release event path
	i40e: Do not use WQ_MEM_RECLAIM flag for workqueue
	i40e: Report MFS in decimal base instead of hex
	iavf: Fix TC config comparison with existing adapter TC config
	net: ethernet: ti: am65-cpts: Fix PTPv1 message type on TX packets
	af_unix: Suppress false-positive lockdep splat for spin_lock() in __unix_gc().
	serial: core: Provide port lock wrappers
	serial: mxs-auart: add spinlock around changing cts state
	Revert "crypto: api - Disallow identical driver names"
	net/mlx5e: Fix a race in command alloc flow
	tracing: Show size of requested perf buffer
	tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
	PM / devfreq: Fix buffer overflow in trans_stat_show
	Bluetooth: Fix type of len in {l2cap,sco}_sock_getsockopt_old()
	Bluetooth: btusb: Add Realtek RTL8852BE support ID 0x0bda:0x4853
	btrfs: fix information leak in btrfs_ioctl_logical_to_ino()
	cpu: Re-enable CPU mitigations by default for !X86 architectures
	arm64: dts: rockchip: enable internal pull-up for Q7_THRM# on RK3399 Puma
	drm/amdgpu/sdma5.2: use legacy HDP flush for SDMA2/3
	drm/amdgpu: Fix leak when GPU memory allocation fails
	irqchip/gic-v3-its: Prevent double free on error
	ethernet: Add helper for assigning packet type when dest address does not match device address
	net: b44: set pause params only when interface is up
	stackdepot: respect __GFP_NOLOCKDEP allocation flag
	mtd: diskonchip: work around ubsan link failure
	tcp: Clean up kernel listener's reqsk in inet_twsk_purge()
	tcp: Fix NEW_SYN_RECV handling in inet_twsk_purge()
	dmaengine: owl: fix register access functions
	idma64: Don't try to serve interrupts when device is powered off
	dma: xilinx_dpdma: Fix locking
	riscv: fix VMALLOC_START definition
	riscv: Fix TASK_SIZE on 64-bit NOMMU
	i2c: smbus: fix NULL function pointer dereference
	HID: i2c-hid: remove I2C_HID_READ_PENDING flag to prevent lock-up
	bounds: Use the right number of bits for power-of-two CONFIG_NR_CPUS
	udp: preserve the connected status if only UDP cmsg
	serial: core: fix kernel-doc for uart_port_unlock_irqrestore()
	riscv: Disable STACKPROTECTOR_PER_TASK if GCC_PLUGIN_RANDSTRUCT is enabled
	Linux 5.10.216

Change-Id: Ia2bf3ba6ed3f36a56f71543442427eb770a2400b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit c15c1199d6 by Greg Kroah-Hartman, 2024-05-15 16:07:02 +00:00
121 changed files with 1053 additions and 474 deletions


@ -66,6 +66,9 @@ Description:
echo 0 > /sys/class/devfreq/.../trans_stat
If the transition table is bigger than PAGE_SIZE, reading
this will return an -EFBIG error.
What: /sys/class/devfreq/.../userspace/set_freq
Date: September 2011
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 215
SUBLEVEL = 216
EXTRAVERSION =
NAME = Dare mighty things


@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
config ARCH_CONFIGURES_CPU_MITIGATIONS
bool
if !ARCH_CONFIGURES_CPU_MITIGATIONS
config CPU_MITIGATIONS
def_bool y
endif
menu "General architecture-dependent options"
config CRASH_CORE


@ -205,7 +205,6 @@ dmac_cfg_clk: dmac-gpu-cfg-clk {
};
gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;


@ -127,7 +127,7 @@ ethernet_phy0: ethernet-phy@5 {
};
&pio {
eth_default: eth_default {
eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@ -154,7 +154,7 @@ mdio_pins {
};
};
eth_sleep: eth_sleep {
eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@ -180,14 +180,14 @@ mdio_pins {
};
};
usb0_id_pins_float: usb0_iddig {
usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
usb1_id_pins_float: usb1_iddig {
usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;


@ -249,10 +249,11 @@ topckgen: syscon@10000000 {
#clock-cells = <1>;
};
infracfg: syscon@10001000 {
infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pericfg: syscon@10003000 {


@ -244,7 +244,7 @@ scpsys: power-controller@10006000 {
clock-names = "hif_sel";
};
cir: cir@10009000 {
cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@ -275,16 +275,14 @@ thermal_calibration: calib@198 {
};
};
apmixedsys: apmixedsys@10209000 {
compatible = "mediatek,mt7622-apmixedsys",
"syscon";
apmixedsys: clock-controller@10209000 {
compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
topckgen: topckgen@10210000 {
compatible = "mediatek,mt7622-topckgen",
"syscon";
topckgen: clock-controller@10210000 {
compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@ -357,7 +355,7 @@ cci_control1: slave-if@4000 {
};
cci_control2: slave-if@5000 {
compatible = "arm,cci-400-ctrl-if";
compatible = "arm,cci-400-ctrl-if", "syscon";
interface-type = "ace";
reg = <0x5000 0x1000>;
};
@ -507,7 +505,6 @@ thermal: thermal@1100b000 {
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@ -715,9 +712,8 @@ wmac: wmac@18000000 {
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
ssusbsys: ssusbsys@1a000000 {
compatible = "mediatek,mt7622-ssusbsys",
"syscon";
ssusbsys: clock-controller@1a000000 {
compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@ -774,9 +770,8 @@ u2port1: usb-phy@1a0c5000 {
};
};
pciesys: pciesys@1a100800 {
compatible = "mediatek,mt7622-pciesys",
"syscon";
pciesys: clock-controller@1a100800 {
compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@ -893,7 +888,13 @@ sata_port: sata-phy@1a243000 {
};
};
ethsys: syscon@1b000000 {
hifsys: clock-controller@1af00000 {
compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
#clock-cells = <1>;
};
ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@ -911,10 +912,28 @@ hsdma: dma-controller@1b007000 {
#dma-cells = <1>;
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
pcie_mirror: pcie-mirror@10000400 {
compatible = "mediatek,mt7622-pcie-mirror",
"syscon";
reg = <0 0x10000400 0 0x10>;
};
wed0: wed@1020a000 {
compatible = "mediatek,mt7622-wed",
"syscon";
reg = <0 0x1020a000 0 0x1000>;
interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
};
wed1: wed@1020b000 {
compatible = "mediatek,mt7622-wed",
"syscon";
reg = <0 0x1020b000 0 0x1000>;
interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
@ -937,6 +956,11 @@ eth: ethernet@1b100000 {
power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys>;
mediatek,cci-control = <&cci_control2>;
mediatek,wed = <&wed0>, <&wed1>;
mediatek,pcie-mirror = <&pcie_mirror>;
mediatek,hifsys = <&hifsys>;
dma-coherent;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";


@ -784,7 +784,6 @@ &pcie_phy {
};
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";


@ -430,16 +430,22 @@ &io_domains {
gpio1830-supply = <&vcc_1v8>;
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
&pcie_clkreqn_cpm {
rockchip,pins =
<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
gpios {
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@ -470,6 +476,15 @@ vcc5v0_host_en: vcc5v0-host-en {
};
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
};
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable


@ -445,6 +445,14 @@ config EFI
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
config CC_HAVE_STACKPROTECTOR_TLS
def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0)
config STACKPROTECTOR_PER_TASK
def_bool y
depends on !GCC_PLUGIN_RANDSTRUCT
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
endmenu
config BUILTIN_DTB


@ -88,6 +88,16 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# architectures. It's faster to have GCC emit only aligned accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
-mstack-protector-guard-reg=tp \
-mstack-protector-guard-offset=$(shell \
awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
include/generated/asm-offsets.h))
endif
# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)


@ -456,8 +456,8 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}


@ -24,6 +24,7 @@ static __always_inline void boot_init_stack_canary(void)
canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
__stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_RISCV_STACKPROTECTOR_H */


@ -66,6 +66,9 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
#ifdef CONFIG_STACKPROTECTOR
OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(PT_SIZE, sizeof(struct pt_regs));
OFFSET(PT_EPC, pt_regs, epc);


@ -22,9 +22,7 @@
#include <asm/switch_to.h>
#include <asm/thread_info.h>
register unsigned long gp_in_global __asm__("gp");
#ifdef CONFIG_STACKPROTECTOR
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@ -117,7 +115,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp_in_global;
/* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;


@ -58,6 +58,7 @@ config X86
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
@ -2415,17 +2416,17 @@ config CC_HAS_SLS
config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)
menuconfig SPECULATION_MITIGATIONS
bool "Mitigations for speculative execution vulnerabilities"
menuconfig CPU_MITIGATIONS
bool "Mitigations for CPU vulnerabilities"
default y
help
Say Y here to enable options which enable mitigations for
speculative execution hardware vulnerabilities.
Say Y here to enable options which enable mitigations for hardware
vulnerabilities (usually related to speculative execution).
If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.
if SPECULATION_MITIGATIONS
if CPU_MITIGATIONS
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"


@ -12,6 +12,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@ -111,7 +112,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
return readl((void __iomem *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);
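
The native_apic_mem_read() hunk above replaces a plain volatile dereference with readl(). A volatile load leaves the instruction choice to the compiler, which may emit something the SEV-ES #VC handler cannot emulate, whereas x86's readl() is inline asm pinned to a 32-bit MOV. A minimal userspace sketch of the forced-MOV read, assuming an x86-64 GCC or Clang toolchain (mmio_read32 is a hypothetical name):

#include <stdint.h>

/* Mirrors x86's __raw_readl(): the load is spelled out as a MOV in inline
 * asm, instead of letting the optimizer pick an instruction for a volatile
 * dereference. */
static inline uint32_t mmio_read32(const volatile void *addr)
{
	uint32_t ret;

	asm volatile("movl %1, %0"
		     : "=r" (ret)
		     : "m" (*(const volatile uint32_t *)addr));
	return ret;
}

int main(void)
{
	uint32_t reg = 42;	/* stands in for a mapped APIC register */

	return mmio_read32(&reg) == 42 ? 0 : 1;
}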


@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },


@ -246,7 +246,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
!strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}


@ -576,7 +576,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
while (tmpx < vc->vc_cols - 1) {
while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);


@ -418,6 +418,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |


@ -37,7 +37,11 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static struct hlist_head *all_lists[] = {
/* List of registered clks that use runtime PM */
static HLIST_HEAD(clk_rpm_list);
static DEFINE_MUTEX(clk_rpm_list_lock);
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@ -131,6 +136,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
/**
* clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
*
* Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
* that disabling unused clks avoids a deadlock where a device is runtime PM
* resuming/suspending and the runtime PM callback is trying to grab the
* prepare_lock for something like clk_prepare_enable() while
* clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
* PM resume/suspend the device as well.
*
* Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
* success. Otherwise the lock is released on failure.
*
* Return: 0 on success, negative errno otherwise.
*/
static int clk_pm_runtime_get_all(void)
{
int ret;
struct clk_core *core, *failed;
/*
* Grab the list lock to prevent any new clks from being registered
* or unregistered until clk_pm_runtime_put_all().
*/
mutex_lock(&clk_rpm_list_lock);
/*
* Runtime PM "get" all the devices that are needed for the clks
* currently registered. Do this without holding the prepare_lock, to
* avoid the deadlock.
*/
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
ret = clk_pm_runtime_get(core);
if (ret) {
failed = core;
pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
dev_name(failed->dev), failed->name);
goto err;
}
}
return 0;
err:
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
if (core == failed)
break;
clk_pm_runtime_put(core);
}
mutex_unlock(&clk_rpm_list_lock);
return ret;
}
/**
* clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
*
* Put the runtime PM references taken in clk_pm_runtime_get_all() and release
* the 'clk_rpm_list_lock'.
*/
static void clk_pm_runtime_put_all(void)
{
struct clk_core *core;
hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
clk_pm_runtime_put(core);
mutex_unlock(&clk_rpm_list_lock);
}
static void clk_pm_runtime_init(struct clk_core *core)
{
struct device *dev = core->dev;
if (dev && pm_runtime_enabled(dev)) {
core->rpm_enabled = true;
mutex_lock(&clk_rpm_list_lock);
hlist_add_head(&core->rpm_node, &clk_rpm_list);
mutex_unlock(&clk_rpm_list_lock);
}
}
/*** locking ***/
static void clk_prepare_lock(void)
{
@ -1237,9 +1325,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
if (clk_pm_runtime_get(core))
return;
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@ -1248,8 +1333,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@ -1269,9 +1352,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
if (clk_pm_runtime_get(core))
goto unprepare_out;
flags = clk_enable_lock();
if (core->enable_count)
@ -1296,8 +1376,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@ -1313,12 +1391,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
pr_info("clk: Disabling unused clocks\n");
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
/*
* Grab the prepare lock to keep the clk topology stable while iterating
* over clks.
*/
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@ -1335,6 +1423,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
clk_pm_runtime_put_all();
return 0;
}
late_initcall_sync(clk_disable_unused);
@ -3713,9 +3803,6 @@ static int __clk_core_init(struct clk_core *core)
clk_core_hold_state(core);
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@ -3944,6 +4031,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
/* Free memory allocated for a struct clk_core */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
if (core->rpm_enabled) {
mutex_lock(&clk_rpm_list_lock);
hlist_del(&core->rpm_node);
mutex_unlock(&clk_rpm_list_lock);
}
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@ -3964,6 +4067,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
kref_init(&core->ref);
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@ -3976,9 +4081,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@ -4018,12 +4122,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
clk_core_free_parent_map(core);
fail_parents:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@ -4103,18 +4205,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
lockdep_assert_held(&prepare_lock);
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@ -4167,7 +4257,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
struct hlist_head **lists;
const struct hlist_head **lists;
struct clk_core *root;
lockdep_assert_held(&prepare_lock);
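
The clk_pm_runtime_get_all() hunk above follows the usual acquire-all-or-unwind idiom: take a reference on every entry, and on failure walk the list again, releasing only what was taken before the failing entry. A self-contained sketch of that unwind pattern, with a plain linked list and counters standing in for the kernel's hlist and runtime-PM calls:

#include <stdio.h>
#include <stddef.h>

struct node {
	int id;
	int refs;
	struct node *next;
};

/* pretend runtime-PM get: fails for id 3 to exercise the unwind */
static int get_ref(struct node *n)
{
	if (n->id == 3)
		return -1;
	n->refs++;
	return 0;
}

static void put_ref(struct node *n)
{
	n->refs--;
}

static int get_all(struct node *head)
{
	struct node *n, *failed = NULL;

	for (n = head; n; n = n->next) {
		if (get_ref(n)) {
			failed = n;
			goto err;
		}
	}
	return 0;
err:
	/* release only the references taken before the failing entry */
	for (n = head; n != failed; n = n->next)
		put_ref(n);
	return -1;
}

int main(void)
{
	struct node c = { 3, 0, NULL };
	struct node b = { 2, 0, &c };
	struct node a = { 1, 0, &b };

	printf("%d\n", get_all(&a));			/* -1 */
	printf("%d %d %d\n", a.refs, b.refs, c.refs);	/* 0 0 0 */
	return 0;
}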


@ -1641,7 +1641,7 @@ static ssize_t trans_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *df = to_devfreq(dev);
ssize_t len;
ssize_t len = 0;
int i, j;
unsigned int max_state;
@ -1650,7 +1650,7 @@ static ssize_t trans_stat_show(struct device *dev,
max_state = df->profile->max_state;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
mutex_lock(&df->lock);
if (!df->stop_polling &&
@ -1660,33 +1660,54 @@ static ssize_t trans_stat_show(struct device *dev,
}
mutex_unlock(&df->lock);
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
len += sprintf(buf + len, "%10lu",
df->profile->freq_table[i]);
len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += scnprintf(buf + len, PAGE_SIZE - len, " :");
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
df->profile->freq_table[i]);
}
if (len >= PAGE_SIZE - 1)
return PAGE_SIZE - 1;
len += sprintf(buf + len, " time(ms)\n");
len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
if (df->profile->freq_table[i]
== df->previous_freq) {
len += sprintf(buf + len, "*");
len += scnprintf(buf + len, PAGE_SIZE - len, "*");
} else {
len += sprintf(buf + len, " ");
len += scnprintf(buf + len, PAGE_SIZE - len, " ");
}
len += sprintf(buf + len, "%10lu:",
df->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
if (len >= PAGE_SIZE - 1)
break;
len += sprintf(buf + len, "%10llu\n", (u64)
jiffies64_to_msecs(df->stats.time_in_state[i]));
len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
df->profile->freq_table[i]);
for (j = 0; j < max_state; j++) {
if (len >= PAGE_SIZE - 1)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
}
if (len >= PAGE_SIZE - 1)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
jiffies64_to_msecs(df->stats.time_in_state[i]));
}
if (len < PAGE_SIZE - 1)
len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
df->stats.total_trans);
if (len >= PAGE_SIZE - 1) {
pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
len += sprintf(buf + len, "Total transition : %u\n",
df->stats.total_trans);
return len;
}
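
The trans_stat_show() rework above swaps unbounded sprintf() appends for scnprintf() plus explicit PAGE_SIZE - 1 checks, since a sysfs show() callback writes into exactly one page. A userspace sketch of the bounded-append idiom; scnprintf_demo below is a stand-in with the kernel function's semantics, not the kernel implementation:

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* a sysfs show() callback gets exactly one page */

/* stand-in with scnprintf() semantics: returns bytes actually written,
 * never more than size - 1, and never touches memory past buf + size */
static int scnprintf_demo(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[PAGE_SIZE];
	int len = 0;
	int i;

	for (i = 0; i < 100000; i++) {
		if (len >= PAGE_SIZE - 1)
			break;	/* table too big: stop, do not overflow */
		len += scnprintf_demo(buf + len, PAGE_SIZE - len, "%10d", i);
	}
	printf("wrote %d bytes, buffer intact\n", len);
	return 0;
}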


@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
/* Since IRQ may be shared, check if DMA controller is powered on */
if (status == GENMASK(31, 0))
return IRQ_NONE;
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
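
The added guard relies on a property of dead MMIO: reads from a powered-off device on a shared interrupt line typically come back all ones, so a status word of 0xffffffff means the interrupt is someone else's. A sketch of the bail-out, with stand-in types rather than the Linux IRQ API:

#include <stdint.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn demo_irq(uint32_t status)
{
	/* GENMASK(31, 0): device is powered off, not our interrupt */
	if (status == UINT32_MAX)
		return IRQ_NONE;

	/* ... dispatch per-channel handling here ... */
	return IRQ_HANDLED;
}

int main(void)
{
	return demo_irq(0xffffffffu) == IRQ_NONE ? 0 : 1;
}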


@ -249,7 +249,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
writel(val, pchan->base + reg);
writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@ -273,7 +273,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
writel(val, od->base + reg);
writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
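
Both owl-dma hunks above fix the same read-modify-write slip: the helper read the register, updated a local copy, then wrote the bit mask back instead of the copy, wiping every other bit. A compact sketch of the bug and the fix (a plain variable stands in for MMIO):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* stands in for an MMIO register */

static void update_bits(uint32_t mask, bool set)
{
	uint32_t regval = reg;		/* readl() */

	if (set)
		regval |= mask;
	else
		regval &= ~mask;
	/* the fix: write the modified copy back; the bug wrote `mask`,
	 * clobbering every other bit in the register */
	reg = regval;			/* writel(regval, ...) */
}

int main(void)
{
	reg = 0xf0;
	update_bits(0x01, true);
	printf("%#x\n", reg);	/* 0xf1; the buggy version yields 0x1 */
	return 0;
}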


@ -213,7 +213,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
* @lock: lock to access struct xilinx_dpdma_chan
* @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
* @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@ -1101,12 +1102,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
spin_lock_irqsave(&chan->vchan.lock, flags);
spin_lock_irqsave(&chan->lock, flags);
spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@ -1491,7 +1496,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
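
The xilinx_dpdma changes establish one lock order, chan->lock before vchan.lock, and document it on the struct; every path needing both locks must nest them the same way, or two CPUs taking them in opposite orders can deadlock. A pthread sketch of the rule, with illustrative names:

#include <pthread.h>

struct chan {
	pthread_mutex_t lock;		/* outer: always taken first */
	pthread_mutex_t vchan_lock;	/* inner: only while holding lock */
};

static void issue_pending(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	pthread_mutex_lock(&c->vchan_lock);
	/* vchan_issue_pending() + queue_transfer() would run here */
	pthread_mutex_unlock(&c->vchan_lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan c = {
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER,
	};

	issue_pending(&c);
	return 0;
}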


@ -1259,6 +1259,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
kfree(*mem);
err:


@ -2201,6 +2201,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -2227,21 +2258,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -2294,17 +2318,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -2318,7 +2334,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -2405,10 +2421,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
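
The new amdgpu_vm_verify_parameters() helper centralizes range validation with check_add_overflow(), which in the kernel wraps __builtin_add_overflow(). A userspace sketch of the same checks, with hypothetical names and 4 KiB pages assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12
#define GPU_PAGE_MASK ((1ULL << GPU_PAGE_SHIFT) - 1)	/* 4 KiB pages */

static bool validate_range(uint64_t saddr, uint64_t offset, uint64_t size,
			   uint64_t bo_size, uint64_t max_pfn)
{
	uint64_t tmp;

	/* all three quantities must be page aligned */
	if ((saddr | offset | size) & GPU_PAGE_MASK)
		return false;
	/* check_add_overflow() is a wrapper around this builtin; also
	 * reject size == 0, which would make end < begin */
	if (__builtin_add_overflow(saddr, size, &tmp) ||
	    __builtin_add_overflow(offset, size, &tmp) ||
	    size == 0)
		return false;
	/* the object must cover the mapped window */
	if (bo_size && offset + size > bo_size)
		return false;
	/* the last page frame must stay below max_pfn */
	return ((saddr + size - 1) >> GPU_PAGE_SHIFT) < max_pfn;
}

int main(void)
{
	/* wraps around: rejected */
	printf("%d\n", validate_range(0x1000, 0,
				      UINT64_MAX & ~GPU_PAGE_MASK,
				      0, 1ULL << 20));
	/* sane 8 KiB mapping inside a 16 KiB object: accepted */
	printf("%d\n", validate_range(0x1000, 0, 0x2000, 0x4000,
				      1ULL << 20));
	return 0;
}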


@ -390,17 +390,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
if (ring->me > 1) {
amdgpu_asic_flush_hdp(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
}
/**


@ -774,6 +774,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
/* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@ -842,7 +843,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@ -872,6 +872,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:


@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@ -1672,7 +1673,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@ -1758,26 +1759,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
all_heads, DCB_OUTPUT_B);
}
static int


@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
static void of_fini(void *p)
{
kfree(p);
}
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
.fini = (void(*)(void *))kfree,
.fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,


@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
if (refcount_inc_not_zero(&iobj->maps)) {
/* read barrier match the wmb on refcount set */
smp_rmb();
return iobj->map;
}
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
/* barrier to ensure the ptrs are written before refcount is set */
smp_wmb();
refcount_set(&iobj->maps, 1);
}
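
The instmem fix pairs a write barrier on the publishing side (the ptrs are written before the refcount is set) with a read barrier on the consuming side (after refcount_inc_not_zero() succeeds). A C11 sketch of the same publish/consume pairing using release/acquire atomics; names are illustrative:

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	void *map;		/* payload set up by the mapping thread */
	atomic_int maps;	/* refcount; 0 means not mapped yet */
};

static void publish(struct obj *o, void *map)
{
	o->map = map;
	/* release store: the map pointer is globally visible before the
	 * refcount flips to nonzero (the kernel patch uses smp_wmb()) */
	atomic_store_explicit(&o->maps, 1, memory_order_release);
}

static void *try_acquire(struct obj *o)
{
	/* acquire load pairs with the release store, as the added
	 * smp_rmb() pairs with the smp_wmb() */
	if (atomic_load_explicit(&o->maps, memory_order_acquire) > 0)
		return o->map;	/* guaranteed to see the published value */
	return NULL;
}

int main(void)
{
	static struct obj o;
	static int payload;

	publish(&o, &payload);
	return try_acquire(&o) == &payload ? 0 : 1;
}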


@ -261,8 +261,6 @@ static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
mipi_dsi_device_unregister(ctx->dsi);
drm_panel_remove(&ctx->panel);
return 0;
}


@ -56,7 +56,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@ -256,7 +255,6 @@ static int __i2c_hid_command(struct i2c_client *client,
msg[1].len = data_len;
msg[1].buf = buf_recv;
msg_num = 2;
set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
if (wait)
@ -264,9 +262,6 @@ static int __i2c_hid_command(struct i2c_client *client,
ret = i2c_transfer(client->adapter, msg, msg_num);
if (data_len > 0)
clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
if (ret != msg_num)
return ret < 0 ? ret : -EIO;
@ -538,9 +533,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
return IRQ_HANDLED;
i2c_hid_get_input(ihid);
return IRQ_HANDLED;


@ -2013,13 +2013,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
* takes place. adap->algo->master_xfer existence isn't checked.
* takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@ -2086,11 +2091,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,


@ -1057,23 +1057,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@ -1187,7 +1190,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id);
cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)


@ -166,7 +166,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;


@ -35,6 +35,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */


@ -33,7 +33,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);


@ -4522,13 +4522,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
set_bit(i, bitmap);
}
if (err) {
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
if (err)
its_vpe_irq_domain_free(domain, virq, i);
return err;
}


@ -331,8 +331,6 @@ static int imx_mu_startup(struct mbox_chan *chan)
break;
}
priv->suspend = true;
return 0;
}
@ -550,8 +548,6 @@ static int imx_mu_probe(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
priv->suspend = false;
return 0;
disable_runtime_pm:
@ -614,6 +610,8 @@ static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
if (!priv->clk)
priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
priv->suspend = true;
return 0;
}
@ -632,6 +630,8 @@ static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
priv->suspend = false;
return 0;
}


@ -1116,20 +1116,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
case CEC_MSG_TIMER_STATUS:
if (msg->msg[2] & 0x10) {
switch (msg->msg[2] & 0xf) {
case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
if (msg->len < 5)
valid_la = false;
break;
}
} else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
if (msg->len < 5)
valid_la = false;
}
break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:


@ -115,7 +115,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},


@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
0xffffffff };
};
static struct mtd_info *doclist = NULL;
@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
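
The diskonchip change drops the 0xffffffff terminator and bounds the loop with ARRAY_SIZE() instead, the usual way to iterate a fixed table without a sentinel (and, per the commit subject, a workaround for a UBSAN link failure). A minimal sketch of the idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* no 0xffffffff terminator: the array length is the loop bound */
static const unsigned long probe_locations[] = {
	0xe8000, 0xea000, 0xec000, 0xee000,
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(probe_locations); i++)
		printf("probe %#lx\n", probe_locations[i]);
	return 0;
}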


@ -352,7 +352,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
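
The added cast addresses C's multiply semantics: with two 32-bit operands the product is computed in 32 bits and only then widened, so a large product wraps, or sign-extends if the operands are signed, before it ever reaches the 64-bit destination. Casting one operand first forces the whole multiply into 64 bits. A demonstration, assuming a 64-bit size_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buffer_size = 1u << 20;	/* 1 MiB buffers */
	uint32_t buffers_num = 1u << 13;	/* 8192 of them */

	/* multiply happens in 32 bits, wraps, then widens: 0 */
	size_t wrong = buffer_size * buffers_num;
	/* cast first, multiply in 64 bits: 8 GiB */
	size_t right = (size_t)buffer_size * buffers_num;

	printf("%zu %zu\n", wrong, right);
	return 0;
}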


@ -1105,8 +1105,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
bool is_xdp_ring;
u32 i;
is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@ -1126,10 +1129,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
if (is_xdp_ring)
xdp_return_frame(tx_info->xdpf);
else
dev_kfree_skb_any(tx_info->skb);
}
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->qid));
if (!is_xdp_ring)
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@ -3672,10 +3680,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
int i, budget, rc;
int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@ -3688,27 +3697,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
budget = ENA_MONITORED_TX_QUEUES;
budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
qid = adapter->last_monitored_tx_qid;
while (budget) {
qid = (qid + 1) % io_queue_count;
tx_ring = &adapter->tx_ring[qid];
rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
rc = !ENA_IS_XDP_INDEX(adapter, i) ?
rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
if (!budget)
break;
}
adapter->last_monitored_tx_qid = i % io_queue_count;
adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
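
The reworked completion monitor bounds each scan with a budget clamped to the queue count and resumes from the last inspected queue, wrapping with modulo, so repeated calls cover every queue without any single call scanning more queues than exist. A sketch of the loop shape; the printf stands in for the real per-queue checks:

#include <stdio.h>

#define MONITORED_TX_QUEUES 4	/* per-call scan budget */

static void scan_queues(int *last_qid, int io_queue_count)
{
	int budget = io_queue_count < MONITORED_TX_QUEUES ?
		     io_queue_count : MONITORED_TX_QUEUES;
	int qid = *last_qid;

	while (budget) {
		qid = (qid + 1) % io_queue_count;	/* wrap around */
		printf("checking queue %d\n", qid);	/* real checks here */
		budget--;
	}
	*last_qid = qid;	/* the next call resumes after this queue */
}

int main(void)
{
	int last = 0;

	scan_queues(&last, 3);	/* budget clamps to 3: each queue once */
	scan_queues(&last, 3);
	return 0;
}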


@ -2027,12 +2027,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
if (netif_running(dev)) {
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
}
spin_unlock_irq(&bp->lock);


@ -15488,8 +15488,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
if (val < MAX_FRAME_SIZE_DEFAULT)
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
pf->hw.port, val);
dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@ -16023,7 +16023,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;


@ -2642,6 +2642,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
* iavf_is_tc_config_same - Compare the mqprio TC config with the
* TC config already configured on this adapter.
* @adapter: board private structure
* @mqprio_qopt: TC config received from kernel.
*
* This function compares the TC config received from the kernel
* with the config already configured on the adapter.
*
* Return: True if configuration is same, false otherwise.
**/
static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
struct tc_mqprio_qopt *mqprio_qopt)
{
struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
int i;
if (adapter->num_tc != mqprio_qopt->num_tc)
return false;
for (i = 0; i < adapter->num_tc; i++) {
if (ch[i].count != mqprio_qopt->count[i] ||
ch[i].offset != mqprio_qopt->offset[i])
return false;
}
return true;
}
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
@ -2698,7 +2726,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
if (adapter->num_tc == num_tc)
if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;


@ -3146,19 +3146,19 @@ int rvu_nix_init(struct rvu *rvu)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
/* Disable SQ manager's sticky mode operation (set TM6 = 0)
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to same SMQ and transmitting pkts at a time.
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
cfg &= ~BIT_ULL(15);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
/* Disable SQ manager's sticky mode operation (set TM6 = 0)
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to same SMQ and transmitting pkts at a time.
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
cfg &= ~BIT_ULL(15);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);


@ -114,15 +114,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
if (ret < cmd->max_reg_cmds)
if (ret < cmd->max_reg_cmds) {
clear_bit(ret, &cmd->bitmask);
ent->idx = ret;
cmd->ent_arr[ent->idx] = ent;
}
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
@ -912,7 +915,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@ -927,15 +930,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->bitmask);
cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
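
The cmd_alloc_index() change claims the bitmap slot and publishes the ent_arr[] pointer under the same alloc_lock, closing the window where a completion could see a claimed index whose array slot still held a stale command. A pthread sketch of allocate-and-publish-under-one-lock, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

#define MAX_CMDS 8

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bitmask = (1u << MAX_CMDS) - 1;	/* 1 = slot free */
static void *ent_arr[MAX_CMDS];

static int cmd_alloc_index(void *ent)
{
	int idx = -1;
	int i;

	pthread_mutex_lock(&alloc_lock);
	for (i = 0; i < MAX_CMDS; i++) {
		if (bitmask & (1u << i)) {
			bitmask &= ~(1u << i);
			/* publish while still holding the lock, so a
			 * completion that sees the claimed slot also
			 * sees the right entry */
			ent_arr[i] = ent;
			idx = i;
			break;
		}
	}
	pthread_mutex_unlock(&alloc_lock);
	return idx;	/* -1 when all slots are busy */
}

int main(void)
{
	int dummy;

	return cmd_alloc_index(&dummy) == 0 ? 0 : 1;
}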


@ -1675,8 +1675,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
/* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}


@ -697,7 +697,7 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
EMAD, DISCARD);
EMAD, FORWARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{


@ -713,7 +713,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@ -723,6 +725,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
/* The entry markers are relative to the current chunk and therefore
* needs to be reset together with the chunk marker.
*/
ctx->current_vchunk = NULL;
ctx->start_ventry = NULL;
ctx->stop_ventry = NULL;
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
@@ -745,7 +758,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
vregion->rehash.ctx.current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -818,10 +831,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
cancel_delayed_work_sync(&vregion->rehash.dw);
if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
ctx->hints_priv)
ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1187,8 +1204,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
ventry->entry, activity);
struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
int err;
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
activity);
mutex_unlock(&vregion->lock);
return err;
}
static int
@@ -1222,6 +1245,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
WARN_ON(vchunk->chunk2);
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1240,7 +1265,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1263,6 +1288,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
if (list_empty(&vchunk->ventry_list))
goto out;
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1272,6 +1300,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
WARN_ON(ventry->vchunk != vchunk);
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1312,6 +1342,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1325,6 +1356,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
if (list_empty(&vregion->vchunk_list))
return 0;
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1353,16 +1387,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
if (ctx->this_is_rollback)
return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1373,7 +1408,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1422,6 +1456,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1474,7 +1509,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
return;
}
if (*credits >= 0)


@@ -649,6 +649,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
(skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
mtype_seqid = skb_cb->skb_mtype_seqid;
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;


@@ -893,7 +893,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_inet_may_pull(skb))
if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -990,7 +990,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_inet_may_pull(skb))
if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);


@@ -700,11 +700,12 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
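The gtp hunk moves to the _safe iterator because pdp_context_delete() removes the node being visited. The same rule in a standalone C sketch: capture the next pointer before the current node is freed.

#include <stdlib.h>

struct pdp { struct pdp *next; };

static void delete_all(struct pdp **head)
{
	struct pdp *p, *next;

	for (p = *head; p; p = next) {
		next = p->next;	/* read before p is freed */
		free(p);
	}
	*head = NULL;
}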


@@ -2141,14 +2141,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
if (net_ratelimit()) {
netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
}
WARN_ON_ONCE(1);
return -EINVAL;
}


@@ -1558,21 +1558,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
skb_trim(ax_skb, pkt_len);
skb_put(ax_skb, pkt_len);
memcpy(ax_skb->data, skb->data + 2, pkt_len);
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb->truesize = pkt_len_plus_padd +
SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);


@@ -1778,6 +1778,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
/* Ignore packets from invalid src-address */
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
return false;
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;


@@ -106,6 +106,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {


@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
struct regulator *regulator;
struct regulator *vin_regulator;
struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1882,7 +1883,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1925,7 +1926,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
ret = regulator_disable(trf->regulator);
ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2064,37 +2065,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
trf->regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->vin_regulator)) {
ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
uvolts = regulator_get_voltage(trf->regulator);
uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->vddio_regulator)) {
ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}
if (regulator_get_voltage(trf->regulator) == 1800000) {
if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2107,7 +2108,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
goto err_disable_regulator;
goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2136,8 +2137,10 @@ static int trf7970a_probe(struct spi_device *spi)
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
err_disable_regulator:
regulator_disable(trf->regulator);
err_disable_vddio_regulator:
regulator_disable(trf->vddio_regulator);
err_disable_vin_regulator:
regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2156,7 +2159,8 @@ static int trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
regulator_disable(trf->regulator);
regulator_disable(trf->vddio_regulator);
regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);


@@ -642,32 +642,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_desc;
int i;
struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
int ret;
if (iface_desc->desc.bNumEndpoints != 2)
if (devpriv->model == VMK8061_MODEL)
ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
&ep_tx_desc, NULL, NULL);
else
ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
&ep_rx_desc, &ep_tx_desc);
if (ret)
return -ENODEV;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
ep_desc = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_int_in(ep_desc) ||
usb_endpoint_is_bulk_in(ep_desc)) {
if (!devpriv->ep_rx)
devpriv->ep_rx = ep_desc;
continue;
}
if (usb_endpoint_is_int_out(ep_desc) ||
usb_endpoint_is_bulk_out(ep_desc)) {
if (!devpriv->ep_tx)
devpriv->ep_tx = ep_desc;
continue;
}
}
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
devpriv->ep_rx = ep_rx_desc;
devpriv->ep_tx = ep_tx_desc;
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;


@@ -2402,22 +2402,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
if (sw->is_unplugged)
return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
/*
* Unconfigure downstream port so that wake-on-connect can be
* configured after router unplug. No need to unconfigure upstream port
* since its router is unplugged.
*/
up = tb_upstream_port(sw);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
if (sw->is_unplugged)
return;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
usb4_port_unconfigure(up);
else
tb_lc_unconfigure_port(up);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
}
static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
@@ -2631,7 +2638,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
int tb_switch_resume(struct tb_switch *sw)
static void tb_switch_check_wakes(struct tb_switch *sw)
{
if (device_may_wakeup(&sw->dev)) {
if (tb_switch_is_usb4(sw))
usb4_switch_check_wakes(sw);
}
}
/**
* tb_switch_resume() - Resume a switch after sleep
* @sw: Switch to resume
* @runtime: Is this resume from runtime suspend or system sleep
*
* Resumes and re-enumerates router (and all its children), if still plugged
* after suspend. Don't enumerate device router whose UID was changed during
* suspend. If this is resume from system sleep, notifies PM core about the
* wakes occurred during suspend. Disables all wakes, except USB4 wake of
* upstream port for USB4 routers that shall be always enabled.
*/
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -2676,6 +2702,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
if (!runtime)
tb_switch_check_wakes(sw);
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -2702,7 +2731,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
if (port->remote && tb_switch_resume(port->remote->sw)) {
if (port->remote &&
tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);


@@ -1385,7 +1385,7 @@ static int tb_resume_noirq(struct tb *tb)
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -1488,7 +1488,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
tb_switch_resume(tb->root_switch);
tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)


@@ -653,7 +653,7 @@ int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
@@ -957,6 +957,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
return NULL;
}
void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,


@@ -197,15 +197,18 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
return 0;
}
static void usb4_switch_check_wakes(struct tb_switch *sw)
/**
* usb4_switch_check_wakes() - Check for wakes and notify PM core about them
* @sw: Router whose wakes to check
*
* Checks wakes occurred during suspend and notify the PM core about them.
*/
void usb4_switch_check_wakes(struct tb_switch *sw)
{
struct tb_port *port;
bool wakeup = false;
u32 val;
if (!device_may_wakeup(&sw->dev))
return;
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -270,8 +273,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
usb4_switch_check_wakes(sw);
if (!tb_route(sw))
return 0;


@@ -1122,11 +1122,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
u32 istat;
u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
u32 stat = mxs_read(s, REG_STAT);
uart_port_lock(&s->port);
stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1162,6 +1164,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
uart_port_unlock(&s->port);
return IRQ_HANDLED;
}


@@ -217,7 +217,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -298,24 +297,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
/* We can get stuck in an infinite loop getting char 0 when the
* line is in a wrong HW state, we break that here.
* When that happens, I disable the receive side of the driver.
* Note that what I've been experiencing is a real irq loop where
* I'm getting flooded regardless of the actual port speed.
* Something strange is going on with the HW
*/
if ((++loops) > 1000)
goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
flood:
pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
return true;
}


@@ -471,7 +471,6 @@ static ssize_t wdm_write
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -486,10 +485,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
if (used)
goto out;
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);


@@ -295,8 +295,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
if (port_dev->child)
if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
usb_unlocked_disable_lpm(port_dev->child);
}
}
static const struct dev_pm_ops usb_port_pm_ops = {


@@ -897,13 +897,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
struct urb *usb_urb = qtd->urb->priv;
struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
usb_urb = qtd->urb->priv;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),


@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
#define QUECTEL_PRODUCT_EM060K_129 0x0129
#define QUECTEL_PRODUCT_EM060K_12a 0x012a
#define QUECTEL_PRODUCT_EM060K_12b 0x012b
#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
.driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
.driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
.driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },


@@ -2513,9 +2513,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
return vq->avail_idx == vq->last_avail_idx;
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (vq->avail_idx != vq->last_avail_idx) {
/* Since we have updated avail_idx, the following
* call to vhost_get_vq_desc() will read available
* ring entries. Make sure that read happens after
* the avail_idx read.
*/
smp_rmb();
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
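The vhost change pairs a read barrier with the guest's write barrier: entries are written before avail_idx is bumped, so the reader must order its avail_idx load before the entry loads. A hedged C11-atomics analogy of that pairing (the kernel side uses smp_wmb()/smp_rmb(); all names here are illustrative):

#include <stdatomic.h>

static int ring[64];
static atomic_int avail_idx;

/* Producer: fill the slot, then publish it with release semantics. */
static void publish(int slot, int val)
{
	ring[slot] = val;
	atomic_store_explicit(&avail_idx, slot + 1, memory_order_release);
}

/* Consumer: the acquire load orders the index read before the entry
 * read, which is the job the added smp_rmb() does above. */
static int consume(int last_idx)
{
	int idx = atomic_load_explicit(&avail_idx, memory_order_acquire);

	if (idx == last_idx)
		return -1;	/* ring empty */
	return ring[last_idx];
}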


@@ -2315,20 +2315,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
data = kvmalloc(alloc_bytes, GFP_KERNEL);
data = kvzalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
if (total_bytes >= sizeof(*data)) {
if (total_bytes >= sizeof(*data))
data->bytes_left = total_bytes - sizeof(*data);
data->bytes_missing = 0;
} else {
else
data->bytes_missing = sizeof(*data) - total_bytes;
data->bytes_left = 0;
}
data->elem_cnt = 0;
data->elem_missed = 0;
return data;
}


@@ -1135,6 +1135,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
ret = btrfs_record_root_in_trans(trans, node->root);
if (ret)
return ret;
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
return ret;
}


@@ -4114,6 +4114,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
if (!sb_rdonly(fs_info->sb))
add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
}
/*


@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
#define S_SHIFT 12
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
[S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
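Why the nilfs table gains one slot: the lookup index is (mode & S_IFMT) >> S_SHIFT, and a corrupted on-disk mode can make that as large as S_IFMT >> S_SHIFT == 15, so 16 entries are needed rather than 15. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int ifmt = 0170000;	/* S_IFMT */
	unsigned int shift = 12;	/* S_SHIFT */

	/* prints "max index = 15, required size = 16": an array sized
	 * [S_IFMT >> S_SHIFT] leaves index 15 out of bounds */
	printf("max index = %u, required size = %u\n",
	       ifmt >> shift, (ifmt >> shift) + 1);
	return 0;
}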


@@ -429,6 +429,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
kn = kernfs_find_and_get(kobj->sd, attr->name);
if (kn)
kernfs_break_active_protection(kn);
else
kobject_put(kobj);
return kn;
}
EXPORT_SYMBOL_GPL(sysfs_break_active_protection);


@@ -542,6 +542,31 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
#endif
}
/**
* eth_skb_pkt_type - Assign packet type if destination address does not match
* @skb: Assigned a packet type if address does not match @dev address
* @dev: Network device used to compare packet address against
*
* If the destination MAC address of the packet does not match the network
* device address, assign an appropriate packet type.
*/
static inline void eth_skb_pkt_type(struct sk_buff *skb,
const struct net_device *dev)
{
const struct ethhdr *eth = eth_hdr(skb);
if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
} else {
skb->pkt_type = PACKET_OTHERHOST;
}
}
}
/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
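A hedged usage sketch for the eth_skb_pkt_type() helper added above: a receive path that has just exposed an inner Ethernet header can classify the frame in one call. The surrounding function is illustrative, not from a real driver:

static void demo_rx_classify(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_reset_mac_header(skb);	/* eth_hdr(skb) is valid from here */
	eth_skb_pkt_type(skb, dev);	/* sets PACKET_BROADCAST, _MULTICAST
					 * or _OTHERHOST when not for us */
}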


@@ -133,7 +133,7 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
# define lockdep_hrtimer_exit(__context) do { } while (0)
# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)


@@ -265,6 +265,85 @@ struct uart_port {
ANDROID_KABI_RESERVE(2);
};
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
*/
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
}
/**
* uart_port_lock_irq - Lock the UART port and disable interrupts
* @up: Pointer to UART port structure
*/
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
}
/**
* uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
* @up: Pointer to UART port structure
* @flags: Pointer to interrupt flags storage
*/
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
}
/**
* uart_port_trylock - Try to lock the UART port
* @up: Pointer to UART port structure
*
* Returns: True if lock was acquired, false otherwise
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
return spin_trylock(&up->lock);
}
/**
* uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
* @up: Pointer to UART port structure
* @flags: Pointer to interrupt flags storage
*
* Returns: True if lock was acquired, false otherwise
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
return spin_trylock_irqsave(&up->lock, *flags);
}
/**
* uart_port_unlock - Unlock the UART port
* @up: Pointer to UART port structure
*/
static inline void uart_port_unlock(struct uart_port *up)
{
spin_unlock(&up->lock);
}
/**
* uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
* @up: Pointer to UART port structure
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
spin_unlock_irq(&up->lock);
}
/**
* uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
* @up: Pointer to UART port structure
* @flags: The saved interrupt flags for restore
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
spin_unlock_irqrestore(&up->lock, flags);
}
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
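A hedged usage sketch of the wrappers above, mirroring the mxs_auart fix earlier in this diff: status registers that drive the handler are read only while the port lock is held. The register offset and handler body are illustrative:

static irqreturn_t demo_uart_irq(int irq, void *context)
{
	struct uart_port *port = context;
	u32 stat;

	uart_port_lock(port);
	stat = readl(port->membase + 0x00);	/* illustrative status reg */
	/* ... ack and service interrupts based on stat ... */
	uart_port_unlock(port);

	return IRQ_HANDLED;
}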


@@ -569,7 +569,7 @@ struct trace_event_file {
} \
early_initcall(trace_init_perf_perm_##name);
#define PERF_MAX_TRACE_SIZE 2048
#define PERF_MAX_TRACE_SIZE 8192
#define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */


@@ -116,7 +116,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
#endif
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
#define u64_stats_init(syncp) \
do { \
struct u64_stats_sync *__s = (syncp); \
seqcount_init(&__s->seq); \
} while (0)
#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
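A hedged usage sketch for the reworked macro: each expansion is now one full statement that evaluates its argument once, and per the series title each call site's seqcount_init() keeps lockdep happy when the initializer is used repeatedly in one file. The struct below is illustrative:

struct demo_stats {
	struct u64_stats_sync rx_syncp;
	struct u64_stats_sync tx_syncp;
};

static void demo_stats_setup(struct demo_stats *s)
{
	u64_stats_init(&s->rx_syncp);
	u64_stats_init(&s->tx_syncp);	/* back-to-back use is now safe */
}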


@@ -457,6 +457,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
refcount_inc(&ifp->refcnt);
}
static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
{
return refcount_inc_not_zero(&ifp->refcnt);
}
/*
* compute link-local solicited-node multicast address


@@ -56,7 +56,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
atomic_long_t inflight;
unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
@@ -77,6 +77,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
* candidates to close a small race window.
*/
};
static inline void unix_state_lock_nested(struct sock *sk,


@@ -487,6 +487,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
sockptr_t src, size_t src_size)
{
if (dst_size > src_size)
return -EINVAL;
return copy_from_sockptr(dst, src, dst_size);
}
int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);


@@ -333,6 +333,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
return pskb_network_may_pull(skb, nhlen);
}
/* Variant of pskb_inet_may_pull().
*/
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
{
int nhlen = 0, maclen = ETH_HLEN;
__be16 type = skb->protocol;
/* Essentially this is skb_protocol(skb, true)
* And we get MAC len.
*/
if (eth_type_vlan(type))
type = __vlan_get_protocol(skb, type, &maclen);
switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
nhlen = sizeof(struct ipv6hdr);
break;
#endif
case htons(ETH_P_IP):
nhlen = sizeof(struct iphdr);
break;
}
/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
* a base network header in skb->head.
*/
if (!pskb_may_pull(skb, maclen + nhlen))
return false;
skb_set_network_header(skb, maclen);
return true;
}
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
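A hedged sketch of how a tunnel transmit path uses the helper above (geneve, earlier in this diff, is the real caller): validate and pull the inner headers, VLAN tags included, before reading the network header. The function is illustrative:

static int demo_xmit_validate(struct sk_buff *skb)
{
	/* unlike pskb_inet_may_pull(), this accounts for VLAN tags when
	 * locating the inner IP/IPv6 header */
	if (!skb_vlan_inet_prepare(skb))
		return -EINVAL;

	/* the network header offset is set; ip_hdr()/ipv6_hdr() are safe */
	return 0;
}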


@@ -619,6 +619,8 @@ static void __init setup_command_line(char *command_line)
if (!saved_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
len = xlen + strlen(command_line) + 1;
static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
if (!static_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);


@@ -19,7 +19,7 @@ int main(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
#ifdef CONFIG_SMP
DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */


@@ -2900,7 +2900,8 @@ enum cpu_mitigations {
};
static enum cpu_mitigations cpu_mitigations __ro_after_init =
CPU_MITIGATIONS_AUTO;
IS_ENABLED(CONFIG_CPU_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
CPU_MITIGATIONS_OFF;
static int __init mitigations_parse_cmdline(char *arg)
{


@@ -1647,10 +1647,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
jump_label_lock();
preempt_disable();
/* Ensure it is not in reserved area nor out of text */
if (!(core_kernel_text((unsigned long) p->addr) ||
is_module_text_address((unsigned long) p->addr)) ||
in_gate_area_no_mm((unsigned long) p->addr) ||
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod)) {
ret = -EINVAL;
goto out;
}
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
@@ -1660,8 +1667,7 @@
goto out;
}
/* Check if are we probing a module */
*probed_mod = __module_text_address((unsigned long) p->addr);
/* Get module refcount and reject __init functions for loaded modules. */
if (*probed_mod) {
/*
* We must hold a refcount of the probed module while updating


@@ -400,7 +400,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
"perf buffer not large enough"))
"perf buffer not large enough, wanted %d, have %d",
size, PERF_MAX_TRACE_SIZE))
return NULL;
*rctxp = rctx = perf_swevent_get_recursion_context();


@@ -1140,10 +1140,8 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
{
int ret = tracing_alloc_snapshot_instance(file->tr);
if (ret < 0)
return ret;
if (tracing_alloc_snapshot_instance(file->tr) != 0)
return 0;
return register_trigger(glob, ops, data, file);
}


@@ -295,10 +295,10 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
* contexts and I/O.
* contexts, I/O, nolockdep.
*/
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
if (page)


@@ -4188,7 +4188,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
spin_lock_bh(&bat_priv->tt.commit_lock);
while (true) {
while (timeout) {
table_size = batadv_tt_local_table_transmit_size(bat_priv);
if (packet_size_max >= table_size)
break;


@@ -107,8 +107,10 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
if (skb)
if (skb) {
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
}
}
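The hci_req_sync_complete() hunk plugs a leak by freeing any previously stored skb before saving the new reference. The same own-one-reference pattern as a standalone hedged sketch:

#include <stdlib.h>

struct buf { int refs; };

static struct buf *get_buf(struct buf *b) { if (b) b->refs++; return b; }
static void put_buf(struct buf *b) { if (b && --b->refs == 0) free(b); }

struct holder { struct buf *saved; };

/* Release the old reference (NULL-safe, like kfree_skb()) before
 * overwriting the pointer, so repeated completions cannot leak. */
static void store_latest(struct holder *h, struct buf *b)
{
	put_buf(h->saved);
	h->saved = get_buf(b);
}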


@@ -456,7 +456,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -503,7 +504,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mode 0x%2.2x", chan->mode);
len = min_t(unsigned int, len, sizeof(opts));
len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
@@ -553,7 +554,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;


@@ -825,7 +825,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
int err = 0;
struct bt_voice voice;
u32 opt;
@@ -841,10 +841,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
err = -EFAULT;
err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -861,11 +860,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, sizeof(voice), optlen);
if (copy_from_sockptr(&voice, optval, len)) {
err = -EFAULT;
err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
optlen);
if (err)
break;
}
/* Explicitly check for these values */
if (voice.setting != BT_VOICE_TRANSPARENT &&
@@ -878,10 +876,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
err = -EFAULT;
err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
}
if (opt)
sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
@@ -904,7 +901,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
int err = 0;
size_t len;
BT_DBG("sk %p", sk);
@@ -926,7 +924,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -944,7 +942,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;

Some files were not shown because too many files have changed in this diff.