This is the 5.4.42 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7EzDkACgkQONu9yGCS
 aT6E0Q//dzSQiIdz+a06TLKzcXyxzY+FIIy/OVZAYSGxeM3Kt6GDUzUWnlAPOSbo
 9UVcQdHllNKuJXXBOTkjEmFLEdJ+AFaJ2M7tjh5PKd7Y1am5xY2t7w4z5ohFaJto
 lPxakRXXPh4Yrf8n+ANyR9Dz69uLC+qfHi2wbQ6v8ly07Fgr034X7Pl+BMewNYM8
 QCcsS2a7+qmsvRGTheWhiFTZhEbHzEK5GB4EWIaYTEEsrTet1htwvtRLZ7TEvAma
 2jnapJBQBGkxxMoo9D1GAzA9MI7p7XqUvCmRpMtXiMaPUhFmOgHYtGZ4x3Iu/TmK
 dIeWOZjyHlosIHtbf7m2hrNPedaErjnZCM97gjOi9TjupndBveikYuynnbLthfbZ
 1mrrXHJew/FjILFE1GL1zVI6LMsFRUyWA7H3R8xQkBUXBKhoIzGl4263bugSO2Hk
 Gvn+cSTUBvou15yZPEm0WFqUiM0kBO3A1x9YcF9yKYuzGtpGF5GEQlriNCLLvfWf
 kihgOM4yIaFFYVL+CCbN6uHBVFhfI+Ts9/7PzLgoGtwshw9EK2Gn/FBn2NJetC1Q
 1wHlNlavLoYvI9RZ0kCuD/jcVxG2hvOjvo/RCYg+ETujjhkhWwcWZxhZQsG1Kel3
 a2/DMrx52RnhNMVDGecgRGKIADtqI4x6XZzk0gvgDG1ib3u77Ng=
 =dmPo
 -----END PGP SIGNATURE-----

Merge 5.4.42 into android-5.4-stable

Changes in 5.4.42
	net: dsa: Do not make user port errors fatal
	shmem: fix possible deadlocks on shmlock_user_lock
	net: phy: microchip_t1: add lan87xx_phy_init to initialize the lan87xx phy.
	KVM: arm: vgic: Synchronize the whole guest on GIC{D,R}_I{S,C}ACTIVER read
	gpio: pca953x: Fix pca953x_gpio_set_config
	SUNRPC: Add "@len" parameter to gss_unwrap()
	SUNRPC: Fix GSS privacy computation of auth->au_ralign
	net/sonic: Fix a resource leak in an error handling path in 'jazz_sonic_probe()'
	net: moxa: Fix a potential double 'free_irq()'
	ftrace/selftests: workaround cgroup RT scheduling issues
	drop_monitor: work around gcc-10 stringop-overflow warning
	virtio-blk: handle block_device_operations callbacks after hot unplug
	sun6i: dsi: fix gcc-4.8
	net_sched: fix tcm_parent in tc filter dump
	scsi: sg: add sg_remove_request in sg_write
	selftests/bpf: fix goto cleanup label not defined
	mmc: sdhci-acpi: Add SDHCI_QUIRK2_BROKEN_64_BIT_DMA for AMDI0040
	dpaa2-eth: properly handle buffer size restrictions
	net: fix a potential recursive NETDEV_FEAT_CHANGE
	netlabel: cope with NULL catmap
	net: phy: fix aneg restart in phy_ethtool_set_eee
	net: stmmac: fix num_por initialization
	pppoe: only process PADT targeted at local interfaces
	Revert "ipv6: add mtu lock check in __ip6_rt_update_pmtu"
	tcp: fix error recovery in tcp_zerocopy_receive()
	tcp: fix SO_RCVLOWAT hangs with fat skbs
	virtio_net: fix lockdep warning on 32 bit
	dpaa2-eth: prevent array underflow in update_cls_rule()
	hinic: fix a bug of ndo_stop
	net: dsa: loop: Add module soft dependency
	net: ipv4: really enforce backoff for redirects
	netprio_cgroup: Fix unlimited memory leak of v2 cgroups
	net: tcp: fix rx timestamp behavior for tcp_recvmsg
	nfp: abm: fix error return code in nfp_abm_vnic_alloc()
	r8169: re-establish support for RTL8401 chip version
	umh: fix memory leak on execve failure
	riscv: fix vdso build with lld
	dmaengine: pch_dma.c: Avoid data race between probe and irq handler
	dmaengine: mmp_tdma: Do not ignore slave config validation errors
	dmaengine: mmp_tdma: Reset channel error on release
	selftests/ftrace: Check the first record for kprobe_args_type.tc
	cpufreq: intel_pstate: Only mention the BIOS disabling turbo mode once
	ALSA: hda/hdmi: fix race in monitor detection during probe
	drm/amd/powerplay: avoid using pm_en before it is initialized revised
	drm/amd/display: check if REFCLK_CNTL register is present
	drm/amd/display: Update downspread percent to match spreadsheet for DCN2.1
	drm/qxl: lost qxl_bo_kunmap_atomic_page in qxl_image_init_helper()
	drm/amdgpu: simplify padding calculations (v2)
	drm/amdgpu: invalidate L2 before SDMA IBs (v2)
	ipc/util.c: sysvipc_find_ipc() incorrectly updates position index
	ALSA: hda/realtek - Fix S3 pop noise on Dell Wyse
	gfs2: Another gfs2_walk_metadata fix
	mmc: sdhci-pci-gli: Fix no irq handler from suspend
	IB/hfi1: Fix another case where pq is left on waitlist
	ACPI: EC: PM: Avoid premature returns from acpi_s2idle_wake()
	pinctrl: sunrisepoint: Fix PAD lock register offset for SPT-H
	pinctrl: baytrail: Enable pin configuration setting for GPIO chip
	pinctrl: qcom: fix wrong write in update_dual_edge
	pinctrl: cherryview: Add missing spinlock usage in chv_gpio_irq_handler
	bpf: Fix error return code in map_lookup_and_delete_elem()
	ALSA: firewire-lib: fix 'function sizeof not defined' error of tracepoints format
	i40iw: Fix error handling in i40iw_manage_arp_cache()
	drm/i915: Don't enable WaIncreaseLatencyIPCEnabled when IPC is disabled
	bpf, sockmap: msg_pop_data can incorrectly set an sge length
	bpf, sockmap: bpf_tcp_ingress needs to subtract bytes from sg.size
	mmc: alcor: Fix a resource leak in the error path for ->probe()
	mmc: sdhci-pci-gli: Fix can not access GL9750 after reboot from Windows 10
	mmc: core: Check request type before completing the request
	mmc: core: Fix recursive locking issue in CQE recovery path
	mmc: block: Fix request completion in the CQE timeout path
	gfs2: More gfs2_find_jhead fixes
	fork: prevent accidental access to clone3 features
	drm/amdgpu: force fbdev into vram
	NFS: Fix fscache super_cookie index_key from changing after umount
	nfs: fscache: use timespec64 in inode auxdata
	NFSv4: Fix fscache cookie aux_data to ensure change_attr is included
	netfilter: conntrack: avoid gcc-10 zero-length-bounds warning
	drm/i915/gvt: Fix kernel oops for 3-level ppgtt guest
	arm64: fix the flush_icache_range arguments in machine_kexec
	nfs: fix NULL deference in nfs4_get_valid_delegation
	SUNRPC: Signalled ASYNC tasks need to exit
	netfilter: nft_set_rbtree: Introduce and use nft_rbtree_interval_start()
	netfilter: nft_set_rbtree: Add missing expired checks
	RDMA/rxe: Always return ERR_PTR from rxe_create_mmap_info()
	IB/mlx4: Test return value of calls to ib_get_cached_pkey
	IB/core: Fix potential NULL pointer dereference in pkey cache
	RDMA/core: Fix double put of resource
	RDMA/iw_cxgb4: Fix incorrect function parameters
	hwmon: (da9052) Synchronize access with mfd
	s390/ism: fix error return code in ism_probe()
	mm, memcg: fix inconsistent oom event behavior
	NFSv3: fix rpc receive buffer size for MOUNT call
	pnp: Use list_for_each_entry() instead of open coding
	net/rds: Use ERR_PTR for rds_message_alloc_sgs()
	Stop the ad-hoc games with -Wno-maybe-initialized
	gcc-10: disable 'zero-length-bounds' warning for now
	gcc-10: disable 'array-bounds' warning for now
	gcc-10: disable 'stringop-overflow' warning for now
	gcc-10: disable 'restrict' warning for now
	gcc-10 warnings: fix low-hanging fruit
	gcc-10: mark more functions __init to avoid section mismatch warnings
	gcc-10: avoid shadowing standard library 'free()' in crypto
	usb: usbfs: correct kernel->user page attribute mismatch
	USB: usbfs: fix mmap dma mismatch
	ALSA: hda/realtek - Limit int mic boost for Thinkpad T530
	ALSA: hda/realtek - Add COEF workaround for ASUS ZenBook UX431DA
	ALSA: rawmidi: Fix racy buffer resize under concurrent accesses
	ALSA: usb-audio: Add control message quirk delay for Kingston HyperX headset
	usb: core: hub: limit HUB_QUIRK_DISABLE_AUTOSUSPEND to USB5534B
	usb: host: xhci-plat: keep runtime active when removing host
	usb: cdns3: gadget: prev_req->trb is NULL for ep0
	USB: gadget: fix illegal array access in binding with UDC
	usb: xhci: Fix NULL pointer dereference when enqueuing trbs from urb sg list
	Make the "Reducing compressed framebufer size" message be DRM_INFO_ONCE()
	ARM: dts: dra7: Fix bus_dma_limit for PCIe
	ARM: dts: imx27-phytec-phycard-s-rdk: Fix the I2C1 pinctrl entries
	ARM: dts: imx6dl-yapp4: Fix Ursa board Ethernet connection
	drm/amd/display: add basic atomic check for cursor plane
	powerpc/32s: Fix build failure with CONFIG_PPC_KUAP_DEBUG
	cifs: fix leaked reference on requeued write
	x86: Fix early boot crash on gcc-10, third try
	x86/unwind/orc: Fix error handling in __unwind_start()
	exec: Move would_dump into flush_old_exec
	clk: rockchip: fix incorrect configuration of rk3228 aclk_gpu* clocks
	dwc3: Remove check for HWO flag in dwc3_gadget_ep_reclaim_trb_sg()
	fanotify: fix merging marks masks with FAN_ONDIR
	usb: gadget: net2272: Fix a memory leak in an error handling path in 'net2272_plat_probe()'
	usb: gadget: audio: Fix a missing error return value in audio_bind()
	usb: gadget: legacy: fix error return code in gncm_bind()
	usb: gadget: legacy: fix error return code in cdc_bind()
	Revert "ALSA: hda/realtek: Fix pop noise on ALC225"
	clk: Unlink clock if failed to prepare or enable
	arm64: dts: meson-g12b-khadas-vim3: add missing frddr_a status property
	arm64: dts: meson-g12-common: fix dwc2 clock names
	arm64: dts: rockchip: Replace RK805 PMIC node name with "pmic" on rk3328 boards
	arm64: dts: rockchip: Rename dwc3 device nodes on rk3399 to make dtc happy
	arm64: dts: imx8mn: Change SDMA1 ahb clock for imx8mn
	ARM: dts: r8a73a4: Add missing CMT1 interrupts
	arm64: dts: renesas: r8a77980: Fix IPMMU VIP[01] nodes
	ARM: dts: r8a7740: Add missing extal2 to CPG node
	SUNRPC: Revert 241b1f419f ("SUNRPC: Remove xdr_buf_trim()")
	bpf: Fix sk_psock refcnt leak when receiving message
	KVM: x86: Fix off-by-one error in kvm_vcpu_ioctl_x86_setup_mce
	Makefile: disallow data races on gcc-10 as well
	libbpf: Extract and generalize CPU mask parsing logic
	selftest/bpf: fix backported test_select_reuseport selftest changes
	bpf: Test_progs, fix test_get_stack_rawtp_err.c build
	Linux 5.4.42

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I15318d0b2611dec3962b1db1cbca8ae5dea2ff60
Greg Kroah-Hartman 2020-05-20 12:49:59 +02:00
commit a93d8ad9ba
171 changed files with 1240 additions and 524 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 41
SUBLEVEL = 42
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@ -721,12 +721,9 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
endif
ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
KBUILD_CFLAGS += -Wno-maybe-uninitialized
endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)
include scripts/Makefile.kcov
include scripts/Makefile.gcc-plugins
@ -924,6 +921,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign
# disable stringop warnings in gcc 8+
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
# We'll want to enable this eventually, but it's not going away for 5.7 at least
KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
# Another good warning that we'll want to enable eventually
KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
# Enabled with W=2, disabled by default as noisy
KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)


@ -172,6 +172,7 @@
#address-cells = <1>;
ranges = <0x51000000 0x51000000 0x3000
0x0 0x20000000 0x10000000>;
dma-ranges;
/**
* To enable PCI endpoint mode, disable the pcie1_rc
* node and enable pcie1_ep mode.
@ -185,7 +186,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@ -230,6 +230,7 @@
#address-cells = <1>;
ranges = <0x51800000 0x51800000 0x3000
0x0 0x30000000 0x10000000>;
dma-ranges;
status = "disabled";
pcie2_rc: pcie@51800000 {
reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
@ -240,7 +241,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;


@ -75,8 +75,8 @@
imx27-phycard-s-rdk {
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
MX27_PAD_I2C_DATA__I2C_DATA 0x0
MX27_PAD_I2C_CLK__I2C_CLK 0x0
>;
};


@ -38,7 +38,7 @@
};
&switch_ports {
/delete-node/ port@2;
/delete-node/ port@3;
};
&touchscreen {


@ -131,7 +131,14 @@
cmt1: timer@e6130000 {
compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1";
reg = <0 0xe6130000 0 0x1004>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
clock-names = "fck";
power-domains = <&pd_c5>;


@ -479,7 +479,7 @@
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7740-cpg-clocks";
reg = <0xe6150000 0x10000>;
clocks = <&extal1_clk>, <&extalr_clk>;
clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
#clock-cells = <1>;
clock-output-names = "system", "pllc0", "pllc1",
"pllc2", "r",


@ -2365,7 +2365,7 @@
reg = <0x0 0xff400000 0x0 0x40000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
clock-names = "ddr";
clock-names = "otg";
phys = <&usb2_phy1>;
phy-names = "usb2-phy";
dr_mode = "peripheral";


@ -152,6 +152,10 @@
clock-latency = <50000>;
};
&frddr_a {
status = "okay";
};
&frddr_b {
status = "okay";
};


@ -616,7 +616,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
<&clk IMX8MN_CLK_SDMA1_ROOT>;
<&clk IMX8MN_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";


@ -1318,6 +1318,7 @@
ipmmu_vip0: mmu@e7b00000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7b00000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};
@ -1325,6 +1326,7 @@
ipmmu_vip1: mmu@e7960000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7960000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 11>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};


@ -92,7 +92,7 @@
&i2c1 {
status = "okay";
rk805: rk805@18 {
rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;


@ -169,7 +169,7 @@
&i2c1 {
status = "okay";
rk805: rk805@18 {
rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;


@ -410,7 +410,7 @@
reset-names = "usb3-otg";
status = "disabled";
usbdrd_dwc3_0: dwc3 {
usbdrd_dwc3_0: usb@fe800000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe800000 0x0 0x100000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
@ -446,7 +446,7 @@
reset-names = "usb3-otg";
status = "disabled";
usbdrd_dwc3_1: dwc3 {
usbdrd_dwc3_1: usb@fe900000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe900000 0x0 0x100000>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;


@ -189,6 +189,7 @@ void machine_kexec(struct kimage *kimage)
* the offline CPUs. Therefore, we must use the __* variant here.
*/
__flush_icache_range((uintptr_t)reboot_code_buffer,
(uintptr_t)reboot_code_buffer +
arm64_relocate_new_kernel_size);
/* Flush the kimage list and its buffers. */


@ -75,7 +75,7 @@
.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
lwz \gpr2, KUAP(thread)
lwz \gpr, KUAP(thread)
999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif


@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.
# table and layout of the linked DSO. With ld --just-symbols we can then
# refer to these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
LDFLAGS_vdso-syms.o := -r -R
LDFLAGS_vdso-syms.o := -r --just-symbols
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
$(call if_changed,ld)


@ -55,8 +55,13 @@
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* NOTE: this must only be called from functions that never return
* and it must always be inlined.
*
* In addition, it should be called from a compilation unit for which
* stack protector is disabled. Alternatively, the caller should not end
* with a function call which gets tail-call optimized as that would
* lead to checking a modified canary value.
*/
static __always_inline void boot_init_stack_canary(void)
{


@ -262,6 +262,14 @@ static void notrace start_secondary(void *unused)
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
/*
* Prevent tail call to cpu_startup_entry() because the stack protector
* guard has been changed a couple of function calls up, in
* boot_init_stack_canary() and must not be checked before tail calling
* another function.
*/
prevent_tail_call_optimization();
}
/**


@ -608,23 +608,23 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long *first_frame)
{
if (!orc_init)
goto done;
memset(state, 0, sizeof(*state));
state->task = task;
if (!orc_init)
goto err;
/*
* Refuse to unwind the stack of a task while it's executing on another
* CPU. This check is racy, but that's ok: the unwinder has other
* checks to prevent it from going off the rails.
*/
if (task_on_another_cpu(task))
goto done;
goto err;
if (regs) {
if (user_mode(regs))
goto done;
goto the_end;
state->ip = regs->ip;
state->sp = regs->sp;
@ -657,6 +657,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
* generate some kind of backtrace if this happens.
*/
void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
state->error = true;
if (get_stack_info(next_page, state->task, &state->stack_info,
&state->stack_mask))
return;
@ -682,8 +683,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
return;
done:
err:
state->error = true;
the_end:
state->stack_info.type = STACK_TYPE_UNKNOWN;
return;
}
EXPORT_SYMBOL_GPL(__unwind_start);


@ -3682,7 +3682,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
goto out;


@ -92,6 +92,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
cpu_bringup();
boot_init_stack_canary();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
prevent_tail_call_optimization();
}
void xen_smp_intr_free_pv(unsigned int cpu)


@ -289,7 +289,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->child);
}
static void free(struct skcipher_instance *inst)
static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@ -401,7 +401,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
inst->free = free;
inst->free = free_inst;
err = skcipher_register_instance(tmpl, inst);
if (err)


@ -328,7 +328,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(ctx->tweak);
}
static void free(struct skcipher_instance *inst)
static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@ -439,7 +439,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
inst->free = free;
inst->free = free_inst;
err = skcipher_register_instance(tmpl, inst);
if (err)


@ -1962,23 +1962,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
bool acpi_ec_other_gpes_active(void)
{
return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
}
bool acpi_ec_dispatch_gpe(void)
{
u32 ret;
if (!first_ec)
return acpi_any_gpe_status_set(U32_MAX);
/*
* Report wakeup if the status bit is set for any enabled GPE other
* than the EC one.
*/
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;
if (ec_no_wakeup)
return false;
/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
if (ret == ACPI_INTERRUPT_HANDLED) {
if (ret == ACPI_INTERRUPT_HANDLED)
pm_pr_dbg("EC GPE dispatched\n");
return true;
}
return false;
}
#endif /* CONFIG_PM_SLEEP */


@ -201,7 +201,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
bool acpi_ec_other_gpes_active(void);
bool acpi_ec_dispatch_gpe(void);
#endif


@ -1010,20 +1010,10 @@ static bool acpi_s2idle_wake(void)
if (acpi_check_wakeup_handlers())
return true;
/*
* If the status bit is set for any enabled GPE other than the
* EC one, the wakeup is regarded as a genuine one.
*/
if (acpi_ec_other_gpes_active())
/* Check non-EC GPE wakeups and dispatch the EC GPE. */
if (acpi_ec_dispatch_gpe())
return true;
/*
* If the EC GPE status bit has not been set, the wakeup is
* regarded as a spurious one.
*/
if (!acpi_ec_dispatch_gpe())
return false;
/*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.


@ -33,6 +33,15 @@ struct virtio_blk_vq {
} ____cacheline_aligned_in_smp;
struct virtio_blk {
/*
* This mutex must be held by anything that may run after
* virtblk_remove() sets vblk->vdev to NULL.
*
* blk-mq, virtqueue processing, and sysfs attribute code paths are
* shut down before vblk->vdev is set to NULL and therefore do not need
* to hold this mutex.
*/
struct mutex vdev_mutex;
struct virtio_device *vdev;
/* The disk structure for the kernel. */
@ -44,6 +53,13 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;
/*
* Tracks references from block_device_operations open/release and
* virtio_driver probe/remove so this object can be freed once no
* longer in use.
*/
refcount_t refs;
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@ -390,10 +406,55 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return err;
}
static void virtblk_get(struct virtio_blk *vblk)
{
refcount_inc(&vblk->refs);
}
static void virtblk_put(struct virtio_blk *vblk)
{
if (refcount_dec_and_test(&vblk->refs)) {
ida_simple_remove(&vd_index_ida, vblk->index);
mutex_destroy(&vblk->vdev_mutex);
kfree(vblk);
}
}
static int virtblk_open(struct block_device *bd, fmode_t mode)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
if (vblk->vdev)
virtblk_get(vblk);
else
ret = -ENXIO;
mutex_unlock(&vblk->vdev_mutex);
return ret;
}
static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
struct virtio_blk *vblk = disk->private_data;
virtblk_put(vblk);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
if (!vblk->vdev) {
ret = -ENXIO;
goto out;
}
/* see if the host passed in geometry config */
if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@ -409,12 +470,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
}
return 0;
out:
mutex_unlock(&vblk->vdev_mutex);
return ret;
}
static const struct block_device_operations virtblk_fops = {
.ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.open = virtblk_open,
.release = virtblk_release,
.getgeo = virtblk_getgeo,
};
@ -769,6 +834,10 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}
/* This reference is dropped in virtblk_remove(). */
refcount_set(&vblk->refs, 1);
mutex_init(&vblk->vdev_mutex);
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
@ -934,8 +1003,6 @@ static int virtblk_probe(struct virtio_device *vdev)
static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
int refc;
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
@ -945,18 +1012,21 @@ static void virtblk_remove(struct virtio_device *vdev)
blk_mq_free_tag_set(&vblk->tag_set);
mutex_lock(&vblk->vdev_mutex);
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
kfree(vblk);
/* Only free device id if we don't have any users */
if (refc == 1)
ida_simple_remove(&vd_index_ida, index);
mutex_unlock(&vblk->vdev_mutex);
virtblk_put(vblk);
}
#ifdef CONFIG_PM_SLEEP


@ -3530,6 +3530,9 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
if (ret)
hlist_del_init(&core->child_node);
clk_prepare_unlock();
if (!ret)


@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
GATE(0, "cpll_gpu", "cpll", 0,
COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
GATE(0, "gpll_gpu", "gpll", 0,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
GATE(0, "usb480m_gpu", "usb480m", 0,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
/* PD_BUS */
GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),


@ -1058,7 +1058,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
update_turbo_state();
if (global.turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;


@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
tdmac->desc_arr = NULL;
if (tdmac->status == DMA_ERROR)
tdmac->status = DMA_COMPLETE;
return;
}
@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
goto err_out;
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];


@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
}
pci_set_master(pdev);
pd->dma.dev = &pdev->dev;
err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
if (err) {
@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_free_irq;
}
pd->dma.dev = &pdev->dev;
INIT_LIST_HEAD(&pd->dma.channels);


@ -16,7 +16,7 @@
int efi_tpm_final_log_size;
EXPORT_SYMBOL(efi_tpm_final_log_size);
static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
int event_size, size = 0;


@ -528,7 +528,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
switch (config) {
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
return pca953x_gpio_set_pull_up_down(chip, offset, config);


@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
AMDGPU_GEM_CREATE_VRAM_CLEARED;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];


@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
u32 pad_count;
int i;
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =


@ -73,6 +73,22 @@
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0
#define SDMA_GCR_RANGE_IS_PA (1 << 18)
#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
#define SDMA_GCR_GL2_WB (1 << 15)
#define SDMA_GCR_GL2_INV (1 << 14)
#define SDMA_GCR_GL2_DISCARD (1 << 13)
#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
#define SDMA_GCR_GL2_US (1 << 10)
#define SDMA_GCR_GL1_INV (1 << 9)
#define SDMA_GCR_GLV_INV (1 << 8)
#define SDMA_GCR_GLK_INV (1 << 7)
#define SDMA_GCR_GLK_WB (1 << 6)
#define SDMA_GCR_GLM_INV (1 << 5)
#define SDMA_GCR_GLM_WB (1 << 4)
#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
#define SDMA_PKT_HEADER_op_mask 0x000000FF


@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =


@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =


@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =


@ -382,8 +382,27 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
/* IB packet must end on a 8 DW boundary */
sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
/* Invalidate L2, because if we don't do it, we might get stale cache
* lines from previous IBs.
*/
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
SDMA_GCR_GL2_WB |
SDMA_GCR_GLM_INV |
SDMA_GCR_GLM_WB) << 16);
amdgpu_ring_write(ring, 0xffffff80);
amdgpu_ring_write(ring, 0xffff);
/* An IB packet must end on a 8 DW boundary--the next dword
* must be on a 8-dword boundary. Our IB packet below is 6
* dwords long, thus add x number of NOPs, such that, in
* modular arithmetic,
* wptr + 6 + x = 8k, k >= 0, which in C is,
* (wptr + 6 + x) % 8 = 0.
* The expression below, is a solution of x.
*/
sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@ -1086,10 +1105,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
* sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
*
* sdma_v5_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
*
* Pad the IB with NOPs to a boundary multiple of 8.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
@ -1097,7 +1116,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@ -1600,7 +1619,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,


@ -6921,6 +6921,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@ -6930,9 +6931,30 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
/*TODO Implement atomic check for cursor plane */
if (plane->type == DRM_PLANE_TYPE_CURSOR)
/*TODO Implement better atomic check for cursor plane */
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
if (!enable || !new_plane_crtc ||
drm_atomic_plane_disabling(plane->state, new_plane_state))
return 0;
new_acrtc = to_amdgpu_crtc(new_plane_crtc);
if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
new_plane_state->crtc_w, new_plane_state->crtc_h);
return -EINVAL;
}
if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
new_plane_state->crtc_x, new_plane_state->crtc_y);
return -EINVAL;
}
return 0;
}
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);


@ -2015,7 +2015,8 @@ static void dcn20_fpga_init_hw(struct dc *dc)
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
REG_WRITE(REFCLK_CNTL, 0);
if (REG(REFCLK_CNTL))
REG_WRITE(REFCLK_CNTL, 0);
//


@ -247,7 +247,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,


@ -1425,7 +1425,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
mutex_lock(&hwmgr->smu_lock);
@ -1459,7 +1460,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);


@ -504,8 +504,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
fbc->threshold = ret;


@ -374,7 +374,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
/* skip now as current i915 ppgtt alloc won't allocate
top level pdp for non 4-level table, won't impact
shadow ppgtt. */
if (!pd)
break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}


@ -4784,7 +4784,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;


@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
break;
default:
DRM_ERROR("unsupported image bit depth\n");
return -EINVAL; /* TODO: cleanup */
qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;


@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
union phy_configure_opts opts = { 0 };
union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;


@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
int channel = to_sensor_dev_attr(devattr)->index;
int ret;
mutex_lock(&hwmon->hwmon_lock);
mutex_lock(&hwmon->da9052->auxadc_lock);
ret = __da9052_read_tsi(dev, channel);
mutex_unlock(&hwmon->hwmon_lock);
mutex_unlock(&hwmon->da9052->auxadc_lock);
if (ret < 0)
return ret;


@ -1542,8 +1542,11 @@ int ib_cache_setup_one(struct ib_device *device)
if (err)
return err;
rdma_for_each_port (device, p)
ib_cache_update(device, p, true);
rdma_for_each_port (device, p) {
err = ib_cache_update(device, p, true);
if (err)
return err;
}
return 0;
}


@ -1248,10 +1248,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
rdma_restrack_put(res);
if (ret)
goto err_free;
rdma_restrack_put(res);
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);


@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
srqidx = ABORT_RSS_SRQIDX_G(
be32_to_cpu(req->srqidx_status));
if (srqidx) {
complete_cached_srq_buffers(ep,
req->srqidx_status);
complete_cached_srq_buffers(ep, srqidx);
} else {
/* Hold ep ref until finish_peer_abort() */
c4iw_get_ep(&ep->com);
@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
TCB_RQ_START_S);
ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
TCB_RQ_START_S);
cleanup:
pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);


@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
pq->state = SDMA_PKT_Q_ACTIVE;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
if (unlikely(ret < 0 && ret != -EBUSY))
goto free_req;
/*
* This is a somewhat blocking send implementation.


@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
int arp_index;
arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
if (arp_index == -1)
if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)


@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
int send_size;
int header_size;
int spc;
int err;
int i;
if (wr->wr.opcode != IB_WR_SEND)
@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
if (err)
return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
&pkey);
else
ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
&pkey);
if (err)
return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));


@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
return NULL;
return ERR_PTR(-ENOMEM);
size = PAGE_ALIGN(size);


@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
if (outbuf) {
ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
if (!ip)
if (IS_ERR(ip)) {
err = PTR_ERR(ip);
goto err1;
}
err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
if (err)
if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
err = -EFAULT;
goto err2;
}
spin_lock_bh(&rxe->pending_lock);
list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
err2:
kfree(ip);
err1:
return -EINVAL;
return err;
}
inline void rxe_queue_reset(struct rxe_queue *q)


@ -1425,6 +1425,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
@ -1454,7 +1455,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);


@ -108,11 +108,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
__mmc_cqe_recovery_notifier(mq);
mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
/* No timeout (XXX: huh? comment doesn't make much sense) */
blk_mq_complete_request(req);
/* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
@ -128,18 +127,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
unsigned long flags;
int ret;
bool ignore_tout;
spin_lock_irqsave(&mq->lock, flags);
if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
ret = BLK_EH_RESET_TIMER;
else
ret = mmc_cqe_timed_out(req);
ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
spin_unlock_irqrestore(&mq->lock, flags);
return ret;
return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
static void mmc_mq_recovery_handler(struct work_struct *work)


@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to get irq for data line\n");
return ret;
goto free_host;
}
mutex_init(&host->cmd_mutex);
@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
mmc_add_host(mmc);
return 0;
free_host:
mmc_free_host(mmc);
return ret;
}
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)


@ -602,10 +602,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
.chip = &sdhci_acpi_chip_amd,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.chip = &sdhci_acpi_chip_amd,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE |
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};


@ -26,6 +26,9 @@
#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
#define GLI_9750_DRIVING_1_VALUE 0xFFF
#define GLI_9750_DRIVING_2_VALUE 0x3
#define SDHCI_GLI_9750_SEL_1 BIT(29)
#define SDHCI_GLI_9750_SEL_2 BIT(31)
#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
#define SDHCI_GLI_9750_PLL 0x864
#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
GLI_9750_DRIVING_1_VALUE);
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
GLI_9750_DRIVING_2_VALUE);
driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
driving_value |= SDHCI_GLI_9750_SEL_2;
sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
pci_free_irq_vectors(slot->chip->pdev);
gli_pcie_enable_msi(slot);
return sdhci_pci_resume_host(chip);
}
#endif
static const struct sdhci_ops sdhci_gl9755_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9755,
.ops = &sdhci_gl9755_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
#endif
};
static const struct sdhci_ops sdhci_gl9750_ops = {
@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9750,
.ops = &sdhci_gl9750_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
#endif
};


@ -356,6 +356,7 @@ static void __exit dsa_loop_exit(void)
}
module_exit(dsa_loop_exit);
MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");


@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0);
@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
(page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
sg_length, DPAA2_ETH_RX_BUF_SIZE);
sg_length, priv->rx_buf_size);
}
if (dpaa2_sg_is_final(sge))
@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0);
}
@ -331,7 +331,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break;
case XDP_REDIRECT:
dma_unmap_page(priv->net_dev->dev.parent, addr,
DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
xdp.data_hard_start = vaddr;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
@ -370,7 +370,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
@ -389,13 +389,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
@ -963,7 +963,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
if (!page)
goto err_alloc;
addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@ -973,7 +973,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE,
addr, priv->rx_buf_size,
bpid);
}
@ -1680,7 +1680,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
int mfl, linear_mfl;
mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
if (mfl > linear_mfl) {
@ -2432,6 +2432,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
else
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* We need to ensure that the buffer size seen by WRIOP is a multiple
* of 64 or 256 bytes depending on the WRIOP version.
*/
priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
@ -3096,7 +3101,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
dev_err(dev, "dpni_set_pools() failed\n");


@ -373,6 +373,7 @@ struct dpaa2_eth_priv {
u16 tx_data_offset;
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
struct iommu_domain *iommu_domain;


@ -590,7 +590,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;


@ -45,6 +45,8 @@
#define MGMT_MSG_TIMEOUT 5000
#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
u8 *buf_in, u16 in_size,
u8 *buf_out, u16 *out_size,
enum mgmt_direction_type direction,
u16 resp_msg_id)
u16 resp_msg_id, u32 timeout)
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_recv_msg *recv_msg;
struct completion *recv_done;
unsigned long timeo;
u16 msg_id;
int err;
@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
if (!wait_for_completion_timeout(recv_done,
msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 timeout = 0;
if (sync != HINIC_MGMT_MSG_SYNC) {
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP);
MSG_NOT_RESP, timeout);
}
/**


@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
int err;
down(&nic_dev->mgmt_lock);
@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed to set func port state\n");
nic_dev->flags |= (flags & HINIC_INTF_UP);
return err;
}
hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
if (err) {
netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
nic_dev->flags |= (flags & HINIC_INTF_UP);
return err;
}
hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (nic_dev->flags & HINIC_RSS_ENABLE) {
hinic_rss_deinit(nic_dev);


@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_netdev(ndev);
free_irq(ndev->irq, ndev);
devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);


@@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
goto out1;
goto undo_probe1;
return 0;
out1:
undo_probe1:
dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
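The jazz_sonic fix renames the failure label and releases the memory region that a register_netdev() failure previously leaked. A standalone sketch of the goto-unwind idiom it relies on follows; every function name below is a hypothetical stand-in, not a kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the resources a probe routine acquires. */
static void *claim_region(void)      { return malloc(16); }
static void *alloc_descriptors(void) { return malloc(64); }
static int   register_device(void)   { return -1; /* simulate failure */ }

static int probe(void)
{
    void *region, *desc;
    int err;

    region = claim_region();
    if (!region)
        return -1;
    desc = alloc_descriptors();
    if (!desc) {
        err = -1;
        goto undo_region;
    }
    err = register_device();
    if (err)
        goto undo_probe1;   /* must undo *both* earlier steps */
    return 0;

undo_probe1:
    free(desc);
undo_region:
    free(region);
    return err;
}

int main(void)
{
    printf("probe returned %d\n", probe());
    return 0;
}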


@@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
goto err_free_alink;
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
if (!alink->prio_map)
if (!alink->prio_map) {
err = -ENOMEM;
goto err_free_alink;
}
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.


@@ -2202,6 +2202,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
/* RTL8401, reportedly works if treated as RTL8101e */
{ 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
{ 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },


@@ -75,6 +75,11 @@ struct ethqos_emac_por {
unsigned int value;
};
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
};
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
{ .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
};
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
};
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
unsigned int val;
@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
struct resource *res;
int ret;
@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
ethqos->por = of_device_get_match_data(&pdev->dev);
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
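The qcom-ethqos fix works by pointing the OF match data at a wrapper structure that carries both the POR register table and its length, so num_por is initialized together with por. A standalone sketch of that pattern with simplified stand-in types; the second register value mirrors the one in the diff, the first is a made-up example.

#include <stdio.h>

struct por_entry { unsigned int offset, value; };

/* Match data now bundles the table with its element count. */
struct emac_driver_data {
    const struct por_entry *por;
    unsigned int num_por;
};

static const struct por_entry emac_por[] = {
    { 0x10, 0x40c01343 },   /* hypothetical */
    { 0x14, 0x00002060 },
};

static const struct emac_driver_data emac_data = {
    .por = emac_por,
    .num_por = sizeof(emac_por) / sizeof(emac_por[0]),
};

int main(void)
{
    /* What the driver would get back from its match-data lookup. */
    const struct emac_driver_data *data = &emac_data;

    for (unsigned int i = 0; i < data->num_por; i++)
        printf("reg 0x%02x = 0x%08x\n",
               data->por[i].offset, data->por[i].value);
    return 0;
}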


@@ -3,9 +3,21 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/phy.h>
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
/* External Register Read Data Register */
#define LAN87XX_EXT_REG_RD_DATA (0x15)
/* External Register Write Data Register */
#define LAN87XX_EXT_REG_WR_DATA (0x16)
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
@@ -14,9 +26,160 @@
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
/* phyaccess nested types */
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
#define PHYACC_ATTR_MODE_MODIFY 2
#define PHYACC_ATTR_BANK_SMI 0
#define PHYACC_ATTR_BANK_MISC 1
#define PHYACC_ATTR_BANK_PCS 2
#define PHYACC_ATTR_BANK_AFE 3
#define PHYACC_ATTR_BANK_MAX 7
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
struct access_ereg_val {
u8 mode;
u8 bank;
u8 offset;
u16 val;
u16 mask;
};
static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
u8 offset, u16 val)
{
u16 ereg = 0;
int rc = 0;
if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
return -EINVAL;
if (bank == PHYACC_ATTR_BANK_SMI) {
if (mode == PHYACC_ATTR_MODE_WRITE)
rc = phy_write(phydev, offset, val);
else
rc = phy_read(phydev, offset);
return rc;
}
if (mode == PHYACC_ATTR_MODE_WRITE) {
ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
if (rc < 0)
return rc;
} else {
ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
}
ereg |= (bank << 8) | offset;
rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
if (rc < 0)
return rc;
if (mode == PHYACC_ATTR_MODE_READ)
rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
return rc;
}
static int access_ereg_modify_changed(struct phy_device *phydev,
u8 bank, u8 offset, u16 val, u16 mask)
{
int new = 0, rc = 0;
if (bank > PHYACC_ATTR_BANK_MAX)
return -EINVAL;
rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
if (rc < 0)
return rc;
new = val | (rc & (mask ^ 0xFFFF));
rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
return rc;
}
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
/* TX Amplitude = 5 */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
0x000A, 0x001E},
/* Clear SMI interrupts */
{PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
0, 0},
/* Clear MISC interrupts */
{PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
0, 0},
/* Turn on TC10 Ring Oscillator (ROSC) */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
0x0020, 0x0020},
/* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
0x283C, 0},
/* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
0x274F, 0},
/* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
* and Wake_In to wake PHY
*/
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
0x80A7, 0},
/* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
* to 128 uS
*/
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
0xF110, 0},
/* Enable HW Init */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
0x0100, 0x0100},
};
int rc, i;
/* Start manual initialization procedures in Managed Mode */
rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
0x1a, 0x0000, 0x0100);
if (rc < 0)
return rc;
/* Soft Reset the SMI block */
rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
0x00, 0x8000, 0x8000);
if (rc < 0)
return rc;
/* Check to see if the self-clearing bit is cleared */
usleep_range(1000, 2000);
rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
PHYACC_ATTR_BANK_SMI, 0x00, 0);
if (rc < 0)
return rc;
if ((rc & 0x8000) != 0)
return -ETIMEDOUT;
/* PHY Initialization */
for (i = 0; i < ARRAY_SIZE(init); i++) {
if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
rc = access_ereg_modify_changed(phydev, init[i].bank,
init[i].offset,
init[i].val,
init[i].mask);
} else {
rc = access_ereg(phydev, init[i].mode, init[i].bank,
init[i].offset, init[i].val);
}
if (rc < 0)
return rc;
}
return 0;
}
static int lan87xx_phy_config_intr(struct phy_device *phydev)
{
int rc, val = 0;
@@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
static int lan87xx_config_init(struct phy_device *phydev)
{
int rc = lan87xx_phy_init(phydev);
return rc < 0 ? rc : 0;
}
static struct phy_driver microchip_t1_phy_driver[] = {
{
.phy_id = 0x0007c150,
@@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
.config_aneg = genphy_config_aneg,
.ack_interrupt = lan87xx_phy_ack_interrupt,
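access_ereg_modify_changed() in the lan87xx code above is a read-modify-write over the banked extended registers: bits inside the mask come from the new value, bits outside it are preserved. The masking arithmetic in isolation, as a standalone sketch with plain integers in place of the PHY register accesses:

#include <stdio.h>
#include <stdint.h>

/* Keep the bits outside 'mask', replace the bits inside it with 'val'. */
static uint16_t modify_bits(uint16_t old, uint16_t val, uint16_t mask)
{
    return (uint16_t)(val | (old & (mask ^ 0xFFFF)));
}

int main(void)
{
    uint16_t reg = 0x8135;  /* hypothetical current register value */

    /* e.g. the TX amplitude update: write 0x000A under mask 0x001E */
    printf("0x%04x -> 0x%04x\n", reg, modify_bits(reg, 0x000A, 0x001E));
    return 0;
}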


@@ -1160,9 +1160,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
ret = phy_restart_aneg(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_ENABLE) {
ret = phy_restart_aneg(phydev);
if (ret < 0)
return ret;
}
}
return 0;


@@ -492,6 +492,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
if (skb->pkt_type != PACKET_HOST)
goto abort;
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;
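The pppoe change adds an early drop of discovery frames (such as PADT) whose delivery type is not PACKET_HOST, i.e. frames that were only seen because the interface is promiscuous. A standalone sketch of that filter; the enum mirrors the kernel's packet-type names but is redefined here.

#include <stdio.h>

enum pkt_type { PACKET_HOST, PACKET_BROADCAST, PACKET_MULTICAST, PACKET_OTHERHOST };

/* Only act on discovery frames actually addressed to this host. */
static int disc_rcv(enum pkt_type type)
{
    if (type != PACKET_HOST)
        return -1;  /* abort: not targeted at a local interface */
    return 0;       /* continue parsing the PPPoE header */
}

int main(void)
{
    printf("host frame:      %d\n", disc_rcv(PACKET_HOST));
    printf("otherhost frame: %d\n", disc_rcv(PACKET_OTHERHOST));
    return 0;
}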


@@ -1231,9 +1231,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
break;
} while (rq->vq->num_free);
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
u64_stats_update_begin(&rq->stats.syncp);
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq->stats.kicks++;
u64_stats_update_end(&rq->stats.syncp);
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
return !oom;


@@ -1297,6 +1297,7 @@ static const struct gpio_chip byt_gpio_chip = {
.direction_output = byt_gpio_direction_output,
.get = byt_gpio_get,
.set = byt_gpio_set,
.set_config = gpiochip_generic_config,
.dbg_show = byt_gpio_dbg_show,
};


@@ -1474,11 +1474,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
unsigned long flags;
u32 intr_line;
chained_irq_enter(chip, desc);
raw_spin_lock_irqsave(&chv_lock, flags);
pending = readl(pctrl->regs + CHV_INTSTAT);
raw_spin_unlock_irqrestore(&chv_lock, flags);
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
unsigned irq, offset;


@@ -15,17 +15,18 @@
#include "pinctrl-intel.h"
#define SPT_PAD_OWN 0x020
#define SPT_PADCFGLOCK 0x0a0
#define SPT_HOSTSW_OWN 0x0d0
#define SPT_GPI_IS 0x100
#define SPT_GPI_IE 0x120
#define SPT_PAD_OWN 0x020
#define SPT_H_PADCFGLOCK 0x090
#define SPT_LP_PADCFGLOCK 0x0a0
#define SPT_HOSTSW_OWN 0x0d0
#define SPT_GPI_IS 0x100
#define SPT_GPI_IE 0x120
#define SPT_COMMUNITY(b, s, e) \
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.padcfglock_offset = SPT_LP_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
@@ -47,7 +48,7 @@
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.padcfglock_offset = SPT_H_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \


@@ -692,7 +692,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
pol = msm_readl_intr_cfg(pctrl, g);
pol ^= BIT(g->intr_polarity_bit);
msm_writel_intr_cfg(val, pctrl, g);
msm_writel_intr_cfg(pol, pctrl, g);
val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
intstat = msm_readl_intr_status(pctrl, g);


@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
if (!ism->smcd)
if (!ism->smcd) {
ret = -ENOMEM;
goto err_resource;
}
ism->smcd->priv = ism;
ret = ism_dev_init(ism);


@@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size))
if (__copy_from_user(cmnd, buf, cmd_size)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but is is possible that the app intended SG_DXFER_TO_DEV, because there


@@ -2105,7 +2105,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
link_trb = priv_req->trb;
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring) {
if (req_on_hw_ring && link_trb) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |


@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
{
struct usb_memory *usbm = NULL;
struct usb_dev_state *ps = file->private_data;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
@@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
return -EAGAIN;
if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
return -EAGAIN;
}
} else {
if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
size)) {
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
return -EAGAIN;
}
}
vma->vm_flags |= VM_IO;


@@ -39,6 +39,7 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define USB_PRODUCT_USB5534B 0x5534
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -5511,8 +5512,11 @@ static void hub_event(struct work_struct *work)
}
static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
.idProduct = USB_PRODUCT_USB5534B,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR


@@ -2480,9 +2480,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
if (trb->ctrl & DWC3_TRB_CTRL_HWO)
break;
req->sg = sg_next(s);
req->num_pending_sgs--;


@@ -292,6 +292,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
char *name;
int ret;
if (strlen(page) < len)
return -EOVERFLOW;
name = kstrdup(page, GFP_KERNEL);
if (!name)
return -ENOMEM;
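The configfs change rejects a UDC name whose reported write length is longer than its C-string length, i.e. a buffer with an embedded NUL that the later string duplication would silently truncate. A standalone sketch of just that guard, with a made-up UDC name as the example input:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Reject buffers whose string length is shorter than the write length:
 * that can only happen when the data contains an embedded NUL. */
static int store_udc_name(const char *page, size_t len)
{
    if (strlen(page) < len)
        return -EOVERFLOW;
    return 0;   /* safe to duplicate the whole string */
}

int main(void)
{
    const char good[] = "dummy_udc.0\n";
    const char bad[]  = "dummy\0udc";

    printf("plain name:   %d\n", store_udc_name(good, sizeof(good) - 1));
    printf("embedded NUL: %d\n", store_udc_name(bad, sizeof(bad) - 1));
    return 0;
}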


@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
if (!usb_desc)
if (!usb_desc) {
status = -ENOMEM;
goto fail;
}
usb_otg_descriptor_init(cdev->gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;


@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc)
if (!usb_desc) {
status = -ENOMEM;
goto fail1;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;


@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
if (!usb_desc)
if (!usb_desc) {
status = -ENOMEM;
goto fail;
}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;


@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
err_req:
release_mem_region(base, len);
err:
kfree(dev);
return ret;
}


@@ -363,6 +363,7 @@ static int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(shared_hcd);
@@ -376,8 +377,9 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
pm_runtime_set_suspended(&dev->dev);
pm_runtime_disable(&dev->dev);
pm_runtime_put_noidle(&dev->dev);
pm_runtime_set_suspended(&dev->dev);
return 0;
}


@@ -3421,8 +3421,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* New sg entry */
--num_sgs;
sent_len -= block_len;
if (num_sgs != 0) {
sg = sg_next(sg);
sg = sg_next(sg);
if (num_sgs != 0 && sg) {
block_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
addr += sent_len;
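The xhci-ring change moves the sg_next() advance out of the conditional so the cursor is stepped exactly once per segment and is only dereferenced when more entries are both expected and actually present. A standalone sketch of that ordering, with a plain linked list standing in for the scatterlist:

#include <stdio.h>
#include <stddef.h>

struct seg { unsigned int len; struct seg *next; };

/* Advance first, then require both "more entries expected" and a
 * non-NULL cursor before touching the next segment. */
static void send_segments(struct seg *sg, unsigned int num_sgs)
{
    while (sg) {
        printf("sending %u bytes\n", sg->len);
        --num_sgs;
        sg = sg->next;
        if (num_sgs != 0 && sg)
            continue;
        break;
    }
}

int main(void)
{
    struct seg c = { 512, NULL }, b = { 1024, &c }, a = { 4096, &b };

    send_segments(&a, 3);   /* count matches the list */
    send_segments(&a, 5);   /* over-count: the NULL check stops the walk */
    return 0;
}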


@@ -2135,8 +2135,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
}
}
kref_put(&wdata2->refcount, cifs_writedata_release);
if (rc) {
kref_put(&wdata2->refcount, cifs_writedata_release);
if (is_retryable_error(rc))
continue;
i += nr_pages;
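The cifs change drops the local reference on wdata2 unconditionally instead of only on the error path, so the success path no longer leaks it. A standalone sketch of that put-exactly-once rule with a toy reference counter; none of this is the cifs API, just the shape of the fix.

#include <stdio.h>

struct obj { int refs; };

static void put(struct obj *o)
{
    if (--o->refs == 0)
        printf("object released\n");
}

/* Hand the object off, then drop our own reference on every outcome. */
static void requeue(struct obj *o, int rc)
{
    /* ... submission would take its own reference here ... */
    put(o);
    if (rc) {
        /* error handling continues; no second put() */
    }
}

int main(void)
{
    struct obj a = { .refs = 1 };
    struct obj b = { .refs = 1 };

    requeue(&a, 0);     /* success path releases the reference */
    requeue(&b, -5);    /* error path releases it exactly once too */
    return 0;
}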


@@ -1274,6 +1274,8 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
set_mm_exe_file(bprm->mm, bprm->file);
would_dump(bprm, bprm->file);
/*
* Release all of the old mmap stuff
*/
@@ -1817,8 +1819,6 @@ static int __do_execve_file(int fd, struct filename *filename,
if (retval < 0)
goto out;
would_dump(bprm, bprm->file);
retval = exec_binprm(bprm);
if (retval < 0)
goto out;


@@ -528,10 +528,12 @@ static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
if (!hgt)
if (hgt) {
if (mp->mp_list[hgt] >= sdp->sd_inptrs)
goto lower_metapath;
} else {
if (mp->mp_list[hgt] >= sdp->sd_diptrs)
break;
goto lower_metapath;
}
fill_up_metapath:
@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
ret = -ENOENT;
goto unlock;
} else {
/* report a hole */
iomap->offset = pos;
iomap->length = length;
goto do_alloc;
goto hole_found;
}
}
iomap->length = size;
@@ -933,8 +934,6 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
return ret;
do_alloc:
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
if (flags & IOMAP_REPORT) {
if (pos >= size)
ret = -ENOENT;
@@ -956,6 +955,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
if (pos < size && height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
}
hole_found:
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
goto out;
}


@@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
struct super_block *sb = sdp->sd_vfs;
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
@@ -504,7 +504,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
unsigned int max_bio_size = 2 * 1024 * 1024;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
@@ -532,12 +532,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off = 0;
}
if (!bio || (bio_chained && !off)) {
if (!bio || (bio_chained && !off) ||
bio->bi_iter.bi_size >= max_bio_size) {
/* start new bio */
} else {
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize)
goto block_added;
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize)
goto block_added;
}
if (off) {
unsigned int blocks =
(PAGE_SIZE - off) >> bsize_shift;
@@ -563,7 +568,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
if (blocks_submitted < blocks_read + readahead_blocks) {
if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
/* Keep at least one bio in flight */
continue;
}
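The gfs2_find_jhead() change caps each journal-readahead bio at 2 MiB and only appends a block when it is physically contiguous with the bio's current end sector; anything else starts a new bio. A standalone sketch of that append test, with plain integers modelling 512-byte sectors rather than the real block-layer bio API, and the chained-bio handling omitted:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_BIO_SIZE    (2u * 1024 * 1024)  /* 2 MiB cap per bio */

struct fake_bio { uint64_t first_sector; unsigned int size; };

static bool can_append(const struct fake_bio *bio, uint64_t block_sector)
{
    uint64_t end_sector = bio->first_sector + bio->size / 512;

    return bio->size < MAX_BIO_SIZE && end_sector == block_sector;
}

int main(void)
{
    struct fake_bio bio = { .first_sector = 1024, .size = 8192 };

    printf("contiguous block:    %d\n", can_append(&bio, 1024 + 8192 / 512));
    printf("discontiguous block: %d\n", can_append(&bio, 40960));
    return 0;
}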
