This is the 6.1.59 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUxmvQACgkQONu9yGCS
 aT79txAAsdMG7H4n6wai9EsZa6fpenp5MaQdice97FinKvS3El1hmlNOJPY2idAC
 hFnfhebrWsyStCUcUs2KeiosZwKoKSRuIZ2l43P9o4tNbAoaJfp7EOihCKSJyl1K
 qcu1P7AJZH2GfR3wS86grRjlembwxYNaFVS+n4a9X3XfoMOTM9TLwIPyKzXmruUv
 4FjTH6clfQbg7lu80nPBeps1FKd2XXhiLfFH21ilnSY8ESQEKo0x10Vbi0/LqE/F
 QxvP2bFRMXyKl9HHQMAkIIjpQH+hZDzpbGx1hoC2I0xc92dTERHzzZxWkuUt2e/Y
 zGrgQ3gQR8VkpuhPuFRrSu3bZUlEr74zRyp3sXBG0RSOpK13xSYgcdRzBac/L0zT
 aSqvmIYuLnjf6qE85LEp/NAQAgKxQ2S2nGwSoN+cePb/zB0qlyXNfSsPg2mptOTi
 MOxgldZg10oNh0VXEayhtoJGUCJBjk1XUor0bAFj4u4GCiBbFfAt3e5fF0jauMva
 9b2s89qE5444dr98kdAcd79mEI0xkX9SbjCTY9lwTzA7xk+7iw+vOchLC7fQWoyf
 gspg7PdzCaFnDAS3WiIR3NqkSlGpv426Kr3kd/8jrLA3VB81Rb6bFX+E7iRzVJsd
 /YShEGesDA16aZ5BDnOMXMHzFbTEgfNDH1AiJoJcjq/V5cIuSc0=
 =Dwcv
 -----END PGP SIGNATURE-----

Merge 6.1.59 into android14-6.1-lts

Changes in 6.1.59
	net: mana: Fix TX CQE error handling
	mptcp: fix delegated action races
	drm/i915: Don't set PIPE_CONTROL_FLUSH_L3 for aux inval
	RDMA/cxgb4: Check skb value for failure to allocate
	perf/arm-cmn: Fix the unhandled overflow status of counter 4 to 7
	platform/x86: think-lmi: Fix reference leak
	platform/x86: hp-wmi:: Mark driver struct with __refdata to prevent section mismatch warning
	scsi: Do not rescan devices with a suspended queue
	HID: logitech-hidpp: Fix kernel crash on receiver USB disconnect
	quota: Fix slow quotaoff
	ASoC: amd: yc: Fix non-functional mic on Lenovo 82YM
	ata: libata-scsi: Disable scsi device manage_system_start_stop
	net: prevent address rewrite in kernel_bind()
	arm64: dts: qcom: sm8150: extend the size of the PDC resource
	dt-bindings: interrupt-controller: renesas,rzg2l-irqc: Update description for '#interrupt-cells' property
	irqchip: renesas-rzg2l: Fix logic to clear TINT interrupt source
	KEYS: trusted: Remove redundant static calls usage
	ALSA: usb-audio: Fix microphone sound on Opencomm2 Headset
	ALSA: usb-audio: Fix microphone sound on Nexigo webcam.
	ALSA: hda/realtek: Change model for Intel RVP board
	ASoC: SOF: amd: fix for firmware reload failure after playback
	ASoC: simple-card-utils: fixup simple_util_startup() error handling
	ASoC: Intel: soc-acpi: Add entry for HDMI_In capture support in MTL match table
	ASoC: Intel: sof_sdw: add support for SKU 0B14
	ASoC: Intel: soc-acpi: Add entry for sof_es8336 in MTL match table.
	ASoC: Use of_property_read_bool() for boolean properties
	ASoC: fsl_sai: MCLK bind with TX/RX enable bit
	ASoC: fsl_sai: Don't disable bitclock for i.MX8MP
	ALSA: hda/realtek: Add quirk for HP Victus 16-d1xxx to enable mute LED
	ALSA: hda/realtek: Add quirk for mute LEDs on HP ENVY x360 15-eu0xxx
	ALSA: hda/realtek - ALC287 I2S speaker platform support
	ALSA: hda/realtek - ALC287 merge RTK codec with CS CS35L41 AMP
	pinctrl: nuvoton: wpcm450: fix out of bounds write
	drm/msm/dp: do not reinitialize phy unless retry during link training
	drm/msm/dsi: skip the wait for video mode done if not applicable
	drm/msm/dsi: fix irq_of_parse_and_map() error checking
	drm/msm/dpu: change _dpu_plane_calc_bw() to use u64 to avoid overflow
	drm/msm/dp: Add newlines to debug printks
	phy: lynx-28g: cancel the CDR check work item on the remove path
	phy: lynx-28g: lock PHY while performing CDR lock workaround
	phy: lynx-28g: serialize concurrent phy_set_mode_ext() calls to shared registers
	net: dsa: qca8k: fix potential MDIO bus conflict when accessing internal PHYs via management frames
	can: isotp: isotp_sendmsg(): fix TX state detection and wait behavior
	can: sun4i_can: Only show Kconfig if ARCH_SUNXI is set
	arm64: dts: mediatek: mt8195: Set DSU PMU status to fail
	ravb: Fix up dma_free_coherent() call in ravb_remove()
	ravb: Fix use-after-free issue in ravb_tx_timeout_work()
	ieee802154: ca8210: Fix a potential UAF in ca8210_probe
	mlxsw: fix mlxsw_sp2_nve_vxlan_learning_set() return type
	xen-netback: use default TX queue size for vifs
	riscv, bpf: Factor out emit_call for kernel and bpf context
	riscv, bpf: Sign-extend return values
	drm/vmwgfx: fix typo of sizeof argument
	bpf: Fix verifier log for async callback return values
	net: refine debug info in skb_checksum_help()
	net: macsec: indicate next pn update when offloading
	net: phy: mscc: macsec: reject PN update requests
	net/mlx5e: macsec: use update_pn flag instead of PN comparation
	ixgbe: fix crash with empty VF macvlan list
	net/mlx5e: Again mutually exclude RX-FCS and RX-port-timestamp
	net: nfc: fix races in nfc_llcp_sock_get() and nfc_llcp_sock_get_sn()
	net/smc: Fix pos miscalculation in statistics
	pinctrl: renesas: rzn1: Enable missing PINMUX
	nfc: nci: assert requested protocol is valid
	workqueue: Override implicit ordered attribute in workqueue_apply_unbound_cpumask()
	tcp: enforce receive buffer memory limits by allowing the tcp window to shrink
	dmaengine: stm32-mdma: abort resume if no ongoing transfer
	dmaengine: stm32-dma: fix stm32_dma_prep_slave_sg in case of MDMA chaining
	dmaengine: stm32-dma: fix residue in case of MDMA chaining
	dmaengine: stm32-mdma: use Link Address Register to compute residue
	dmaengine: stm32-mdma: set in_flight_bytes in case CRQA flag is set
	usb: xhci: xhci-ring: Use sysdev for mapping bounce buffer
	net: usb: dm9601: fix uninitialized variable use in dm9601_mdio_read
	usb: dwc3: Soft reset phy on probe for host
	usb: cdns3: Modify the return value of cdns_set_active () to void when CONFIG_PM_SLEEP is disabled
	usb: hub: Guard against accesses to uninitialized BOS descriptors
	usb: musb: Get the musb_qh poniter after musb_giveback
	usb: musb: Modify the "HWVers" register address
	iio: pressure: bmp280: Fix NULL pointer exception
	iio: imu: bno055: Fix missing Kconfig dependencies
	iio: adc: imx8qxp: Fix address for command buffer registers
	iio: dac: ad3552r: Correct device IDs
	iio: admv1013: add mixer_vgate corner cases
	iio: pressure: dps310: Adjust Timeout Settings
	iio: pressure: ms5611: ms5611_prom_is_valid false negative bug
	iio: addac: Kconfig: update ad74413r selections
	arm64: dts: mediatek: mt8195-demo: fix the memory size to 8GB
	arm64: dts: mediatek: mt8195-demo: update and reorder reserved memory regions
	drm/atomic-helper: relax unregistered connector check
	drm/amdgpu: add missing NULL check
	drm/amd/display: Don't set dpms_off for seamless boot
	ACPI: resource: Skip IRQ override on ASUS ExpertBook B1402CBA
	ACPI: EC: Add quirk for the HP Pavilion Gaming 15-dk1xxx
	ksmbd: not allow to open file if delelete on close bit is set
	perf/x86/lbr: Filter vsyscall addresses
	x86/cpu: Fix AMD erratum #1485 on Zen4-based CPUs
	mcb: remove is_added flag from mcb_device struct
	thunderbolt: Workaround an IOMMU fault on certain systems with Intel Maple Ridge
	thunderbolt: Check that lane 1 is in CL0 before enabling lane bonding
	thunderbolt: Restart XDomain discovery handshake after failure
	powerpc/47x: Fix 47x syscall return crash
	libceph: use kernel_connect()
	ceph: fix incorrect revoked caps assert in ceph_fill_file_size()
	ceph: fix type promotion bug on 32bit systems
	Input: powermate - fix use-after-free in powermate_config_complete
	Input: psmouse - fix fast_reconnect function for PS/2 mode
	Input: xpad - add PXN V900 support
	Input: i8042 - add Fujitsu Lifebook E5411 to i8042 quirk table
	Input: goodix - ensure int GPIO is in input for gpio_count == 1 && gpio_int_idx == 0 case
	tee: amdtee: fix use-after-free vulnerability in amdtee_close_session
	mctp: perform route lookups under a RCU read-side lock
	nfp: flower: avoid rmmod nfp crash issues
	usb: typec: ucsi: Use GET_CAPABILITY attributes data to set power supply scope
	cgroup: Remove duplicates in cgroup v1 tasks file
	dma-buf: add dma_fence_timestamp helper
	pinctrl: avoid unsafe code pattern in find_pinctrl()
	scsi: ufs: core: Correct clear TM error log
	counter: chrdev: fix getting array extensions
	counter: microchip-tcb-capture: Fix the use of internal GCLK logic
	usb: typec: altmodes/displayport: Signal hpd low when exiting mode
	usb: typec: ucsi: Clear EVENT_PENDING bit if ucsi_send_command fails
	usb: gadget: udc-xilinx: replace memcpy with memcpy_toio
	usb: gadget: ncm: Handle decoding of multiple NTB's in unwrap call
	usb: cdnsp: Fixes issue with dequeuing not queued requests
	x86/alternatives: Disable KASAN in apply_alternatives()
	dmaengine: idxd: use spin_lock_irqsave before wait_event_lock_irq
	dmaengine: mediatek: Fix deadlock caused by synchronize_irq()
	powerpc/8xx: Fix pte_access_permitted() for PAGE_NONE
	powerpc/64e: Fix wrong test in __ptep_test_and_clear_young()
	ALSA: hda/realtek - Fixed two speaker platform
	Linux 6.1.59

Change-Id: Iaae6736993c003cc47f495f275591bbb924f986e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

commit 3858124ea0 by Greg Kroah-Hartman, 2023-11-02 09:12:31 +00:00
150 changed files with 1288 additions and 509 deletions

@@ -31,8 +31,9 @@ properties:
- const: renesas,rzg2l-irqc
'#interrupt-cells':
description: The first cell should contain external interrupt number (IRQ0-7) and the
second cell is used to specify the flag.
description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second
cell is used to specify the flag.
const: 2
'#address-cells':

@@ -967,6 +967,21 @@ tcp_tw_reuse - INTEGER
tcp_window_scaling - BOOLEAN
Enable window scaling as defined in RFC1323.
tcp_shrink_window - BOOLEAN
This changes how the TCP receive window is calculated.
RFC 7323, section 2.4, says there are instances when a retracted
window can be offered, and that TCP implementations MUST ensure
that they handle a shrinking window, as specified in RFC 1122.
- 0 - Disabled. The window is never shrunk.
- 1 - Enabled. The window is shrunk when necessary to remain within
the memory limit set by autotuning (sk_rcvbuf).
This only occurs if a non-zero receive window
scaling factor is also in effect.
Default: 0
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP sockets.
Each TCP socket has rights to use it due to fact of its birth.
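Aside, not part of the diff: on a kernel carrying the tcp_shrink_window change documented above, the knob is exposed as net.ipv4.tcp_shrink_window, so a minimal userspace sketch to enable it (assuming the usual /proc/sys mapping) could look like:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/tcp_shrink_window", "w");

		if (!f) {
			perror("tcp_shrink_window");	/* kernel without the patch? */
			return 1;
		}
		fputs("1\n", f);	/* 1 = allow shrinking within sk_rcvbuf limits */
		return fclose(f) ? 1 : 0;
	}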

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 58
SUBLEVEL = 59
EXTRAVERSION =
NAME = Curry Ramen

@@ -48,7 +48,7 @@ key-0 {
memory@40000000 {
device_type = "memory";
reg = <0 0x40000000 0 0x80000000>;
reg = <0 0x40000000 0x2 0x00000000>;
};
reserved-memory {
@@ -56,13 +56,8 @@ reserved-memory {
#size-cells = <2>;
ranges;
/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_reserved: secmon@54600000 {
no-map;
reg = <0 0x54600000 0x0 0x200000>;
};
/* 12 MiB reserved for OP-TEE (BL32)
/*
* 12 MiB reserved for OP-TEE (BL32)
* +-----------------------+ 0x43e0_0000
* | SHMEM 2MiB |
* +-----------------------+ 0x43c0_0000
@@ -75,6 +70,34 @@ optee_reserved: optee@43200000 {
no-map;
reg = <0 0x43200000 0 0x00c00000>;
};
scp_mem: memory@50000000 {
compatible = "shared-dma-pool";
reg = <0 0x50000000 0 0x2900000>;
no-map;
};
vpu_mem: memory@53000000 {
compatible = "shared-dma-pool";
reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
};
/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_mem: memory@54600000 {
no-map;
reg = <0 0x54600000 0x0 0x200000>;
};
snd_dma_mem: memory@60000000 {
compatible = "shared-dma-pool";
reg = <0 0x60000000 0 0x1100000>;
no-map;
};
apu_mem: memory@62000000 {
compatible = "shared-dma-pool";
reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
};
};
};
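Sanity check on the reg change at the top of this hunk: #address-cells and #size-cells are both 2, so the size spans two cells. The old size of 0x0 0x80000000 is 0x8000_0000 = 2 GiB, while the new 0x2 0x00000000 is 0x2_0000_0000 = 8589934592 bytes = 8 GiB, matching the "fix the memory size to 8GB" subject in the changelog.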

@@ -229,6 +229,7 @@ dsu-pmu {
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
<&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
status = "fail";
};
dmic_codec: dmic-codec {

@@ -3701,7 +3701,7 @@ camnoc_virt: interconnect@ac00000 {
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm8150-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x400>;
reg = <0 0x0b220000 0 0x30000>;
qcom,pdc-ranges = <0 480 94>, <94 609 31>,
<125 63 1>;
#interrupt-cells = <2>;

@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
#define pte_wrprotect pte_wrprotect
static inline int pte_read(pte_t pte)
{
return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
}
#define pte_read pte_read
static inline int pte_write(pte_t pte)
{
return !(pte_val(pte) & _PAGE_RO);

@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
if (pte_young(*ptep))
if (!pte_young(*ptep))
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;

@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
return pte_val(pte) & _PAGE_RW;
}
#endif
#ifndef pte_read
static inline int pte_read(pte_t pte) { return 1; }
#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }

@@ -135,8 +135,9 @@ ret_from_syscall:
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
bne- .L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
kuep_unlock
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -170,10 +171,11 @@ syscall_exit_finish:
b 1b
#ifdef CONFIG_44x
2: li r7,0
.L44x_icache_flush:
li r7,0
iccci r0,r0
stw r7,icache_44x_need_flush@l(r4)
b 1b
b .L44x_icache_flush_return
#endif /* CONFIG_44x */
.globl ret_from_fork

@@ -236,7 +236,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
is_tail_call ? 4 : 0, /* skip TCC init */
ctx);
@@ -428,12 +428,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
*rd = RV_REG_T2;
}
static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
s64 upper, lower;
if (rvoff && is_21b_int(rvoff) && !force_jalr) {
if (rvoff && fixed_addr && is_21b_int(rvoff)) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
@@ -454,24 +454,17 @@ static bool is_signed_bpf_cond(u8 cond)
cond == BPF_JSGE || cond == BPF_JSLE;
}
static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
s64 off = 0;
u64 ip;
u8 rd;
int ret;
if (addr && ctx->insns) {
ip = (u64)(long)(ctx->insns + ctx->ninsns);
off = addr - ip;
}
ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
if (ret)
return ret;
rd = bpf_to_rv_reg(BPF_REG_0, ctx);
emit_mv(rd, RV_REG_A0, ctx);
return 0;
return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
@@ -913,7 +906,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* JUMP off */
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1032,17 +1025,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* function call */
case BPF_JMP | BPF_CALL:
{
bool fixed;
bool fixed_addr;
u64 addr;
mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
&fixed);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&addr, &fixed_addr);
if (ret < 0)
return ret;
ret = emit_call(fixed, addr, ctx);
ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
if (insn->src_reg != BPF_PSEUDO_CALL)
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
@@ -1057,7 +1054,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;
rvoff = epilogue_offset(ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
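Note on the emit_mv() to emit_addiw() change above: on RV64, ADDIW with a zero immediate is exactly the assembler's sext.w pseudo-instruction, i.e. "addiw a0, a5, 0" computes a5 + 0, truncates to 32 bits, then sign-extends bit 31 through bit 63. That is what makes the copied BPF return value properly sign-extended, where the old raw 64-bit move was not.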

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/insn.h>
#include <linux/mm.h>
#include "perf_event.h"
@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
* The LBR logs any address in the IP, even if the IP just
* faulted. This means userspace can control the from address.
* Ensure we don't blindly read any address by validating it is
* a known text address.
* a known text address and not a vsyscall address.
*/
if (kernel_text_address(from)) {
if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
addr = (void *)from;
/*
* Assume we can get the maximum possible size

@@ -635,12 +635,17 @@
/* AMD Last Branch Record MSRs */
#define MSR_AMD64_LBR_SELECT 0xc000010e
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
/* Zen4 */
#define MSR_ZEN4_BP_CFG 0xc001102e
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
/* Zen 2 */
#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231

@@ -270,6 +270,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
u8 insn_buff[MAX_PATCH_LEN];
DPRINTK("alt table %px, -> %px", start, end);
/*
* In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
* cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
* During the process, KASAN becomes confused seeing partial LA57
* conversion and triggers a false-positive out-of-bound report.
*
* Disable KASAN until the patching is complete.
*/
kasan_disable_current();
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
@@ -337,6 +348,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
next:
optimize_nops(instr, a->instrlen);
}
kasan_enable_current();
}
static inline bool is_jcc32(struct insn *insn)

@@ -80,6 +80,10 @@ static const int amd_div0[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
static const int amd_erratum_1485[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
int osvw_id = *erratum++;
@@ -1125,6 +1129,10 @@ static void init_amd(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
}
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
cpu_has_amd_erratum(c, amd_erratum_1485))
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}
#ifdef CONFIG_X86_32

@@ -1886,6 +1886,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
},
},
{
/*
* HP Pavilion Gaming Laptop 15-dk1xxx
* https://github.com/systemd/systemd/issues/28942
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
},
},
{
/*
* Samsung hardware

@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
.ident = "Asus ExpertBook B1402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
},
},
{
.ident = "Asus ExpertBook B2402CBA",
.matches = {

@@ -1943,6 +1943,96 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
return rc;
}
/**
* ata_dev_power_set_standby - Set a device power mode to standby
* @dev: target device
*
* Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
* For an HDD device, this spins down the disks.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_dev_power_set_standby(struct ata_device *dev)
{
unsigned long ap_flags = dev->link->ap->flags;
struct ata_taskfile tf;
unsigned int err_mask;
/* Issue STANDBY IMMEDIATE command only if supported by the device */
if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
return;
/*
* Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
* causing some drives to spin up and down again. For these, do nothing
* if we are being called on shutdown.
*/
if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
system_state == SYSTEM_POWER_OFF)
return;
if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
system_entering_hibernation())
return;
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf.protocol = ATA_PROT_NODATA;
tf.command = ATA_CMD_STANDBYNOW1;
ata_dev_notice(dev, "Entering standby power mode\n");
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask)
ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
err_mask);
}
/**
* ata_dev_power_set_active - Set a device power mode to active
* @dev: target device
*
* Issue a VERIFY command to enter to ensure that the device is in the
* active power mode. For a spun-down HDD (standby or idle power mode),
* the VERIFY command will complete after the disk spins up.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_dev_power_set_active(struct ata_device *dev)
{
struct ata_taskfile tf;
unsigned int err_mask;
/*
* Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
* if supported by the device.
*/
if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
return;
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf.protocol = ATA_PROT_NODATA;
tf.command = ATA_CMD_VERIFY;
tf.nsect = 1;
if (dev->flags & ATA_DFLAG_LBA) {
tf.flags |= ATA_TFLAG_LBA;
tf.device |= ATA_LBA;
} else {
/* CHS */
tf.lbal = 0x1; /* sect */
}
ata_dev_notice(dev, "Entering active power mode\n");
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask)
ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
err_mask);
}
/**
* ata_read_log_page - read a specific log page
* @dev: target device

@@ -106,6 +106,14 @@ static const unsigned int ata_eh_flush_timeouts[] = {
UINT_MAX,
};
static const unsigned int ata_eh_pm_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
UINT_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned int ata_eh_other_timeouts[] = {
5000, /* same rationale as identify timeout */
10000, /* ditto */
@@ -147,6 +155,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
{ .commands = CMDS(ATA_CMD_VERIFY),
.timeouts = ata_eh_pm_timeouts },
};
#undef CMDS
@@ -498,7 +508,19 @@ static void ata_eh_unload(struct ata_port *ap)
struct ata_device *dev;
unsigned long flags;
/* Restore SControl IPM and SPD for the next driver and
/*
* Unless we are restarting, transition all enabled devices to
* standby power mode.
*/
if (system_state != SYSTEM_RESTART) {
ata_for_each_link(link, ap, PMP_FIRST) {
ata_for_each_dev(dev, link, ENABLED)
ata_dev_power_set_standby(dev);
}
}
/*
* Restore SControl IPM and SPD for the next driver and
* disable attached devices.
*/
ata_for_each_link(link, ap, PMP_FIRST) {
@@ -687,6 +709,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_xfer_mode[devno] = dev->xfer_mode;
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
/* If we are resuming, wake up the device */
if (ap->pflags & ATA_PFLAG_RESUMING)
ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
}
}
@@ -750,6 +776,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
/* clean up */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_RESUMING;
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
@@ -1241,6 +1269,13 @@ void ata_eh_detach_dev(struct ata_device *dev)
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
/*
* If the device is still enabled, transition it to standby power mode
* (i.e. spin down HDDs).
*/
if (ata_dev_enabled(dev))
ata_dev_power_set_standby(dev);
ata_dev_disable(dev);
spin_lock_irqsave(ap->lock, flags);
@@ -2927,6 +2962,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
if (ehc->i.flags & ATA_EHI_DID_RESET)
readid_flags |= ATA_READID_POSTRESET;
/*
* When resuming, before executing any command, make sure to
* transition the device to the active power mode.
*/
if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
ata_dev_power_set_active(dev);
ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
}
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
@@ -3886,6 +3930,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
unsigned long flags;
int rc = 0;
struct ata_device *dev;
struct ata_link *link;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
@@ -3898,6 +3943,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
/* Set all devices attached to the port in standby mode */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ENABLED)
ata_dev_power_set_standby(dev);
}
/*
* If we have a ZPODD attached, check its zero
* power ready status before the port is frozen.
@@ -3980,6 +4031,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
ap->pflags |= ATA_PFLAG_RESUMING;
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */

@@ -1262,7 +1262,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
}
if (cdb[4] & 0x1) {
tf->nsect = 1; /* 1 sector, lba=0 */
tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -1278,7 +1278,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
tf->lbah = 0x0; /* cyl high */
}
tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
@@ -1288,7 +1288,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
system_entering_hibernation())
system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */

@@ -62,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags);
extern int ata_dev_configure(struct ata_device *dev);
extern void ata_dev_power_set_standby(struct ata_device *dev);
extern void ata_dev_power_set_active(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,

@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
if (*id == component_id)
return 0;
if (ext->type == COUNTER_COMP_ARRAY) {
element = ext->priv;
if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
element = ext[*ext_idx].priv;
if (component_id - *id < element->length)
return 0;

@@ -98,7 +98,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
priv->qdec_mode = 0;
/* Set highest rate based on whether soc has gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
if (priv->tc_cfg->has_gclk)
if (!priv->tc_cfg->has_gclk)
cmr |= ATMEL_TC_TIMER_CLOCK2;
else
cmr |= ATMEL_TC_TIMER_CLOCK1;

@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
++count;
} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&tmp->flags)) {
if (ktime_after(tmp->timestamp, timestamp))
timestamp = tmp->timestamp;
} else {
/*
* Use the current time if the fence is
* currently signaling.
*/
timestamp = ktime_get();
ktime_t t = dma_fence_timestamp(tmp);
if (ktime_after(t, timestamp))
timestamp = t;
}
}
}
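Both dma-buf hunks (and the drm/scheduler one further down) lean on the new dma_fence_timestamp() helper added by this release. Roughly, as a sketch of the helper in include/linux/dma-fence.h (see the "dma-buf: add dma_fence_timestamp helper" patch for the authoritative body):

	static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
	{
		if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
			return ktime_get();

		/* The signaled bit is set before the timestamp is written;
		 * spin until the producer has filled it in.
		 */
		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
			cpu_relax();

		return fence->timestamp;
	}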

@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
cpu_relax();
info->timestamp_ns =
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
dma_fence_is_signaled(fence) ?
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
return info->status;
}

@@ -495,6 +495,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
u32 stat;
unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -508,7 +509,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
spin_lock(&idxd->cmd_lock);
spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -525,7 +526,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
spin_unlock(&idxd->cmd_lock);
spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
spin_lock(&idxd->cmd_lock);

@@ -451,9 +451,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
synchronize_irq(c->irq);
spin_unlock_irqrestore(&c->vc.lock, flags);
synchronize_irq(c->irq);
return 0;
}

@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
if (chan->trig_mdma && sg_len > 1)
if (chan->trig_mdma && sg_len > 1) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
}
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan);
if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
residue = sg_req->len;
if (!chan->trig_mdma)
residue = sg_req->len;
}
/*
@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
* residue = remaining bytes from NDTR + remaining
* periods/sg to be transferred
*/
if (!chan->desc->cyclic || n_sg != 0)
if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
for (i = n_sg; i < desc->num_sgs; i++)
residue += desc->sg_req[i].len;

@@ -778,8 +778,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
/* Enable interrupts */
ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
if (sg_len > 1)
ccr |= STM32_MDMA_CCR_BTIE;
desc->ccr = ccr;
return 0;
@@ -1237,6 +1235,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
/* Transfer can be terminated */
if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
return -EPERM;
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1317,21 +1319,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
struct stm32_mdma_desc *desc,
u32 curr_hwdesc)
u32 curr_hwdesc,
struct dma_tx_state *state)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct stm32_mdma_hwdesc *hwdesc;
u32 cbndtr, residue, modulo, burst_size;
u32 cisr, clar, cbndtr, residue, modulo, burst_size;
int i;
cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
residue = 0;
for (i = curr_hwdesc + 1; i < desc->count; i++) {
/* Get the next hw descriptor to process from current transfer */
clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
for (i = desc->count - 1; i >= 0; i--) {
hwdesc = desc->node[i].hwdesc;
if (hwdesc->clar == clar)
break;/* Current transfer found, stop cumulating */
/* Cumulate residue of unprocessed hw descriptors */
residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
}
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
state->in_flight_bytes = 0;
if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
if (!chan->mem_burst)
return residue;
@@ -1361,11 +1377,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
vdesc = vchan_find_desc(&chan->vchan, cookie);
if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
residue = stm32_mdma_desc_residue(chan, chan->desc,
chan->curr_hwdesc);
residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
else if (vdesc)
residue = stm32_mdma_desc_residue(chan,
to_stm32_mdma_desc(vdesc), 0);
residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);

@@ -220,7 +220,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);

@@ -1183,6 +1183,9 @@ static void disable_vbios_mode_if_required(
if (stream == NULL)
continue;
if (stream->apply_seamless_boot_optimization)
continue;
// only looking for first odm pipe
if (pipe->prev_odm_pipe)
continue;

@@ -290,7 +290,8 @@ static int
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *old_connector_state,
struct drm_connector_state *new_connector_state)
struct drm_connector_state *new_connector_state,
bool added_by_user)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
* there's a chance the connector may have been destroyed during the
* process, but it's better to ignore that then cause
* drm_atomic_helper_resume() to fail.
*
* Last, we want to ignore connector registration when the connector
* was not pulled in the atomic state by user-space (ie, was pulled
* in by the driver, e.g. when updating a DP-MST stream).
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
added_by_user && crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
unsigned int connectors_mask = 0;
unsigned int connectors_mask = 0, user_connectors_mask = 0;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
user_connectors_mask |= BIT(i);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
*/
ret = update_connector_routing(state, connector,
old_connector_state,
new_connector_state);
new_connector_state,
BIT(i) & user_connectors_mask);
if (ret)
return ret;
if (old_connector_state->crtc) {

@@ -235,8 +235,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
u32 flags = 0;
u32 *cs;
/*
* L3 fabric flush is needed for AUX CCS invalidation
* which happens as part of pipe-control so we can
* ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
* deals with Protected Memory which is not needed for
* AUX CCS invalidation and lead to unwanted side effects.
*/
if (mode & EMIT_FLUSH)
flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */

@@ -142,6 +142,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
const struct dpu_format *fmt = NULL;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
int src_width, src_height, dst_height, fps;
u64 plane_pixel_rate, plane_bit_rate;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
@@ -164,13 +165,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
plane_bw =
src_width * mode->vtotal * fps * fmt->bpp *
scale_factor;
plane_pixel_rate = src_width * mode->vtotal * fps;
plane_bit_rate = plane_pixel_rate * fmt->bpp;
plane_prefill_bw =
src_width * hw_latency_lines * fps * fmt->bpp *
scale_factor * mode->vtotal;
plane_bw = plane_bit_rate * scale_factor;
plane_prefill_bw = plane_bw * hw_latency_lines;
if ((vbp+vpw) > hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw));
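The overflow being avoided above is easy to reproduce on paper: with, say, a hypothetical 4096-wide source at vtotal = 2250, 60 fps and 32 bpp, 4096 * 2250 * 60 * 32 = 17,694,720,000, well past the 32-bit ceiling of 4,294,967,295. Accumulating plane_pixel_rate and plane_bit_rate in u64 keeps the intermediate exact before the prefill division.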

@@ -1711,13 +1711,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;
while (--link_train_max_retries) {
rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
rc);
break;
}
training_step = DP_TRAINING_NONE;
rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
@@ -1769,6 +1762,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* stop link training before start re training */
dp_ctrl_clear_training_pattern(ctrl);
}
rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
break;
}
}
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)

@@ -1068,7 +1068,7 @@ int dp_link_process_request(struct dp_link *dp_link)
}
}
drm_dbg_dp(link->drm_dev, "sink request=%#x",
drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
dp_link->sink_request);
return ret;
}

@@ -1098,9 +1098,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
u32 data;
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
data = dsi_read(msm_host, REG_DSI_STATUS0);
/* if video mode engine is not busy, its because
* either timing engine was not turned on or the
* DSI controller has finished transmitting the video
* data already, so no need to wait in those cases
*/
if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
return;
if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
@@ -1960,10 +1972,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
return ret;
if (!msm_host->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
return -EINVAL;
}
/* do not autoenable, will be enabled later */

@@ -841,7 +841,7 @@
if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
dma_fence_timestamp(&job->s_fence->finished);
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}

@@ -1614,7 +1614,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
((unsigned long) header + header->size + sizeof(*header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
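The vmwgfx one-character fix above is the classic sizeof-of-a-pointer bug: header is a pointer, so sizeof(header) evaluates to the pointer width (8 bytes on 64-bit builds), whereas the intended sizeof(*header) is the size of the command header structure that the texture-state payload actually follows.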

@@ -4275,7 +4275,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto hid_hw_init_fail;
}
hidpp_connect_event(hidpp);
schedule_work(&hidpp->work);
flush_work(&hidpp->work);
if (will_restart) {
/* Reset the HID node state */

@@ -38,8 +38,8 @@
#define IMX8QXP_ADR_ADC_FCTRL 0x30
#define IMX8QXP_ADR_ADC_SWTRIG 0x34
#define IMX8QXP_ADR_ADC_TCTRL(tid) (0xc0 + (tid) * 4)
#define IMX8QXP_ADR_ADC_CMDH(cid) (0x100 + (cid) * 8)
#define IMX8QXP_ADR_ADC_CMDL(cid) (0x104 + (cid) * 8)
#define IMX8QXP_ADR_ADC_CMDL(cid) (0x100 + (cid) * 8)
#define IMX8QXP_ADR_ADC_CMDH(cid) (0x104 + (cid) * 8)
#define IMX8QXP_ADR_ADC_RESFIFO 0x300
#define IMX8QXP_ADR_ADC_TST 0xffc

@@ -10,6 +10,8 @@ config AD74413R
depends on GPIOLIB && SPI
select REGMAP_SPI
select CRC8
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices AD74412R/AD74413R
quad-channel software configurable input/output solution.

@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select {
};
enum ad3542r_id {
AD3542R_ID = 0x4008,
AD3552R_ID = 0x4009,
AD3542R_ID = 0x4009,
AD3552R_ID = 0x4008,
};
enum ad3552r_ch_output_range {

@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
if (vcm < 0)
return vcm;
if (vcm < 1800000)
if (vcm <= 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
else if (vcm > 1800000 && vcm < 2600000)
else if (vcm > 1800000 && vcm <= 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
else
return -EINVAL;

@@ -2,6 +2,8 @@
config BOSCH_BNO055
tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
config BOSCH_BNO055_SERIAL
tristate "Bosch BNO055 attached via UART"

@@ -1786,7 +1786,7 @@ int bmp280_common_probe(struct device *dev,
* however as it happens, the BMP085 shares the chip ID of BMP180
* so we look for an IRQ if we have that.
*/
if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
if (irq > 0 && (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
return ret;

@@ -57,8 +57,8 @@
#define DPS310_RESET_MAGIC 0x09
#define DPS310_COEF_BASE 0x10
/* Make sure sleep time is <= 20ms for usleep_range */
#define DPS310_POLL_SLEEP_US(t) min(20000, (t) / 8)
/* Make sure sleep time is <= 30ms for usleep_range */
#define DPS310_POLL_SLEEP_US(t) min(30000, (t) / 8)
/* Silently handle error in rate value here */
#define DPS310_POLL_TIMEOUT_US(rc) ((rc) <= 0 ? 1000000 : 1000000 / (rc))
@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data)
if (rc)
return rc;
/* Wait for device chip access: 2.5ms in specification */
usleep_range(2500, 12000);
/* Wait for device chip access: 15ms in specification */
usleep_range(15000, 55000);
return 0;
}

@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
crc = (crc >> 12) & 0x000F;
return crc_orig != 0x0000 && crc == crc_orig;
return crc == crc_orig;
}
static int ms5611_read_prom(struct iio_dev *indio_dev)

@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));

@@ -272,6 +272,7 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -474,6 +475,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */
XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */

@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
pm->requires_update = 0;
usb_kill_urb(pm->irq);
input_unregister_device(pm->input);
usb_kill_urb(pm->config);
usb_free_urb(pm->irq);
usb_free_urb(pm->config);
powermate_free_buffers(interface_to_usbdev(intf), pm);

@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
psmouse->protocol_handler = elantech_process_byte;
psmouse->disconnect = elantech_disconnect;
psmouse->reconnect = elantech_reconnect;
psmouse->fast_reconnect = NULL;
psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
return 0;

@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
psmouse->fast_reconnect = NULL;
psmouse->cleanup = synaptics_reset;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;

@@ -618,6 +618,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Fujitsu Lifebook E5411 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
},
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
/* Gigabyte M912 */
.matches = {

@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_last_gpios;
} else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) {
/*
* On newer devices there is only 1 GpioInt resource and _PS0
* does the whole reset sequence for us.
*/
acpi_device_fix_up_power(ACPI_COMPANION(dev));
/*
* Before the _PS0 call the int GPIO may have been in output
* mode and the call should have put the int GPIO in input mode,
* but the GPIO subsys cached state may still think it is
* in output mode, causing gpiochip_lock_as_irq() failure.
*
* Add a mapping for the int GPIO to make the
* gpiod_int = gpiod_get(..., GPIOD_IN) call succeed,
* which will explicitly set the direction to input.
*/
ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
gpio_mapping = acpi_goodix_int_first_gpios;
} else {
dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
ts->gpio_count, ts->gpio_int_idx);

@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
reg &= ~(TSSEL_MASK << tssr_offset);
reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}

@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
struct mcb_device *mdev = to_mcb_device(dev);
int retval;
if (mdev->is_added)
return 0;
retval = device_attach(dev);
if (retval < 0)
if (retval < 0) {
dev_err(dev, "Error adding device (%d)\n", retval);
mdev->is_added = true;
return retval;
}
return 0;
}

@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
mdev->mem.end = mdev->mem.start + size - 1;
mdev->mem.flags = IORESOURCE_MEM;
mdev->is_added = false;
ret = mcb_device_register(bus, mdev);
if (ret < 0)
goto err;

@@ -174,7 +174,7 @@ config CAN_SLCAN
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Allwinner
A10/A20/D1 SoCs.

@@ -544,6 +544,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
goto err_read_skb;
}
/* It seems that accessing the switch's internal PHYs via management
* packets still uses the MDIO bus within the switch internally, and
* these accesses can conflict with external MDIO accesses to other
* devices on the MDIO bus.
* We therefore need to lock the MDIO bus onto which the switch is
* connected.
*/
mutex_lock(&priv->bus->mdio_lock);
/* Actually start the request:
* 1. Send mdio master packet
* 2. Busy Wait for mdio master command
@@ -556,6 +565,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
mgmt_master = priv->mgmt_master;
if (!mgmt_master) {
mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);
ret = -EINVAL;
goto err_mgmt_master;
}
@@ -643,6 +653,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);
return ret;

@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
struct vf_macvlans *mv_list;
int num_vf_macvlans, i;
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
if (!num_vf_macvlans)
@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list[i].vf = -1;
mv_list[i].free = true;

@@ -611,7 +611,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
if (ctx->sa.update_pn) {
netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
assoc_num);
err = -EINVAL;
@@ -1016,7 +1016,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
if (ctx->sa.update_pn) {
netdev_err(ctx->netdev,
"MACsec offload update RX sa %d PN isn't supported\n",
assoc_num);

@@ -3862,13 +3862,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
struct mlx5e_channels *chs = &priv->channels;
struct mlx5e_params new_params;
int err;
bool rx_ts_over_crc = !enable;
mutex_lock(&priv->state_lock);
new_params = chs->params;
new_params.scatter_fcs_en = enable;
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
&new_params.scatter_fcs_en, true);
&rx_ts_over_crc, true);
mutex_unlock(&priv->state_lock);
return err;
}

@@ -308,8 +308,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
bool learning_en)
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
bool learning_en)
{
char tnpc_pl[MLXSW_REG_TNPC_LEN];

@@ -1003,17 +1003,21 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
case CQE_TX_VPORT_DISABLED:
case CQE_TX_VLAN_TAGGING_VIOLATION:
WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
cqe_oob->cqe_hdr.cqe_type);
if (net_ratelimit())
netdev_err(ndev, "TX: CQE error %d\n",
cqe_oob->cqe_hdr.cqe_type);
break;
default:
/* If the CQE type is unexpected, log an error, assert,
* and go through the error path.
/* If the CQE type is unknown, log an error,
* and still free the SKB, update tail, etc.
*/
WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
cqe_oob->cqe_hdr.cqe_type);
return;
if (net_ratelimit())
netdev_err(ndev, "TX: unknown CQE type %d\n",
cqe_oob->cqe_hdr.cqe_type);
break;
}
if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))

@@ -210,6 +210,7 @@
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_flower_cmsg_merge_hint *msg;
struct nfp_fl_payload *sub_flows[2];
struct nfp_flower_priv *priv;
int err, i, flow_cnt;
msg = nfp_flower_cmsg_get_data(skb);
@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
return;
}
rtnl_lock();
priv = app->priv;
mutex_lock(&priv->nfp_fl_lock);
for (i = 0; i < flow_cnt; i++) {
u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
if (!sub_flows[i]) {
nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
goto err_rtnl_unlock;
goto err_mutex_unlock;
}
}
@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
if (err == -ENOMEM)
nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
err_rtnl_unlock:
rtnl_unlock();
err_mutex_unlock:
mutex_unlock(&priv->nfp_fl_lock);
}
static void

@@ -1971,8 +1971,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
struct nfp_fl_ct_flow_entry *ct_entry;
struct netlink_ext_ack *extack = NULL;
ASSERT_RTNL();
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
@@ -2015,9 +2013,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
switch (type) {
case TC_SETUP_CLSFLOWER:
rtnl_lock();
while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
if (!zt->nft) /* avoid deadlock */
return err;
msleep(20);
}
err = nfp_fl_ct_offload_nft_flow(zt, flow);
rtnl_unlock();
mutex_unlock(&zt->priv->nfp_fl_lock);
break;
default:
return -EOPNOTSUPP;
@@ -2045,6 +2047,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
struct nfp_fl_ct_flow_entry *ct_entry;
struct nfp_fl_ct_zone_entry *zt;
struct rhashtable *m_table;
struct nf_flowtable *nft;
if (!ct_map_ent)
return -ENOENT;
@@ -2061,8 +2064,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
if (!zt->pre_ct_count) {
zt->nft = NULL;
if (!zt->pre_ct_count && zt->nft) {
nft = zt->nft;
zt->nft = NULL; /* avoid deadlock */
nf_flow_table_offload_del_cb(nft,
nfp_fl_ct_handle_nft_flow,
zt);
nfp_fl_ct_clean_nft_entries(zt);
}
break;

@@ -281,6 +281,7 @@ struct nfp_fl_internal_ports {
* @predt_list: List to keep track of decap pretun flows
* @neigh_table: Table to keep track of neighbor entries
* @predt_lock: Lock to serialise predt/neigh table updates
* @nfp_fl_lock: Lock to protect the flow offload operation
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -323,6 +324,7 @@ struct nfp_flower_priv {
struct list_head predt_list;
struct rhashtable neigh_table;
spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
struct mutex nfp_fl_lock; /* Protect the flow operation */
};
/**

@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
mutex_init(&priv->nfp_fl_lock);
err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
if (err)
goto err_free_merge_table;

@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
u64 parent_ctx = 0;
int err;
ASSERT_RTNL();
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -1727,19 +1725,30 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flower)
{
struct nfp_flower_priv *priv = app->priv;
int ret;
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
mutex_lock(&priv->nfp_fl_lock);
switch (flower->command) {
case FLOW_CLS_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
ret = nfp_flower_add_offload(app, netdev, flower);
break;
case FLOW_CLS_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
ret = nfp_flower_del_offload(app, netdev, flower);
break;
case FLOW_CLS_STATS:
return nfp_flower_get_stats(app, netdev, flower);
ret = nfp_flower_get_stats(app, netdev, flower);
break;
default:
return -EOPNOTSUPP;
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&priv->nfp_fl_lock);
return ret;
}
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
repr_priv = repr->app_priv;
repr_priv->block_shared = f->block_shared;
f->driver_block_list = &nfp_block_cb_list;
f->unlocked_driver_cb = true;
switch (f->command) {
case FLOW_BLOCK_BIND:
@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
f->unlocked_driver_cb = true;
switch (f->command) {
case FLOW_BLOCK_BIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);


@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
{
struct netlink_ext_ack *extack = flow->common.extack;
struct nfp_flower_priv *fl_priv = app->priv;
int ret;
if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
return -EOPNOTSUPP;
}
mutex_lock(&fl_priv->nfp_fl_lock);
switch (flow->command) {
case TC_CLSMATCHALL_REPLACE:
return nfp_flower_install_rate_limiter(app, netdev, flow,
extack);
ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
break;
case TC_CLSMATCHALL_DESTROY:
return nfp_flower_remove_rate_limiter(app, netdev, flow,
extack);
ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
break;
case TC_CLSMATCHALL_STATS:
return nfp_flower_stats_rate_limiter(app, netdev, flow,
extack);
ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
break;
default:
return -EOPNOTSUPP;
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&fl_priv->nfp_fl_lock);
return ret;
}
/* Offload tc action, currently only for tc police */


@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
struct ca8210_platform_data *pdata = spi->dev.platform_data;
int ret = 0;
if (!np)
return -EFAULT;
@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
dev_crit(&spi->dev, "Failed to register external clk\n");
return PTR_ERR(priv->clk);
}
ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
if (ret) {
clk_unregister(priv->clk);
dev_crit(
&spi->dev,
"Failed to register external clock as clock provider\n"
);
} else {
dev_info(&spi->dev, "External clock set as clock provider\n");
}
return ret;
return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
}
/**
@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
{
struct ca8210_priv *priv = spi_get_drvdata(spi);
if (!priv->clk)
return
if (IS_ERR_OR_NULL(priv->clk))
return;
of_clk_del_provider(spi->dev.of_node);
clk_unregister(priv->clk);


@@ -2384,6 +2384,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
@@ -2477,6 +2478,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);


@@ -844,6 +844,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
struct macsec_flow *flow;
int ret;
if (ctx->sa.update_pn)
return -EINVAL;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
@@ -897,6 +900,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
struct macsec_flow *flow;
int ret;
if (ctx->sa.update_pn)
return -EINVAL;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);


@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
int err;
if (phy_id) {
netdev_dbg(dev->net, "Only internal phy supported\n");
return 0;
}
dm_read_shared_word(dev, 1, loc, &res);
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
return err;
}
netdev_dbg(dev->net,
"dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",


@@ -41,8 +41,6 @@
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;


@@ -1822,7 +1822,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
u64 delta;
int i;
for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) {
for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
if (status & (1U << i)) {
ret = IRQ_HANDLED;
if (WARN_ON(!dtc->counters[i]))


@@ -126,6 +126,10 @@ struct lynx_28g_lane {
struct lynx_28g_priv {
void __iomem *base;
struct device *dev;
/* Serialize concurrent access to registers shared between lanes,
* like PCCn
*/
spinlock_t pcc_lock;
struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
@@ -396,6 +400,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
if (powered_up)
lynx_28g_power_off(phy);
spin_lock(&priv->pcc_lock);
switch (submode) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
@@ -412,6 +418,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
lane->interface = submode;
out:
spin_unlock(&priv->pcc_lock);
/* Power up the lane if necessary */
if (powered_up)
lynx_28g_power_on(phy);
@@ -507,11 +515,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
lane = &priv->lane[i];
if (!lane->init)
continue;
mutex_lock(&lane->phy->mutex);
if (!lane->powered_up)
if (!lane->init || !lane->powered_up) {
mutex_unlock(&lane->phy->mutex);
continue;
}
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
@@ -520,6 +529,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
} while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
}
mutex_unlock(&lane->phy->mutex);
}
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
msecs_to_jiffies(1000));
@@ -592,6 +603,7 @@ static int lynx_28g_probe(struct platform_device *pdev)
dev_set_drvdata(dev, priv);
spin_lock_init(&priv->pcc_lock);
INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
@@ -603,6 +615,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(provider);
}
static void lynx_28g_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct lynx_28g_priv *priv = dev_get_drvdata(dev);
cancel_delayed_work_sync(&priv->cdr_check);
}
static const struct of_device_id lynx_28g_of_match_table[] = {
{ .compatible = "fsl,lynx-28g" },
{ },
@@ -611,6 +631,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
static struct platform_driver lynx_28g_driver = {
.probe = lynx_28g_probe,
.remove_new = lynx_28g_remove,
.driver = {
.name = "lynx-28g",
.of_match_table = lynx_28g_of_match_table,

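lynx_28g_remove() above exists because the CDR lock check is a self-rearming delayed work item; without a synchronous cancel it could fire after the device is gone. A minimal, hypothetical platform driver showing the same queue-in-probe / cancel-in-remove pairing (all demo_* names are illustrative):

#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work check;
};

static void demo_check_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, check.work);

	/* ... poll the hardware ... */
	queue_delayed_work(system_power_efficient_wq, &priv->check,
			   msecs_to_jiffies(1000));	/* re-arm */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	INIT_DELAYED_WORK(&priv->check, demo_check_work);
	queue_delayed_work(system_power_efficient_wq, &priv->check, 0);
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	struct demo_priv *priv = platform_get_drvdata(pdev);

	/* The work re-queues itself; only a sync cancel stops it for good. */
	cancel_delayed_work_sync(&priv->check);
}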

@@ -1007,17 +1007,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
static struct pinctrl *find_pinctrl(struct device *dev)
{
struct pinctrl *p;
struct pinctrl *entry, *p = NULL;
mutex_lock(&pinctrl_list_mutex);
list_for_each_entry(p, &pinctrl_list, node)
if (p->dev == dev) {
mutex_unlock(&pinctrl_list_mutex);
return p;
list_for_each_entry(entry, &pinctrl_list, node) {
if (entry->dev == dev) {
p = entry;
kref_get(&p->users);
break;
}
}
mutex_unlock(&pinctrl_list_mutex);
return NULL;
return p;
}
static void pinctrl_free(struct pinctrl *p, bool inlist);
@@ -1126,7 +1129,6 @@ struct pinctrl *pinctrl_get(struct device *dev)
p = find_pinctrl(dev);
if (p) {
dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
kref_get(&p->users);
return p;
}

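The find_pinctrl() change above moves the kref_get() inside the locked list walk, closing the window in which another thread could drop the last reference between the lookup and the get. A generic sketch of that rule (struct entry and the helper are illustrative, not pinctrl code):

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct entry {
	struct list_head node;
	struct device *dev;
	struct kref users;
};

static LIST_HEAD(entries);
static DEFINE_MUTEX(entries_lock);

static struct entry *entry_find(struct device *dev)
{
	struct entry *e, *found = NULL;

	mutex_lock(&entries_lock);
	list_for_each_entry(e, &entries, node) {
		if (e->dev == dev) {
			found = e;
			kref_get(&found->users);	/* still under the lock */
			break;
		}
	}
	mutex_unlock(&entries_lock);

	return found;
}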

@@ -1041,13 +1041,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
if (ret < 0)
return ret;
gpio = &pctrl->gpio_bank[reg];
gpio->pctrl = pctrl;
if (reg >= WPCM450_NUM_BANKS)
return dev_err_probe(dev, -EINVAL,
"GPIO index %d out of range!\n", reg);
gpio = &pctrl->gpio_bank[reg];
gpio->pctrl = pctrl;
bank = &wpcm450_banks[reg];
gpio->bank = bank;


@@ -240,6 +240,7 @@ config PINCTRL_RZN1
depends on OF
depends on ARCH_RZN1 || COMPILE_TEST
select GENERIC_PINCONF
select PINMUX
help
This selects pinctrl driver for Renesas RZ/N1 devices.


@@ -1399,7 +1399,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = {
.restore = hp_wmi_resume_handler,
};
static struct platform_driver hp_wmi_driver = {
/*
* hp_wmi_bios_remove() lives in .exit.text. For drivers registered via
* module_platform_driver_probe() this is ok because they cannot get unbound at
* runtime. So mark the driver struct with __refdata to prevent modpost
* triggering a section mismatch warning.
*/
static struct platform_driver hp_wmi_driver __refdata = {
.driver = {
.name = "hp-wmi",
.pm = &hp_wmi_pm_ops,


@@ -1245,6 +1245,24 @@ static void tlmi_release_attr(void)
kset_unregister(tlmi_priv.authentication_kset);
}
static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name)
{
struct kobject *duplicate;
if (!strcmp(name, "Reserved"))
return -EINVAL;
duplicate = kset_find_obj(attribute_kset, name);
if (duplicate) {
pr_debug("Duplicate attribute name found - %s\n", name);
/* kset_find_obj() returns a reference */
kobject_put(duplicate);
return -EBUSY;
}
return 0;
}
static int tlmi_sysfs_init(void)
{
int i, ret;
@@ -1273,10 +1291,8 @@ static int tlmi_sysfs_init(void)
continue;
/* check for duplicate or reserved values */
if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) ||
!strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) {
pr_debug("duplicate or reserved attribute name found - %s\n",
tlmi_priv.setting[i]->display_name);
if (tlmi_validate_setting_name(tlmi_priv.attribute_kset,
tlmi_priv.setting[i]->display_name) < 0) {
kfree(tlmi_priv.setting[i]->possible_values);
kfree(tlmi_priv.setting[i]);
tlmi_priv.setting[i] = NULL;

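Besides factoring out the check, tlmi_validate_setting_name() above fixes a quiet leak: kset_find_obj() returns its result with an elevated refcount, so even a pure existence test must drop it. A small sketch of the rule (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/string.h>

static int name_is_usable(struct kset *kset, const char *name)
{
	struct kobject *dup;

	if (!strcmp(name, "Reserved"))
		return -EINVAL;

	dup = kset_find_obj(kset, name);
	if (dup) {
		/* kset_find_obj() took a reference; give it back */
		kobject_put(dup);
		return -EBUSY;
	}

	return 0;
}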

@@ -1625,12 +1625,13 @@ int scsi_rescan_device(struct scsi_device *sdev)
device_lock(dev);
/*
* Bail out if the device is not running. Otherwise, the rescan may
* block waiting for commands to be executed, with us holding the
* device lock. This can result in a potential deadlock in the power
* management core code when system resume is on-going.
* Bail out if the device or its queue are not running. Otherwise,
* the rescan may block waiting for commands to be executed, with us
* holding the device lock. This can result in a potential deadlock
* in the power management core code when system resume is on-going.
*/
if (sdev->sdev_state != SDEV_RUNNING) {
if (sdev->sdev_state != SDEV_RUNNING ||
blk_queue_pm_only(sdev->request_queue)) {
ret = -EWOULDBLOCK;
goto unlock;
}


@@ -217,12 +217,12 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
return rc;
}
/* mutex must be held by caller */
static void destroy_session(struct kref *ref)
{
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
kfree(sess);
@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS) {
pr_err("open_session failed %d\n", arg->ret);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
kref_put_mutex(&sess->refcount, destroy_session,
&session_list_mutex);
goto out;
}
@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
kref_put_mutex(&sess->refcount, destroy_session,
&session_list_mutex);
rc = -ENOMEM;
goto out;
}
@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
return 0;
}

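The amdtee hunks above swap kref_put() plus a mutex_lock() inside the release callback for kref_put_mutex(): the mutex is acquired before the refcount can be observed at zero, so a concurrent lookup under the same mutex can no longer find a session whose last reference is already gone. A minimal sketch of the contract (types are illustrative; note that the release callback runs with the mutex held and must unlock it):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct session {
	struct list_head node;
	struct kref refcount;
};

static DEFINE_MUTEX(session_lock);

/* Called by kref_put_mutex() with session_lock already held */
static void session_release(struct kref *ref)
{
	struct session *s = container_of(ref, struct session, refcount);

	list_del(&s->node);
	mutex_unlock(&session_lock);
	kfree(s);
}

static void session_put(struct session *s)
{
	kref_put_mutex(&s->refcount, session_release, &session_lock);
}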

@@ -41,6 +41,7 @@
#define PHY_PORT_CS1_LINK_STATE_SHIFT 26
#define ICM_TIMEOUT 5000 /* ms */
#define ICM_RETRIES 3
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
static int icm_request(struct tb *tb, const void *request, size_t request_size,
void *response, size_t response_size, size_t npackets,
unsigned int timeout_msec)
int retries, unsigned int timeout_msec)
{
struct icm *icm = tb_priv(tb);
int retries = 3;
do {
struct tb_cfg_request *req;
@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
return -ENOMEM;
ret = icm_request(tb, &request, sizeof(request), switches,
sizeof(*switches), npackets, ICM_TIMEOUT);
sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
goto err_free;
@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
/* Use larger timeout as establishing tunnels can take some time */
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_APPROVE_TIMEOUT);
1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1022,7 +1022,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, 20000);
1, 10, 2000);
if (ret)
return ret;
@@ -1055,7 +1055,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_APPROVE_TIMEOUT);
1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
@@ -1083,7 +1083,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1112,7 +1112,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1146,7 +1146,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1172,7 +1172,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1498,7 +1498,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1524,7 +1524,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1545,7 +1545,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1606,7 +1606,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1628,7 +1628,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, 20000);
1, ICM_RETRIES, 20000);
if (ret)
return ret;
@@ -2300,7 +2300,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;

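The icm_request() hunks above thread a retries argument through every call site instead of the old hard-coded local, so slow operations can ask for more, shorter attempts (the TR driver-ready path now uses 10 tries of 2000 ms). A rough sketch of the shape, where send_and_wait() is a hypothetical transport primitive, not a real API:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical transport primitive: one attempt with a timeout */
int send_and_wait(void *dev, const void *req, size_t len,
		  unsigned int timeout_msec);

static int request_with_retries(void *dev, const void *req, size_t len,
				int retries, unsigned int timeout_msec)
{
	do {
		int ret = send_and_wait(dev, req, len, timeout_msec);

		if (ret != -ETIMEDOUT)
			return ret;	/* success or a hard error */
	} while (--retries > 0);

	return -ETIMEDOUT;
}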

@@ -2763,6 +2763,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
!tb_port_is_width_supported(down, 2))
return 0;
/*
* Both lanes need to be in CL0. Here we assume lane 0 already be in
* CL0 and check just for lane 1.
*/
if (tb_wait_for_port(down->dual_link_port, false) <= 0)
return -ENOTCONN;
ret = tb_port_lane_bonding_enable(up);
if (ret) {
tb_port_warn(up, "failed to enable lane bonding\n");


@@ -704,6 +704,27 @@ static void update_property_block(struct tb_xdomain *xd)
mutex_unlock(&xdomain_lock);
}
static void start_handshake(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_INIT;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
/* Can be called from state_work */
static void __stop_handshake(struct tb_xdomain *xd)
{
cancel_delayed_work_sync(&xd->properties_changed_work);
xd->properties_changed_retries = 0;
xd->state_retries = 0;
}
static void stop_handshake(struct tb_xdomain *xd)
{
cancel_delayed_work_sync(&xd->state_work);
__stop_handshake(xd);
}
static void tb_xdp_handle_request(struct work_struct *work)
{
struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -766,6 +787,15 @@ static void tb_xdp_handle_request(struct work_struct *work)
case UUID_REQUEST:
tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
/*
* If we've stopped the discovery with an error such as
* timing out, we will restart the handshake now that we
* received UUID request from the remote host.
*/
if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
dev_dbg(&xd->dev, "restarting handshake\n");
start_handshake(xd);
}
break;
case LINK_STATE_STATUS_REQUEST:
@@ -1522,6 +1552,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void tb_xdomain_failed(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_ERROR;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_state_work(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
@@ -1548,7 +1585,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
xd->state = XDOMAIN_STATE_ERROR;
tb_xdomain_failed(xd);
} else {
tb_xdomain_queue_properties_changed(xd);
if (xd->bonding_possible)
@@ -1613,7 +1650,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
xd->state = XDOMAIN_STATE_ERROR;
tb_xdomain_failed(xd);
} else {
xd->state = XDOMAIN_STATE_ENUMERATED;
}
@@ -1624,6 +1661,8 @@ static void tb_xdomain_state_work(struct work_struct *work)
break;
case XDOMAIN_STATE_ERROR:
dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
__stop_handshake(xd);
break;
default:
@@ -1793,21 +1832,6 @@ static void tb_xdomain_release(struct device *dev)
kfree(xd);
}
static void start_handshake(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_INIT;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void stop_handshake(struct tb_xdomain *xd)
{
cancel_delayed_work_sync(&xd->properties_changed_work);
cancel_delayed_work_sync(&xd->state_work);
xd->properties_changed_retries = 0;
xd->state_retries = 0;
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
stop_handshake(tb_to_xdomain(dev));

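Moving start_handshake()/stop_handshake() up and splitting out __stop_handshake() lets the state machine stop the rest of the discovery from inside tb_xdomain_state_work() itself: calling cancel_delayed_work_sync() on the work item that is currently executing would deadlock. A stripped-down sketch of the split (names are illustrative):

#include <linux/workqueue.h>

struct xd_demo {
	struct delayed_work state_work;
	struct delayed_work props_work;
};

/* Safe to call from state_work's own handler */
static void __demo_stop(struct xd_demo *xd)
{
	cancel_delayed_work_sync(&xd->props_work);
}

/* Only safe from outside the handler: also cancels state_work itself */
static void demo_stop(struct xd_demo *xd)
{
	cancel_delayed_work_sync(&xd->state_work);
	__demo_stop(xd);
}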

@@ -7041,7 +7041,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
mask, 0, 1000, 1000);
dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
tag, err ? "succeeded" : "failed");
tag, err < 0 ? "failed" : "succeeded");
out:
return err;


@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
unsigned long flags;
int ret;
if (request->status != -EINPROGRESS)
return 0;
if (!pep->endpoint.desc) {
dev_err(pdev->dev,
"%s: can't dequeue to disabled endpoint\n",


@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active);
#else /* CONFIG_PM_SLEEP */
static inline int cdns_resume(struct cdns *cdns)
{ return 0; }
static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
{ return 0; }
static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
static inline int cdns_suspend(struct cdns *cdns)
{ return 0; }
#endif /* CONFIG_PM_SLEEP */


@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
if (udev->quirks & USB_QUIRK_NO_LPM)
return 0;
/* Skip if the device BOS descriptor couldn't be read */
if (!udev->bos)
return 0;
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
/* Skip if the device BOS descriptor couldn't be read */
if (!udev->bos)
return;
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
@@ -2720,13 +2728,17 @@ int usb_authorize_device(struct usb_device *usb_dev)
static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
u32 ext_portstatus)
{
struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
u32 attr;
u8 speed_id;
u8 ssac;
u8 lanes;
int i;
if (!hdev->bos)
goto out;
ssp_cap = hdev->bos->ssp_cap;
if (!ssp_cap)
goto out;
@@ -4244,8 +4256,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout;
__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
__u8 u1_mel;
__le16 u2_mel;
/* Skip if the device BOS descriptor couldn't be read */
if (!udev->bos)
return;
u1_mel = udev->bos->ss_cap->bU1devExitLat;
u2_mel = udev->bos->ss_cap->bU2DevExitLat;
/* If the device says it doesn't have *any* exit latency to come out of
* U1 or U2, it's probably lying. Assume it doesn't implement that link


@@ -153,7 +153,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
hdev->bos->ssp_cap);
hdev->bos && hdev->bos->ssp_cap);
}
static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)


@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode or current role is host, then we can return early.
*/
if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
/*
* If the dr_mode is host and the dwc->current_dr_role is not the
* corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
* isn't executed yet. Ensure the phy is ready before the controller
* updates the GCTL.PRTCAPDIR or other settings by soft-resetting
* the phy.
*
* Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
* is port index. If this is a multiport host, then we need to reset
* all active ports.
*/
if (dwc->dr_mode == USB_DR_MODE_HOST) {
u32 usb3_port;
u32 usb2_port;
usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
/* Small delay for phy reset assertion */
usleep_range(1000, 2000);
usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
/* Wait for clock synchronization */
msleep(50);
return 0;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
reg &= ~DWC3_DCTL_RUN_STOP;


@@ -1171,7 +1171,8 @@ static int ncm_unwrap_ntb(struct gether *port,
struct sk_buff_head *list)
{
struct f_ncm *ncm = func_to_ncm(&port->func);
__le16 *tmp = (void *) skb->data;
unsigned char *ntb_ptr = skb->data;
__le16 *tmp;
unsigned index, index2;
int ndp_index;
unsigned dg_len, dg_len2;
@@ -1184,6 +1185,10 @@ static int ncm_unwrap_ntb(struct gether *port,
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
int to_process = skb->len;
parse_ntb:
tmp = (__le16 *)ntb_ptr;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1230,7 +1235,7 @@ static int ncm_unwrap_ntb(struct gether *port,
* walk through NDP
* dwSignature
*/
tmp = (void *)(skb->data + ndp_index);
tmp = (__le16 *)(ntb_ptr + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
goto err;
@@ -1287,11 +1292,11 @@ static int ncm_unwrap_ntb(struct gether *port,
if (ncm->is_crc) {
uint32_t crc, crc2;
crc = get_unaligned_le32(skb->data +
crc = get_unaligned_le32(ntb_ptr +
index + dg_len -
crc_len);
crc2 = ~crc32_le(~0,
skb->data + index,
ntb_ptr + index,
dg_len - crc_len);
if (crc != crc2) {
INFO(port->func.config->cdev,
@@ -1318,7 +1323,7 @@ static int ncm_unwrap_ntb(struct gether *port,
dg_len - crc_len);
if (skb2 == NULL)
goto err;
skb_put_data(skb2, skb->data + index,
skb_put_data(skb2, ntb_ptr + index,
dg_len - crc_len);
skb_queue_tail(list, skb2);
@@ -1331,10 +1336,17 @@ static int ncm_unwrap_ntb(struct gether *port,
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
dev_consume_skb_any(skb);
VDBG(port->func.config->cdev,
"Parsed NTB with %d frames\n", dgram_counter);
to_process -= block_len;
if (to_process != 0) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
dev_consume_skb_any(skb);
return 0;
err:
skb_queue_purge(list);

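The ncm_unwrap_ntb() rework above parses every NTB in the skb rather than only the first: ntb_ptr is a cursor and to_process counts the bytes left, and the function jumps back to parse_ntb until the buffer is consumed. The control flow reduces to roughly this, with a hypothetical parse_one_block() returning the length of the block it handled:

/* Hypothetical single-block parser: returns the block length, or <= 0
 * on a malformed block.
 */
static int parse_one_block(unsigned char *buf, int len);

static int unwrap_all(unsigned char *data, int len)
{
	unsigned char *ptr = data;
	int to_process = len;

	while (to_process > 0) {
		int block_len = parse_one_block(ptr, to_process);

		if (block_len <= 0)
			return -1;	/* malformed block */
		ptr += block_len;	/* advance to the next NTB */
		to_process -= block_len;
	}

	return 0;
}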

@@ -499,11 +499,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
/* Get the Buffer address and copy the transmit data.*/
eprambase = (u32 __force *)(udc->addr + ep->rambase);
if (ep->is_in) {
memcpy(eprambase, bufferptr, bytestosend);
memcpy_toio((void __iomem *)eprambase, bufferptr,
bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
} else {
memcpy(bufferptr, eprambase, bytestosend);
memcpy_toio((void __iomem *)bufferptr, eprambase,
bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -517,11 +519,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
eprambase = (u32 __force *)(udc->addr + ep->rambase +
ep->ep_usb.maxpacket);
if (ep->is_in) {
memcpy(eprambase, bufferptr, bytestosend);
memcpy_toio((void __iomem *)eprambase, bufferptr,
bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
} else {
memcpy(bufferptr, eprambase, bytestosend);
memcpy_toio((void __iomem *)bufferptr, eprambase,
bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -1023,7 +1027,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
udc->addr);
length = req->usb_req.actual = min_t(u32, length,
EP0_MAX_PACKET);
memcpy(corebuf, req->usb_req.buf, length);
memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
} else {
@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
/* Load up the chapter 9 command buffer.*/
ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
memcpy(&setup, ep0rambase, 8);
memcpy_toio((void __iomem *)&setup, ep0rambase, 8);
udc->setup = setup;
udc->setup.wValue = cpu_to_le16(setup.wValue);
@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
memcpy(buffer, ep0rambase, bytes_to_rx);
memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx);
if (req->usb_req.length == req->usb_req.actual) {
/* Data transfer completed get ready for Status stage */
@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + length;
memcpy(ep0rambase, buffer, length);
memcpy_toio((void __iomem *)ep0rambase, buffer, length);
}
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);

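The xudc_* hunks above replace plain memcpy() on endpoint buffer RAM with the MMIO-safe accessors. The general convention (this is a sketch of the rule, not the driver's helper) is memcpy_toio() for writes toward __iomem space and memcpy_fromio() for reads out of it:

#include <linux/io.h>
#include <linux/types.h>

static void ep_copy(void __iomem *ep_ram, void *buf, size_t len,
		    bool to_device)
{
	if (to_device)
		memcpy_toio(ep_ram, buf, len);		/* CPU buffer -> device RAM */
	else
		memcpy_fromio(buf, ep_ram, len);	/* device RAM -> CPU buffer */
}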

@@ -766,7 +766,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
struct xhci_ring *ring, struct xhci_td *td)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
size_t len;
@@ -3457,7 +3457,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
u32 *trb_buff_len, struct xhci_segment *seg)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;


@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x69, 16 },
{ "HWVers", MUSB_HWVERS, 16 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },


@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
musb_giveback(musb, urb, status);
qh->is_ready = ready;
/*
* musb->lock had been unlocked in musb_giveback, so qh may
* be freed, need to get it again
*/
qh = musb_ep_get_qh(hw_ep, is_in);
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
if (list_empty(&qh->hep->urb_list)) {
if (qh && list_empty(&qh->hep->urb_list)) {
struct list_head *head;
struct dma_controller *dma = musb->dma_controller;
@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* and its URB list has emptied, recycle this qh.
*/
if (ready && list_empty(&qh->hep->urb_list)) {
musb_ep_set_qh(qh->hw_ep, is_in, NULL);
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);


@@ -303,6 +303,7 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
dp->data.status = 0;
dp->data.conf = 0;
if (dp->hpd) {
drm_connector_oob_hotplug_event(dp->connector_fwnode);
dp->hpd = false;
sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
