Merge 5.10.228 into android12-5.10-lts

Changes in 5.10.228
	ALSA: hda/conexant - Fix audio routing for HP EliteOne 1000 G2
	net: enetc: add missing static descriptor and inline keyword
	posix-clock: Fix missing timespec64 check in pc_clock_settime()
	arm64: probes: Remove broken LDR (literal) uprobe support
	arm64: probes: Fix simulate_ldr*_literal()
	net: macb: Avoid 20s boot delay by skipping MDIO bus registration for fixed-link PHY
	irqchip/gic-v3-its: Fix VSYNC referencing an unmapped VPE on GIC v4.1
	fat: fix uninitialized variable
	mm/swapfile: skip HugeTLB pages for unuse_vma
	wifi: mac80211: fix potential key use-after-free
	KVM: Fix a data race on last_boosted_vcpu in kvm_vcpu_on_spin()
	io_uring/sqpoll: do not allow pinning outside of cpuset
	io_uring/sqpoll: retain test for whether the CPU is valid
	io_uring/sqpoll: do not put cpumask on stack
	s390/sclp_vt220: Convert newlines to CRLF instead of LFCR
	KVM: s390: Change virtual to physical address access in diag 0x258 handler
	x86/cpufeatures: Define X86_FEATURE_AMD_IBPB_RET
	x86/cpufeatures: Add a IBPB_NO_RET BUG flag
	x86/entry: Have entry_ibpb() invalidate return predictions
	x86/bugs: Skip RSB fill at VMEXIT
	x86/bugs: Do not use UNTRAIN_RET with IBPB on entry
	blk-rq-qos: fix crash on rq_qos_wait vs. rq_qos_wake_function race
	io_uring/sqpoll: close race on waiting for sqring entries
	drm/radeon: Fix encoder->possible_clones
	drm/vmwgfx: Handle surface check failure correctly
	iio: dac: ad5770r: add missing select REGMAP_SPI in Kconfig
	iio: dac: ltc1660: add missing select REGMAP_SPI in Kconfig
	iio: dac: stm32-dac-core: add missing select REGMAP_MMIO in Kconfig
	iio: adc: ti-ads8688: add missing select IIO_(TRIGGERED_)BUFFER in Kconfig
	iio: hid-sensors: Fix an error handling path in _hid_sensor_set_report_latency()
	iio: light: veml6030: fix ALS sensor resolution
	iio: light: veml6030: fix IIO device retrieval from embedded device
	iio: light: opt3001: add missing full-scale range value
	iio: proximity: mb1232: add missing select IIO_(TRIGGERED_)BUFFER in Kconfig
	iio: adc: ti-ads124s08: add missing select IIO_(TRIGGERED_)BUFFER in Kconfig
	Bluetooth: Remove debugfs directory on module init failure
	Bluetooth: btusb: Fix regression with fake CSR controllers 0a12:0001
	xhci: Fix incorrect stream context type macro
	USB: serial: option: add support for Quectel EG916Q-GL
	USB: serial: option: add Telit FN920C04 MBIM compositions
	parport: Proper fix for array out-of-bounds access
	x86/resctrl: Annotate get_mem_config() functions as __init
	x86/apic: Always explicitly disarm TSC-deadline timer
	x86/entry_32: Do not clobber user EFLAGS.ZF
	x86/entry_32: Clear CPU buffers after register restore in NMI return
	irqchip/gic-v4: Don't allow a VMOVP on a dying VPE
	mptcp: track and update contiguous data status
	mptcp: handle consistently DSS corruption
	tcp: fix mptcp DSS corruption due to large pmtu xmit
	nilfs2: propagate directory read errors from nilfs_find_entry()
	powerpc/mm: Always update max/min_low_pfn in mem_topology_setup()
	ALSA: hda/conexant - Use cached pin control for Node 0x1d on HP EliteOne 1000 G2
	Linux 5.10.228

Change-Id: I46a08618e1091915449af89690af27a230a28855
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 012423e6bd
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-11-09 12:08:08 +00:00

48 changed files with 307 additions and 112 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 227
SUBLEVEL = 228
EXTRAVERSION =
NAME = Dare mighty things


@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
probe_opcode_t insn = le32_to_cpu(*addr);
probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
struct arch_probe_insn *api = &asi->api;
if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
decoded = INSN_GOOD_NO_SLOT;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
decoded = INSN_GOOD_NO_SLOT;
} else {
decoded = arm_probe_decode_insn(insn, &asi->api);
}
/*
* If there's a symbol defined in front of and near enough to
@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))


@ -170,17 +170,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
{
u64 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (u64 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
if (opcode & (1 << 30)) /* x0-x30 */
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
else /* w0-w30 */
set_w_reg(regs, xn, *load_addr);
set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
@ -188,14 +186,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
{
s32 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (s32 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}


@ -1177,6 +1177,9 @@ void __init mem_topology_setup(void)
{
int cpu;
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
min_low_pfn = MEMORY_START >> PAGE_SHIFT;
/*
* Linux/mm assumes node 0 to be online at boot. However this is not
* true on PowerPC, where node 0 is similar to any other node, it
@ -1221,9 +1224,6 @@ void __init initmem_init(void)
{
int nid;
max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
max_pfn = max_low_pfn;
memblock_dump_all();
for_each_online_node(nid) {


@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
vcpu->stat.diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)


@ -9,6 +9,8 @@
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/cpufeatures.h>
#include <asm/nospec-branch.h>
.pushsection .noinstr.text, "ax"
@ -17,6 +19,9 @@ SYM_FUNC_START(entry_ibpb)
movl $PRED_CMD_IBPB, %eax
xorl %edx, %edx
wrmsr
/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
SYM_FUNC_END(entry_ibpb)
/* For KVM */


@ -939,6 +939,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
/* Now ready to switch the cr3 */
SWITCH_TO_USER_CR3 scratch_reg=%eax
/* Clobbers ZF */
CLEAR_CPU_BUFFERS
/*
* Restore all flags except IF. (We restore IF separately because
@ -949,7 +951,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@ -1221,7 +1222,6 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@ -1242,6 +1242,7 @@ SYM_CODE_START(asm_exc_nmi)
CHECK_AND_APPLY_ESPFIX
RESTORE_ALL_NMI cr3_reg=%edi pop=4
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
@ -1283,6 +1284,7 @@ SYM_CODE_START(asm_exc_nmi)
* 1 - orig_ax
*/
lss (1+5+6)*4(%esp), %esp # back to espfix stack
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)


@ -217,7 +217,7 @@
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
@ -324,6 +324,7 @@
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@ -455,4 +456,6 @@
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -491,7 +491,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
apic_write(APIC_TMICT, 0);
/*
* Setting APIC_LVT_MASKED (above) should be enough to tell
* the hardware that this timer will never fire. But AMD
* erratum 411 and some Intel CPU behavior circa 2024 say
* otherwise. Time for belt and suspenders programming: mask
* the timer _and_ zero the counter registers:
*/
if (v & APIC_LVT_TIMER_TSCDEADLINE)
wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
else
apic_write(APIC_TMICT, 0);
return 0;
}


@ -1061,7 +1061,24 @@ static void __init retbleed_select_mitigation(void)
case RETBLEED_MITIGATION_IBPB:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like SRSO has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
mitigate_smt = true;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
default:
@ -2453,6 +2470,14 @@ static void __init srso_select_mitigation(void)
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
srso_mitigation = SRSO_MITIGATION_IBPB;
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@ -2465,6 +2490,13 @@ static void __init srso_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");


@ -1335,6 +1335,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (vulnerable_to_rfds(ia32_cap))
setup_force_cpu_bug(X86_BUG_RFDS);
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@ -251,7 +251,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
return false;
}
static bool __get_mem_config_intel(struct rdt_resource *r)
static __init bool __get_mem_config_intel(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;
@ -285,7 +285,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
return true;
}
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;


@ -225,8 +225,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
data->got_token = true;
smp_wmb();
list_del_init(&curr->entry);
wake_up_process(data->task);
list_del_init_careful(&curr->entry);
return 1;
}


@ -928,10 +928,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
/* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
/* Fake CSR devices don't seem to support short transfers */
size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
else
/* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {


@ -47,7 +47,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
uint32_t index_mask = 0;
uint32_t index_mask = drm_encoder_mask(encoder);
int count;
/* DIG routing gets problematic */


@ -1402,6 +1402,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
DRM_ERROR("Surface size cannot exceed %dx%d",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
ret = -EINVAL;
goto err_out;
}


@ -1126,6 +1126,8 @@ config TI_ADS8344
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI && OF
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADS8684
and ADS8688 ADC chips
@ -1136,6 +1138,8 @@ config TI_ADS8688
config TI_ADS124S08
tristate "Texas Instruments ADS124S08"
depends on SPI && OF
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADS124S08
and ADS124S06 ADC chips


@ -35,7 +35,7 @@ static ssize_t _hid_sensor_set_report_latency(struct device *dev,
latency = integer * 1000 + fract / 1000;
ret = hid_sensor_set_report_latency(attrb, latency);
if (ret < 0)
return len;
return ret;
attrb->latency_ms = hid_sensor_get_report_latency(attrb);


@ -191,6 +191,7 @@ config AD5764
config AD5770R
tristate "Analog Devices AD5770R IDAC driver"
depends on SPI_MASTER
select REGMAP_SPI
help
Say yes here to build support for Analog Devices AD5770R Digital to
Analog Converter.
@ -271,6 +272,7 @@ config LPC18XX_DAC
config LTC1660
tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
depends on SPI
select REGMAP_SPI
help
Say yes here to build support for Linear Technology
LTC1660 and LTC1665 Digital to Analog Converters.
@ -357,6 +359,7 @@ config STM32_DAC
config STM32_DAC_CORE
tristate
select REGMAP_MMIO
config TI_DAC082S085
tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"


@ -138,6 +138,10 @@ static const struct opt3001_scale opt3001_scales[] = {
.val = 20966,
.val2 = 400000,
},
{
.val = 41932,
.val2 = 800000,
},
{
.val = 83865,
.val2 = 600000,
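
For reference, the full-scale entries in opt3001_scales appear to double at each step (.val holding the integer lux part and .val2 the fractional part in millionths), so the hunk above fills the step that was missing between the two neighbouring entries:

	20966.4 lx * 2 = 41932.8 lx
	41932.8 lx * 2 = 83865.6 lx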


@ -99,9 +99,8 @@ static const char * const period_values[] = {
static ssize_t in_illuminance_period_available_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct veml6030_data *data = iio_priv(dev_to_iio_dev(dev));
int ret, reg, x;
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct veml6030_data *data = iio_priv(indio_dev);
ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
if (ret) {
@ -780,7 +779,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev)
/* Cache currently active measurement parameters */
data->cur_gain = 3;
data->cur_resolution = 4608;
data->cur_resolution = 5376;
data->cur_integration_time = 3;
return ret;
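
A rough check of the new value, assuming the VEML6030 datasheet figure of 0.0042 lx/count at gain x2 with 800 ms integration, and that cur_resolution is kept in units of 1/10000 lx: the power-on defaults cached above are gain 1/8 (cur_gain = 3) and 100 ms integration (cur_integration_time = 3), so

	0.0042 lx/count * (800 ms / 100 ms) * (2 / (1/8)) = 0.0042 * 8 * 16 = 0.5376 lx/count

which is 5376 in those units; the old 4608 does not fit this scaling.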


@ -49,6 +49,8 @@ config LIDAR_LITE_V2
config MB1232
tristate "MaxSonar I2CXL family ultrasonic sensors"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y to build a driver for the ultrasonic sensors I2CXL of
MaxBotix which have an i2c interface. It can be used to measure


@ -779,6 +779,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@ -788,9 +789,14 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
if (!desc->its_vmapp_cmd.valid) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
/*
* Unmapping a VPE is self-synchronizing on GICv4.1,
* no need to issue a VSYNC.
*/
vpe = NULL;
}
goto out;
@ -803,13 +809,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
if (!is_v4_1(its))
goto out;
vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
/* We can only signal PTZ when alloc==1. Why do we have two bits? */
@ -820,7 +826,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmapp_cmd.vpe);
return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@ -3796,6 +3802,13 @@ static int its_vpe_set_affinity(struct irq_data *d,
unsigned long flags;
int from, cpu;
/*
* Check if we're racing against a VPE being destroyed, for
* which we don't want to allow a VMOVP.
*/
if (!atomic_read(&vpe->vmapp_count))
return -EINVAL;
/*
* Changing affinity is mega expensive, so let's be as lazy as
* we can and only do it if we really have to. Also, if mapped
@ -4432,9 +4445,8 @@ static int its_vpe_init(struct its_vpe *vpe)
raw_spin_lock_init(&vpe->vpe_lock);
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
if (gic_rdists->has_rvpeid)
atomic_set(&vpe->vmapp_count, 0);
else
atomic_set(&vpe->vmapp_count, 0);
if (!gic_rdists->has_rvpeid)
vpe->vpe_proxy_event = -1;
return 0;


@ -742,9 +742,6 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@ -765,8 +762,19 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
/* With fixed-link, we don't need to register the MDIO bus,
* except if we have a child named "mdio" in the device tree.
* In that case, some devices may be attached to the MACB's MDIO bus.
*/
child = of_get_child_by_name(np, "mdio");
if (child)
of_node_put(child);
else if (of_phy_is_fixed_link(np))
return macb_mii_probe(bp->dev);
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));


@ -51,12 +51,12 @@ static int do_active_device(struct ctl_table *table, int write,
for (dev = port->devices; dev ; dev = dev->next) {
if(dev == port->cad) {
len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name);
len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name);
}
}
if(!len) {
len += snprintf(buffer, sizeof(buffer), "%s\n", "none");
len += scnprintf(buffer, sizeof(buffer), "%s\n", "none");
}
if (len > *lenp)
@ -87,19 +87,19 @@ static int do_autoprobe(struct ctl_table *table, int write,
}
if ((str = info->class_name) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
if ((str = info->model) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
if ((str = info->mfr) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
if ((str = info->description) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
if ((str = info->cmdset) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
if (len > *lenp)
len = *lenp;
@ -128,7 +128,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
if (len > *lenp)
len = *lenp;
@ -155,7 +155,7 @@ static int do_hardware_irq(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq);
len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq);
if (len > *lenp)
len = *lenp;
@ -182,7 +182,7 @@ static int do_hardware_dma(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma);
len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma);
if (len > *lenp)
len = *lenp;
@ -213,7 +213,7 @@ static int do_hardware_modes(struct ctl_table *table, int write,
#define printmode(x) \
do { \
if (port->modes & PARPORT_MODE_##x) \
len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
} while (0)
int f = 0;
printmode(PCSPP);
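
All of the snprintf() calls above become scnprintf() because snprintf() returns the length the output would have had, so accumulating its return value into len can walk past the buffer when a string is truncated; the kernel's scnprintf() instead returns the number of characters actually stored. A minimal userspace sketch of the pitfall (hypothetical strings, not the parport data):

	#include <stdio.h>

	int main(void)
	{
		char buffer[8];
		size_t len = 0;

		/* The output is truncated to 7 characters plus NUL, but the
		 * return value is the untruncated length, 10, so len now
		 * exceeds sizeof(buffer) and buffer + len points past the
		 * end of the array. */
		len += snprintf(buffer + len, sizeof(buffer) - len, "%s", "0123456789");

		printf("len=%zu capacity=%zu\n", len, sizeof(buffer));
		return 0;
	}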


@ -325,7 +325,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
buffer = (void *) ((addr_t) sccb + sccb->header.length);
if (convertlf) {
/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
/* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/
for (from=0, to=0;
(from < count) && (to < sclp_vt220_space_left(request));
from++) {
@ -334,8 +334,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
/* Perform conversion */
if (c == 0x0a) {
if (to + 1 < sclp_vt220_space_left(request)) {
((unsigned char *) buffer)[to++] = c;
((unsigned char *) buffer)[to++] = 0x0d;
((unsigned char *) buffer)[to++] = c;
} else
break;


@ -1285,7 +1285,7 @@ enum xhci_setup_dev {
/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
#define SCT_FOR_TRB(p) (((p) & 0x7) << 1)
/* Link TRB specific fields */
#define TRB_TC (1<<1)
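
The macro fix above matters because the stream context type (SCT) is a 3-bit field that lives in bits 3:1 of the Set TR Dequeue Pointer TRB word: the value has to be masked first and shifted second. A minimal sketch of the difference (illustrative macros, not the driver's):

	#define SCT_OLD(p)	(((p) << 1) & 0x7)	/* shift, then mask: the top bit is clipped */
	#define SCT_NEW(p)	(((p) & 0x7) << 1)	/* mask the 3-bit type, then place it at bits 3:1 */

	/*
	 * For a stream context type of 7 (binary 111):
	 *   SCT_OLD(7) == 0x6 -- bit 2 of the type is lost
	 *   SCT_NEW(7) == 0xE -- all three bits land in bits 3:1
	 */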


@ -279,6 +279,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EG912Y 0x6001
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200A 0x6005
#define QUECTEL_PRODUCT_EG916Q 0x6007
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
@ -1270,6 +1271,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@ -1380,10 +1382,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),


@ -1019,7 +1019,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
__func__, new_i_pos);
}
goto out;
}


@ -331,6 +331,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
* returns the page in which the entry was found, and the entry itself
* (as a parameter - res_dir). Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*
* On failure, returns an error pointer and the caller should ignore res_page.
*/
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
@ -358,22 +360,24 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
do {
char *kaddr = nilfs_get_page(dir, n, &page);
if (!IS_ERR(kaddr)) {
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
}
if (nilfs_match(namelen, name, de))
goto found;
de = nilfs_next_entry(de);
if (IS_ERR(kaddr))
return ERR_CAST(kaddr);
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *)de <= kaddr) {
if (de->rec_len == 0) {
nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
}
nilfs_put_page(page);
if (nilfs_match(namelen, name, de))
goto found;
de = nilfs_next_entry(de);
}
nilfs_put_page(page);
if (++n >= npages)
n = 0;
/* next page is past the blocks we've got */
@ -386,7 +390,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
}
} while (n != start);
out:
return NULL;
return ERR_PTR(-ENOENT);
found:
*res_page = page;
@ -431,19 +435,19 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
return NULL;
}
ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
{
ino_t res = 0;
struct nilfs_dir_entry *de;
struct page *page;
de = nilfs_find_entry(dir, qstr, &page);
if (de) {
res = le64_to_cpu(de->inode);
kunmap(page);
put_page(page);
}
return res;
if (IS_ERR(de))
return PTR_ERR(de);
*ino = le64_to_cpu(de->inode);
kunmap(page);
put_page(page);
return 0;
}
/* Releases the page */


@ -55,12 +55,20 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
ino_t ino;
int res;
if (dentry->d_name.len > NILFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ino = nilfs_inode_by_name(dir, &dentry->d_name);
inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL;
res = nilfs_inode_by_name(dir, &dentry->d_name, &ino);
if (res) {
if (res != -ENOENT)
return ERR_PTR(res);
inode = NULL;
} else {
inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
}
return d_splice_alias(inode, dentry);
}
@ -261,10 +269,11 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
struct page *page;
int err;
err = -ENOENT;
de = nilfs_find_entry(dir, &dentry->d_name, &page);
if (!de)
if (IS_ERR(de)) {
err = PTR_ERR(de);
goto out;
}
inode = d_inode(dentry);
err = -EIO;
@ -358,10 +367,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(err))
return err;
err = -ENOENT;
old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
if (IS_ERR(old_de)) {
err = PTR_ERR(old_de);
goto out;
}
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
@ -378,10 +388,12 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (dir_de && !nilfs_empty_dir(new_inode))
goto out_dir;
err = -ENOENT;
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
if (IS_ERR(new_de)) {
err = PTR_ERR(new_de);
goto out_dir;
}
nilfs_set_link(new_dir, new_de, new_page, old_inode);
nilfs_mark_inode_dirty(new_dir);
new_inode->i_ctime = current_time(new_inode);
@ -435,14 +447,15 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
static struct dentry *nilfs_get_parent(struct dentry *child)
{
unsigned long ino;
ino_t ino;
int res;
struct inode *inode;
struct qstr dotdot = QSTR_INIT("..", 2);
struct nilfs_root *root;
ino = nilfs_inode_by_name(d_inode(child), &dotdot);
if (!ino)
return ERR_PTR(-ENOENT);
res = nilfs_inode_by_name(d_inode(child), &dotdot, &ino);
if (res)
return ERR_PTR(res);
root = NILFS_I(d_inode(child))->i_root;


@ -233,7 +233,7 @@ static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
/* dir.c */
extern int nilfs_add_link(struct dentry *, struct inode *);
extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino);
extern int nilfs_make_empty(struct inode *, struct inode *);
extern struct nilfs_dir_entry *
nilfs_find_entry(struct inode *, const struct qstr *, struct page **);


@ -48,7 +48,8 @@ static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif
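
The static and inline keywords matter here because the stub is defined in a header: without them, every .c file that includes the header emits its own external definition of enetc_hw_alloc() and the link fails with a multiple-definition error. A minimal sketch of the same pattern, with hypothetical names:

	/* foo_stub.h -- hypothetical header following the same convention */
	#ifndef FOO_STUB_H
	#define FOO_STUB_H

	#ifdef CONFIG_FOO
	int foo_read(int reg);			/* real implementation lives in foo.c */
	#else
	/* "static inline" gives every includer its own private copy, so the
	 * stub below never emits a duplicate external symbol. */
	static inline int foo_read(int reg)
	{ return -1; }
	#endif

	#endif /* FOO_STUB_H */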


@ -58,10 +58,12 @@ struct its_vpe {
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count;
};
};
/* Track the VPE being mapped */
atomic_t vmapp_count;
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.


@ -56,6 +56,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
@ -1589,7 +1590,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
/*
* SQPOLL must use the actual sqring head, as using the cached_sq_head
* is race prone if the SQPOLL thread has grabbed entries but not yet
* committed them to the ring. For !SQPOLL, this doesn't matter, but
* since this helper is just used for SQPOLL sqring waits (or POLLOUT),
* just read the actual sqring head unconditionally.
*/
return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
@ -8581,11 +8589,22 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
return 0;
if (p->flags & IORING_SETUP_SQ_AFF) {
cpumask_var_t allowed_mask;
int cpu = p->sq_thread_cpu;
ret = -EINVAL;
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
goto err_sqpoll;
ret = -ENOMEM;
if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
goto err_sqpoll;
ret = -EINVAL;
cpuset_cpus_allowed(current, allowed_mask);
if (!cpumask_test_cpu(cpu, allowed_mask)) {
free_cpumask_var(allowed_mask);
goto err_sqpoll;
}
free_cpumask_var(allowed_mask);
sqd->sq_cpu = cpu;
} else {
sqd->sq_cpu = -1;


@ -299,6 +299,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
goto out;
}
if (!timespec64_valid_strict(ts))
return -EINVAL;
if (cd.clk->ops.clock_settime)
err = cd.clk->ops.clock_settime(cd.clk, ts);
else
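
timespec64_valid_strict() rejects timestamps that a posix clock cannot represent. A rough userspace approximation of what it checks (the exact overflow bound is an assumption for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000L

	struct timespec64 { int64_t tv_sec; long tv_nsec; };

	/* Sketch: seconds must be non-negative, nanoseconds within
	 * [0, NSEC_PER_SEC), and the whole value must fit in the kernel's
	 * signed 64-bit nanosecond clock (ktime_t) without overflowing. */
	static bool timespec64_valid_strict_sketch(const struct timespec64 *ts)
	{
		if (ts->tv_sec < 0)
			return false;
		if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
			return false;
		if ((uint64_t)ts->tv_sec >= (uint64_t)(INT64_MAX / NSEC_PER_SEC))
			return false;
		return true;
	}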


@ -2158,7 +2158,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma) {
if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
ret = unuse_vma(vma, type, frontswap,
fs_pages_to_unuse);
if (ret)


@ -779,6 +779,7 @@ static int __init bt_init(void)
bt_sysfs_cleanup();
cleanup_led:
bt_leds_cleanup();
debugfs_remove_recursive(bt_debugfs);
return err;
}


@ -2305,7 +2305,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (len <= skb->len)
break;
if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
return false;
len -= skb->len;


@ -509,6 +509,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
sta->cipher_scheme = cs;
err = ieee80211_key_link(key, sdata, sta);
/* KRACK protection, shouldn't happen but just silently accept key */
if (err == -EALREADY)
err = 0;
out_unlock:
mutex_unlock(&local->sta_mtx);


@ -843,7 +843,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
ieee80211_key_free_unused(key);
ret = 0;
ret = -EALREADY;
goto out;
}


@ -23,6 +23,8 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
SNMP_MIB_ITEM("DSSCorruptionFallback", MPTCP_MIB_DSSCORRUPTIONFALLBACK),
SNMP_MIB_ITEM("DSSCorruptionReset", MPTCP_MIB_DSSCORRUPTIONRESET),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),


@ -16,6 +16,8 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
MPTCP_MIB_DSSCORRUPTIONFALLBACK,/* DSS corruption detected, fallback */
MPTCP_MIB_DSSCORRUPTIONRESET, /* DSS corruption detected, MPJ subflow reset */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
MPTCP_MIB_OFOQUEUETAIL, /* Segments inserted into OoO queue tail */
MPTCP_MIB_OFOQUEUE, /* Segments inserted into OoO queue */


@ -457,6 +457,18 @@ static void mptcp_check_data_fin(struct sock *sk)
}
}
static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
{
if (READ_ONCE(msk->allow_infinite_fallback)) {
MPTCP_INC_STATS(sock_net(ssk),
MPTCP_MIB_DSSCORRUPTIONFALLBACK);
mptcp_do_fallback(ssk);
} else {
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
mptcp_subflow_reset(ssk);
}
}
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct sock *ssk,
unsigned int *bytes)
@ -519,10 +531,12 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
moved += len;
seq += len;
if (WARN_ON_ONCE(map_remaining < len))
break;
if (unlikely(map_remaining < len))
mptcp_dss_corruption(msk, ssk);
} else {
WARN_ON_ONCE(!fin);
if (unlikely(!fin))
mptcp_dss_corruption(msk, ssk);
sk_eat_skb(ssk, skb);
done = true;
}
@ -1810,9 +1824,11 @@ static void mptcp_worker(struct work_struct *work)
if (!mptcp_ext_cache_refill(msk))
break;
}
if (copied)
if (copied) {
tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
size_goal);
WRITE_ONCE(msk->allow_infinite_fallback, false);
}
dfrag->data_seq = orig_write_seq;
dfrag->offset = orig_offset;
@ -1845,6 +1861,7 @@ static int __mptcp_init_sock(struct sock *sk)
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
WRITE_ONCE(msk->allow_infinite_fallback, true);
mptcp_pm_data_init(msk);
@ -2543,6 +2560,7 @@ bool mptcp_finish_join(struct sock *sk)
if (parent_sock && !sk->sk_socket)
mptcp_sock_graft(sk, parent_sock);
subflow->map_seq = READ_ONCE(msk->ack_seq);
WRITE_ONCE(msk->allow_infinite_fallback, false);
return true;
}


@ -213,6 +213,7 @@ struct mptcp_sock {
bool rcv_data_fin;
bool snd_data_fin_enable;
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
bool allow_infinite_fallback;
spinlock_t join_list_lock;
struct work_struct work;
struct sk_buff *ooo_last_skb;


@ -702,7 +702,7 @@ static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
unsigned int skb_consumed;
skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
if (WARN_ON_ONCE(skb_consumed >= skb->len))
if (unlikely(skb_consumed >= skb->len))
return true;
return skb->len - skb_consumed <= subflow->map_data_len -
@ -1179,6 +1179,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
list_add_tail(&subflow->node, &msk->join_list);
spin_unlock_bh(&msk->join_list_lock);
WRITE_ONCE(msk->allow_infinite_fallback, false);
return err;
failed:


@ -234,6 +234,7 @@ enum {
CXT_FIXUP_HP_SPECTRE,
CXT_FIXUP_HP_GATE_MIC,
CXT_FIXUP_MUTE_LED_GPIO,
CXT_FIXUP_HP_ELITEONE_OUT_DIS,
CXT_FIXUP_HP_ZBOOK_MUTE_LED,
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
@ -251,6 +252,19 @@ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
spec->gen.inv_dmic_split = 1;
}
/* fix widget control pin settings */
static void cxt_fixup_update_pinctl(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_PROBE) {
/* Unset OUT_EN for this Node pin, leaving only HP_EN.
* This is the value stored in the codec register after
* a correct initialization by a previous Windows boot.
*/
snd_hda_set_pin_ctl_cache(codec, 0x1d, AC_PINCTL_HP_EN);
}
}
static void cxt5066_increase_mic_boost(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@ -902,6 +916,10 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_mute_led_gpio,
},
[CXT_FIXUP_HP_ELITEONE_OUT_DIS] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_update_pinctl,
},
[CXT_FIXUP_HP_ZBOOK_MUTE_LED] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_hp_zbook_mute_led,
@ -992,6 +1010,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83e5, "HP EliteOne 1000 G2", CXT_FIXUP_HP_ELITEONE_OUT_DIS),
SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),


@ -3090,12 +3090,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
int last_boosted_vcpu;
int yielded = 0;
int try = 3;
int pass;
int i;
last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
@ -3126,7 +3127,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
kvm->last_boosted_vcpu = i;
WRITE_ONCE(kvm->last_boosted_vcpu, i);
break;
} else if (yielded < 0) {
try--;
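
The hunk above turns the lockless accesses to kvm->last_boosted_vcpu into READ_ONCE()/WRITE_ONCE() so the compiler performs exactly one untorn load and one untorn store even though other vCPUs update the field concurrently. A simplified userspace sketch of the pattern (the macro forms are an approximation of the kernel's):

	#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

	static int last_boosted_vcpu;	/* shared by all vCPU threads, no lock held */

	static int pick_start(int nr_vcpus)
	{
		/* Single snapshot: later uses cannot see a different value. */
		int start = READ_ONCE(last_boosted_vcpu);

		return (start + 1) % nr_vcpus;
	}

	static void record_boost(int i)
	{
		/* Single store, visible to concurrent readers of the field. */
		WRITE_ONCE(last_boosted_vcpu, i);
	}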