This is the 6.1.25 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmRBFW0ACgkQONu9yGCS
 aT7Jew//Ytw9+JQ71LT1TuJnQ1GayJOL1BW5hgxoYgnBFasWDwsGA9rzHs6KHqHb
 0Vjk7MX7VZB+6zWakOxY5CFVM33J4fS7wY8WE2bj8X3QQhD/J0HQDMdELvSBi3qF
 7xI6sghEQEwOuwAj2+CBqm/q7rA5FTnO1QgJuk/AKJ6PHGRiQeZ7q1zGpFvSaj7S
 cyKvY99RsjnUN+PYk4LE2+u/6DVCqiWYVDQrdjalb9zsrXg4+nmPH6ZJzZX8+bbM
 eM0xAR675V8TXqDi+8bj7tWmiS52XyjYF3Q/bu9BmU67DqslH9FFyVQxhgTHUZpN
 qWXkojEU2djIc3qt7T/bpZS/vD8Kg3Px1CgyIRN8Y5SlZfhZyqVdTZ4AQCtJuLQJ
 wDIdQCLlGzzDNFvbD+LdfJSjZt7Ig1sM/HwtPZhUA9yF0FN1XV3dcESzCOeI0/S7
 ohRh8cs1sidnxrbvVwiVNENSqbJD7G9/9vVjIfyfcnt57q+fs6xCBhpOyNoVOp74
 I5i6ALMcVZoAB50vDjnoGZsSRe9W2AmOV6UMIkVCvRCWYFqBpgVftMTAACNyljni
 UlXmO7aDQj+nbHD/auclFtU02oHQbk62FSrwoWMFS090zWztQqUhgRY7Qnl13yCM
 poEvrKlskXhvunsNtdVmI5O3N2GANWKgGwkyFIiXvgxKkw1qpUo=
 =zeN9
 -----END PGP SIGNATURE-----

Merge 6.1.25 into android14-6.1

Changes in 6.1.25
	Revert "pinctrl: amd: Disable and mask interrupts on resume"
	drm/amd/display: Pass the right info to drm_dp_remove_payload
	ALSA: emu10k1: fix capture interrupt handler unlinking
	ALSA: hda/sigmatel: add pin overrides for Intel DP45SG motherboard
	ALSA: i2c/cs8427: fix iec958 mixer control deactivation
	ALSA: hda: patch_realtek: add quirk for Asus N7601ZM
	ALSA: hda/realtek: Add quirks for Lenovo Z13/Z16 Gen2
	ALSA: firewire-tascam: add missing unwind goto in snd_tscm_stream_start_duplex()
	ALSA: emu10k1: don't create old pass-through playback device on Audigy
	ALSA: hda/sigmatel: fix S/PDIF out on Intel D*45* motherboards
	ALSA: hda/hdmi: disable KAE for Intel DG2
	Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}
	Bluetooth: Fix race condition in hidp_session_thread
	bluetooth: btbcm: Fix logic error in forming the board name.
	Bluetooth: Free potentially unfreed SCO connection
	Bluetooth: hci_conn: Fix possible UAF
	btrfs: restore the thread_pool= behavior in remount for the end I/O workqueues
	btrfs: fix fast csum implementation detection
	fbmem: Reject FB_ACTIVATE_KD_TEXT from userspace
	mtdblock: tolerate corrected bit-flips
	mtd: rawnand: meson: fix bitmask for length in command word
	mtd: rawnand: stm32_fmc2: remove unsupported EDO mode
	mtd: rawnand: stm32_fmc2: use timings.mode instead of checking tRC_min
	KVM: arm64: PMU: Restore the guest's EL0 event counting after migration
	fbcon: Fix error paths in set_con2fb_map
	fbcon: set_con2fb_map needs to set con2fb_map!
	drm/i915/dsi: fix DSS CTL register offsets for TGL+
	clk: sprd: set max_register according to mapping range
	RDMA/irdma: Do not generate SW completions for NOPs
	RDMA/irdma: Fix memory leak of PBLE objects
	RDMA/irdma: Increase iWARP CM default rexmit count
	RDMA/irdma: Add ipv4 check to irdma_find_listener()
	IB/mlx5: Add support for 400G_8X lane speed
	RDMA/erdma: Update default EQ depth to 4096 and max_send_wr to 8192
	RDMA/erdma: Inline mtt entries into WQE if supported
	RDMA/erdma: Defer probing if netdevice can not be found
	clk: rs9: Fix suspend/resume
	RDMA/cma: Allow UD qp_type to join multicast only
	bpf: tcp: Use sock_gen_put instead of sock_put in bpf_iter_tcp
	LoongArch, bpf: Fix jit to skip speculation barrier opcode
	dmaengine: apple-admac: Handle 'global' interrupt flags
	dmaengine: apple-admac: Set src_addr_widths capability
	dmaengine: apple-admac: Fix 'current_tx' not getting freed
	9p/xen : Fix use after free bug in xen_9pfs_front_remove due to race condition
	bpf, arm64: Fixed a BTI error on returning to patched function
	KVM: arm64: Initialise hypervisor copies of host symbols unconditionally
	KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
	niu: Fix missing unwind goto in niu_alloc_channels()
	tcp: restrict net.ipv4.tcp_app_win
	bonding: fix ns validation on backup slaves
	iavf: refactor VLAN filter states
	iavf: remove active_cvlans and active_svlans bitmaps
	net: openvswitch: fix race on port output
	Bluetooth: hci_conn: Fix not cleaning up on LE Connection failure
	Bluetooth: Fix printing errors if LE Connection times out
	Bluetooth: SCO: Fix possible circular locking dependency sco_sock_getsockopt
	Bluetooth: Set ISO Data Path on broadcast sink
	drm/armada: Fix a potential double free in an error handling path
	qlcnic: check pci_reset_function result
	net: wwan: iosm: Fix error handling path in ipc_pcie_probe()
	cgroup,freezer: hold cpu_hotplug_lock before freezer_mutex
	net: qrtr: Fix an uninit variable access bug in qrtr_tx_resume()
	sctp: fix a potential overflow in sctp_ifwdtsn_skip
	RDMA/core: Fix GID entry ref leak when create_ah fails
	selftests: openvswitch: adjust datapath NL message declaration
	udp6: fix potential access to stale information
	net: macb: fix a memory corruption in extended buffer descriptor mode
	skbuff: Fix a race between coalescing and releasing SKBs
	libbpf: Fix single-line struct definition output in btf_dump
	ARM: 9290/1: uaccess: Fix KASAN false-positives
	ARM: dts: qcom: apq8026-lg-lenok: add missing reserved memory
	power: supply: rk817: Fix unsigned comparison with less than zero
	power: supply: cros_usbpd: reclassify "default case!" as debug
	power: supply: axp288_fuel_gauge: Added check for negative values
	selftests/bpf: Fix progs/find_vma_fail1.c build error.
	wifi: mwifiex: mark OF related data as maybe unused
	i2c: imx-lpi2c: clean rx/tx buffers upon new message
	i2c: hisi: Avoid redundant interrupts
	efi: sysfb_efi: Add quirk for Lenovo Yoga Book X91F/L
	block: ublk_drv: mark device as LIVE before adding disk
	ACPI: video: Add backlight=native DMI quirk for Acer Aspire 3830TG
	drm: panel-orientation-quirks: Add quirk for Lenovo Yoga Book X90F
	hwmon: (peci/cputemp) Fix miscalculated DTS for SKX
	hwmon: (xgene) Fix ioremap and memremap leak
	verify_pefile: relax wrapper length check
	asymmetric_keys: log on fatal failures in PE/pkcs7
	nvme: send Identify with CNS 06h only to I/O controllers
	wifi: iwlwifi: mvm: fix mvmtxq->stopped handling
	wifi: iwlwifi: mvm: protect TXQ list manipulation
	drm/amdgpu: add mes resume when do gfx post soft reset
	drm/amdgpu: Force signal hw_fences that are embedded in non-sched jobs
	drm/amdgpu/gfx: set cg flags to enter/exit safe mode
	ACPI: resource: Add Medion S17413 to IRQ override quirk
	x86/hyperv: Move VMCB enlightenment definitions to hyperv-tlfs.h
	KVM: selftests: Move "struct hv_enlightenments" to x86_64/svm.h
	KVM: SVM: Add a proper field for Hyper-V VMCB enlightenments
	x86/hyperv: KVM: Rename "hv_enlightenments" to "hv_vmcb_enlightenments"
	KVM: SVM: Flush Hyper-V TLB when required
	tracing: Add trace_array_puts() to write into instance
	tracing: Have tracing_snapshot_instance_cond() write errors to the appropriate instance
	maple_tree: fix write memory barrier of nodes once dead for RCU mode
	ksmbd: avoid out of bounds access in decode_preauth_ctxt()
	riscv: add icache flush for nommu sigreturn trampoline
	HID: intel-ish-hid: Fix kernel panic during warm reset
	net: sfp: initialize sfp->i2c_block_size at sfp allocation
	net: phy: nxp-c45-tja11xx: add remove callback
	net: phy: nxp-c45-tja11xx: fix unsigned long multiplication overflow
	scsi: ses: Handle enclosure with just a primary component gracefully
	x86/PCI: Add quirk for AMD XHCI controller that loses MSI-X state in D3hot
	cgroup: fix display of forceidle time at root
	cgroup/cpuset: Fix partition root's cpuset.cpus update bug
	cgroup/cpuset: Wake up cpuset_attach_wq tasks in cpuset_cancel_attach()
	drm/amd/pm: correct SMU13.0.7 pstate profiling clock settings
	drm/amd/pm: correct SMU13.0.7 max shader clock reporting
	mptcp: use mptcp_schedule_work instead of open-coding it
	mptcp: stricter state check in mptcp_worker
	ubi: Fix failure attaching when vid_hdr offset equals to (sub)page size
	ubi: Fix deadlock caused by recursively holding work_sem
	i2c: mchp-pci1xxxx: Update Timing registers
	powerpc/papr_scm: Update the NUMA distance table for the target node
	sched/fair: Fix imbalance overflow
	x86/rtc: Remove __init for runtime functions
	i2c: ocores: generate stop condition after timeout in polling mode
	cifs: fix negotiate context parsing
	nvme-pci: mark Lexar NM760 as IGNORE_DEV_SUBNQN
	nvme-pci: add NVME_QUIRK_BOGUS_NID for T-FORCE Z330 SSD
	cgroup/cpuset: Skip spread flags update on v2
	cgroup/cpuset: Make cpuset_fork() handle CLONE_INTO_CGROUP properly
	cgroup/cpuset: Add cpuset_can_fork() and cpuset_cancel_fork() methods
	Linux 6.1.25

Change-Id: Ib4d2c49ea9bacb8d8dbdb7b3a4eecce937016427
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-04-26 13:13:19 +00:00
commit 55e4f0c551
132 changed files with 1294 additions and 608 deletions


@ -337,6 +337,8 @@ tcp_app_win - INTEGER
Reserve max(window/2^tcp_app_win, mss) of window for application
buffer. Value 0 is special, it means that nothing is reserved.
Possible values are [0, 31], inclusive.
Default: 31
tcp_autocorking - BOOLEAN


@ -704,7 +704,7 @@ ref
no-jd
BIOS setup but without jack-detection
intel
Intel DG45* mobos
Intel D*45* mobos
dell-m6-amic
Dell desktops/laptops with analog mics
dell-m6-dmic


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 24
SUBLEVEL = 25
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -26,6 +26,16 @@ chosen {
};
reserved-memory {
sbl_region: sbl@2f00000 {
reg = <0x02f00000 0x100000>;
no-map;
};
external_image_region: external-image@3100000 {
reg = <0x03100000 0x200000>;
no-map;
};
adsp_region: adsp@3300000 {
reg = <0x03300000 0x1400000>;
no-map;


@ -116,7 +116,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
memcpy((void *)to, from, tocopy);
__memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
@ -178,7 +178,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
memset((void *)addr, 0, tocopy);
__memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;


@ -608,6 +608,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
kvm_vcpu_pmu_restore_guest(vcpu);
}
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)


@ -673,7 +673,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0)


@ -281,4 +281,8 @@
/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
/* ADR */
#define A64_ADR(Rd, offset) \
aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
#endif /* _BPF_JIT_H */


@ -1905,7 +1905,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
restore_args(ctx, args_off, nargs);
/* call original func */
emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
emit(A64_BLR(A64_R(10)), ctx);
emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
emit(A64_RET(A64_R(10)), ctx);
/* store return value */
emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
/* reserve a nop for bpf_tramp_image_put */


@ -955,6 +955,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
emit_atomic(insn, ctx);
break;
/* Speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
default:
pr_err("bpf_jit: unknown opcode %02x\n", code);
return -EINVAL;


@ -366,6 +366,7 @@ void update_numa_distance(struct device_node *node)
WARN(numa_distance_table[nid][nid] == -1,
"NUMA distance details for node %d not provided\n", nid);
}
EXPORT_SYMBOL_GPL(update_numa_distance);
/*
* ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}


@ -1428,6 +1428,13 @@ static int papr_scm_probe(struct platform_device *pdev)
return -ENODEV;
}
/*
* open firmware platform device create won't update the NUMA
* distance table. For PAPR SCM devices we use numa_map_to_online_node()
* to find the nearest online NUMA node and that requires correct
* distance table information.
*/
update_numa_distance(dn);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)


@ -19,6 +19,7 @@
#include <asm/signal32.h>
#include <asm/switch_to.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
extern u32 __user_rt_sigreturn[2];
@ -181,6 +182,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe __user *frame;
long err = 0;
unsigned long __maybe_unused addr;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
@ -209,7 +211,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
sizeof(frame->sigreturn_code)))
return -EFAULT;
regs->ra = (unsigned long)&frame->sigreturn_code;
addr = (unsigned long)&frame->sigreturn_code;
/* Make sure the two instructions are pushed to icache. */
flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
regs->ra = addr;
#endif /* CONFIG_MMU */
/*


@ -598,6 +598,28 @@ struct hv_enlightened_vmcs {
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
/*
* Hyper-V uses the software reserved 32 bytes in VMCB control area to expose
* SVM enlightenments to guests.
*/
struct hv_vmcb_enlightenments {
struct __packed hv_enlightenments_control {
u32 nested_flush_hypercall:1;
u32 msr_bitmap:1;
u32 enlightened_npt_tlb: 1;
u32 reserved:29;
} __packed hv_enlightenments_control;
u32 hv_vp_id;
u64 hv_vm_id;
u64 partition_assist_page;
u64 reserved;
} __packed;
/*
* Hyper-V uses the software reserved clean bit in VMCB.
*/
#define HV_VMCB_NESTED_ENLIGHTENMENTS 31
struct hv_partition_assist_pg {
u32 tlb_lock_count;
};


@ -5,6 +5,8 @@
#include <uapi/asm/svm.h>
#include <uapi/asm/kvm.h>
#include <asm/hyperv-tlfs.h>
/*
* 32-bit intercept words in the VMCB Control Area, starting
* at Byte offset 000h.
@ -161,7 +163,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
* Offset 0x3e0, 32 bytes reserved
* for use by hypervisor/software.
*/
u8 reserved_sw[32];
union {
struct hv_vmcb_enlightenments hv_enlightenments;
u8 reserved_sw[32];
};
};


@ -33,8 +33,8 @@ static int __init iommu_init_noop(void) { return 0; }
static void iommu_shutdown_noop(void) { }
bool __init bool_x86_init_noop(void) { return false; }
void x86_op_int_noop(int cpu) { }
static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
static __init void get_rtc_noop(struct timespec64 *now) { }
static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
static void get_rtc_noop(struct timespec64 *now) { }
static __initconst const struct of_device_id of_cmos_match[] = {
{ .compatible = "motorola,mc146818" },


@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
int hv_remote_flush_tlb(struct kvm *kvm);
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
#else /* !CONFIG_HYPERV */
static inline int hv_remote_flush_tlb(struct kvm *kvm)
{
return -EOPNOTSUPP;
}
static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
}


@ -10,26 +10,4 @@
#include "../hyperv.h"
/*
* Hyper-V uses the software reserved 32 bytes in VMCB
* control area to expose SVM enlightenments to guests.
*/
struct hv_enlightenments {
struct __packed hv_enlightenments_control {
u32 nested_flush_hypercall:1;
u32 msr_bitmap:1;
u32 enlightened_npt_tlb: 1;
u32 reserved:29;
} __packed hv_enlightenments_control;
u32 hv_vp_id;
u64 hv_vm_id;
u64 partition_assist_page;
u64 reserved;
} __packed;
/*
* Hyper-V uses the software reserved clean bit in VMCB
*/
#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
#endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */


@ -179,8 +179,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
*/
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
struct hv_enlightenments *hve =
(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
int i;
/*
@ -194,7 +193,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
if (!svm->nested.force_msr_bitmap_recalc &&
kvm_hv_hypercall_enabled(&svm->vcpu) &&
hve->hv_enlightenments_control.msr_bitmap &&
(svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
(svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
goto set_msrpm_base_pa;
if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
@ -369,8 +368,8 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
/* Hyper-V extensions (Enlightened VMCB) */
if (kvm_hv_hypercall_enabled(vcpu)) {
to->clean = from->clean;
memcpy(to->reserved_sw, from->reserved_sw,
sizeof(struct hv_enlightenments));
memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
sizeof(to->hv_enlightenments));
}
}
@ -1485,7 +1484,7 @@ static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
dst->virt_ext = from->virt_ext;
dst->pause_filter_count = from->pause_filter_count;
dst->pause_filter_thresh = from->pause_filter_thresh;
/* 'clean' and 'reserved_sw' are not changed by KVM */
/* 'clean' and 'hv_enlightenments' are not changed by KVM */
}
static int svm_get_nested_state(struct kvm_vcpu *vcpu,


@ -3719,7 +3719,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@ -3736,6 +3736,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
svm->current_vmcb->asid_generation--;
}
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{
hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
/*
* When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
* flush the NPT mappings via hypercall as flushing the ASID only
* affects virtual to physical mappings, it does not invalidate guest
* physical to host physical mappings.
*/
if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
hyperv_flush_guest_mapping(root_tdp);
svm_flush_tlb_asid(vcpu);
}
static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{
/*
* When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
* flushes should be routed to hv_remote_flush_tlb() without requesting
* a "regular" remote flush. Reaching this point means either there's
* a KVM bug or a prior hv_remote_flush_tlb() call failed, both of
* which might be fatal to the guest. Yell, but try to recover.
*/
if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
hv_remote_flush_tlb(vcpu->kvm);
svm_flush_tlb_asid(vcpu);
}
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
struct vcpu_svm *svm = to_svm(vcpu);
@ -4733,10 +4764,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_rflags = svm_set_rflags,
.get_if_flag = svm_get_if_flag,
.flush_tlb_all = svm_flush_tlb_current,
.flush_tlb_all = svm_flush_tlb_all,
.flush_tlb_current = svm_flush_tlb_current,
.flush_tlb_gva = svm_flush_tlb_gva,
.flush_tlb_guest = svm_flush_tlb_current,
.flush_tlb_guest = svm_flush_tlb_asid,
.vcpu_pre_run = svm_vcpu_pre_run,
.vcpu_run = svm_vcpu_run,


@ -151,7 +151,10 @@ struct vmcb_ctrl_area_cached {
u64 nested_cr3;
u64 virt_ext;
u32 clean;
u8 reserved_sw[32];
union {
struct hv_vmcb_enlightenments hv_enlightenments;
u8 reserved_sw[32];
};
};
struct svm_nested_state {


@ -16,7 +16,7 @@
int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightenments *hve;
struct hv_vmcb_enlightenments *hve;
struct hv_partition_assist_pg **p_hv_pa_pg =
&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
@ -26,13 +26,13 @@ int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
if (!*p_hv_pa_pg)
return -ENOMEM;
hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
hve->partition_assist_page = __pa(*p_hv_pa_pg);
hve->hv_vm_id = (unsigned long)vcpu->kvm;
if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
hve->hv_enlightenments_control.nested_flush_hypercall = 1;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
vmcb_mark_dirty(to_svm(vcpu)->vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
}
return 0;


@ -6,6 +6,8 @@
#ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
#define __ARCH_X86_KVM_SVM_ONHYPERV_H__
#include <asm/mshyperv.h>
#if IS_ENABLED(CONFIG_HYPERV)
#include "kvm_onhyperv.h"
@ -15,10 +17,20 @@ static struct kvm_x86_ops svm_x86_ops;
int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
{
struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB &&
!!hve->hv_enlightenments_control.enlightened_npt_tlb;
}
static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
{
struct hv_enlightenments *hve =
(struct hv_enlightenments *)vmcb->control.reserved_sw;
struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
BUILD_BUG_ON(sizeof(vmcb->control.hv_enlightenments) !=
sizeof(vmcb->control.reserved_sw));
if (npt_enabled &&
ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
@ -60,27 +72,29 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
struct kvm_vcpu *vcpu)
{
struct vmcb *vmcb = to_svm(vcpu)->vmcb;
struct hv_enlightenments *hve =
(struct hv_enlightenments *)vmcb->control.reserved_sw;
struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
if (hve->hv_enlightenments_control.msr_bitmap)
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
}
static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
struct kvm_vcpu *vcpu)
static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
{
struct hv_enlightenments *hve =
(struct hv_enlightenments *)vmcb->control.reserved_sw;
struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
u32 vp_index = kvm_hv_get_vpindex(vcpu);
if (hve->hv_vp_id != vp_index) {
hve->hv_vp_id = vp_index;
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
}
}
#else
static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
{
return false;
}
static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
{
}


@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
#ifdef CONFIG_AMD_NB
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;
if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif


@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
}
if (sinfo->msgdigest_len != sig->digest_size) {
pr_debug("Sig %u: Invalid digest size (%u)\n",
sinfo->index, sinfo->msgdigest_len);
pr_warn("Sig %u: Invalid digest size (%u)\n",
sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
pr_debug("Sig %u: Message digest doesn't match\n",
sinfo->index);
pr_warn("Sig %u: Message digest doesn't match\n",
sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
@ -478,7 +478,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen)
{
if (pkcs7->data) {
pr_debug("Data already supplied\n");
pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;


@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
break;
default:
pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
pr_debug("Unsigned PE binary\n");
pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
pr_debug("Signature wrapper too short\n");
pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
/* Both pesign and sbsign round up the length of certificate table
* (in optional header data directories) to 8 byte alignment.
/* sbsign rounds up the length of certificate table (in optional
* header data directories) to 8 byte alignment. However, the PE
* specification states that while entries are 8-byte aligned, this is
* not included in their length, and as a result, pesign has not
* rounded up since 0.110.
*/
if (round_up(wrapper.length, 8) != ctx->sig_len) {
pr_debug("Signature wrapper len wrong\n");
if (wrapper.length > ctx->sig_len) {
pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
ctx->sig_len, wrapper.length);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
pr_debug("Signature is not revision 2.0\n");
pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
pr_debug("Signature certificate type is not PKCS\n");
pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
pr_debug("Signature data missing\n");
pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
@ -194,7 +198,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
return 0;
}
not_pkcs7:
pr_debug("Signature data not PKCS#7\n");
pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
pr_debug("Digest size mismatch (%zx != %x)\n",
digest_size, ctx->digest_len);
pr_warn("Digest size mismatch (%zx != %x)\n",
digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
pr_debug("Digest mismatch\n");
pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");


@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "M17T"),
},
},
{
.ident = "MEDION S17413",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
},
},
{ }
};


@ -530,6 +530,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 3830TG */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 4810T */


@ -1571,17 +1571,18 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
get_device(&ub->cdev_dev);
ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = add_disk(disk);
if (ret) {
/*
* Has to drop the reference since ->free_disk won't be
* called in case of add_disk failure.
*/
ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
ub->dev_info.state = UBLK_S_DEV_LIVE;
out_put_disk:
if (ret)
put_disk(disk);


@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
len = strlen(tmp) + 1;
board_type = devm_kzalloc(dev, len, GFP_KERNEL);
strscpy(board_type, tmp, len);
for (i = 0; i < board_type[i]; i++) {
for (i = 0; i < len; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
}


@ -144,8 +144,9 @@ static int rs9_regmap_i2c_read(void *context,
static const struct regmap_config rs9_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
.cache_type = REGCACHE_FLAT,
.max_register = RS9_REG_BCP,
.num_reg_defaults_raw = 0x8,
.rd_table = &rs9_readable_table,
.wr_table = &rs9_writeable_table,
.reg_write = rs9_regmap_i2c_write,


@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xffff,
.fast_io = true,
};
@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
struct resource *res;
struct regmap_config reg_config = sprdclk_regmap_config;
if (of_find_property(node, "sprd,syscon", NULL)) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
return PTR_ERR(regmap);
}
} else {
base = devm_platform_ioremap_resource(pdev, 0);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
reg_config.max_register = resource_size(res) - reg_config.reg_stride;
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&sprdclk_regmap_config);
&reg_config);
if (IS_ERR(regmap)) {
pr_err("failed to init regmap\n");
return PTR_ERR(regmap);


@ -75,6 +75,7 @@
#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
#define REG_GLOBAL_INTSTATE(idx) (0x0050 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
admac_stop_chan(adchan);
admac_reset_rings(adchan);
adchan->current_tx = NULL;
if (adchan->current_tx) {
list_add_tail(&adchan->current_tx->node, &adchan->to_free);
adchan->current_tx = NULL;
}
/*
* Descriptors can only be freed after the tasklet
* has been killed (in admac_synchronize).
@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
static irqreturn_t admac_interrupt(int irq, void *devid)
{
struct admac_data *ad = devid;
u32 rx_intstate, tx_intstate;
u32 rx_intstate, tx_intstate, global_intstate;
int i;
rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
if (!tx_intstate && !rx_intstate)
if (!tx_intstate && !rx_intstate && !global_intstate)
return IRQ_NONE;
for (i = 0; i < ad->nchannels; i += 2) {
@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
rx_intstate >>= 1;
}
if (global_intstate) {
dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
global_intstate);
writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
}
return IRQ_HANDLED;
}
@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);


@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"IdeaPad Duet 3 10IGL5"),
},
},
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
},
{},
};


@ -624,6 +624,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) {
struct amdgpu_job *job;
/* For non-scheduler bad job, i.e. failed ib test, we need to signal
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
job = container_of(old, struct amdgpu_job, hw_fence);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
}


@ -1316,6 +1316,11 @@ static int gfx_v11_0_sw_init(void *handle)
break;
}
/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
amdgpu_sriov_is_pp_one_vf(adev))
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@ -4625,6 +4630,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false;
}
static int gfx_v11_0_post_soft_reset(void *handle)
{
/**
* GFX soft reset will impact MES, need resume MES when do GFX soft reset
*/
return amdgpu_mes_resume((struct amdgpu_device *)handle);
}
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@ -6068,6 +6081,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.wait_for_idle = gfx_v11_0_wait_for_idle,
.soft_reset = gfx_v11_0_soft_reset,
.check_soft_reset = gfx_v11_0_check_soft_reset,
.post_soft_reset = gfx_v11_0_post_soft_reset,
.set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state,


@ -175,6 +175,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
static void dm_helpers_construct_old_payload(
struct dc_link *link,
int pbn_per_slot,
struct drm_dp_mst_atomic_payload *new_payload,
struct drm_dp_mst_atomic_payload *old_payload)
{
struct link_mst_stream_allocation_table current_link_table =
link->mst_stream_alloc_table;
struct link_mst_stream_allocation *dc_alloc;
int i;
*old_payload = *new_payload;
/* Set correct time_slots/PBN of old payload.
* other fields (delete & dsc_enabled) in
* struct drm_dp_mst_atomic_payload are don't care fields
* while calling drm_dp_remove_payload()
*/
for (i = 0; i < current_link_table.stream_count; i++) {
dc_alloc =
&current_link_table.stream_allocations[i];
if (dc_alloc->vcp_id == new_payload->vcpi) {
old_payload->time_slots = dc_alloc->slot_count;
old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
break;
}
}
/* make sure there is an old payload*/
ASSERT(i != current_link_table.stream_count);
}
/*
* Writes payload allocation table in immediate downstream device.
*/
@ -186,7 +220,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@ -202,17 +236,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
if (enable)
drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
else
drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
if (enable) {
target_payload = new_payload;
drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
} else {
/* construct old payload by VCPI*/
dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
new_payload, &old_payload);
target_payload = &old_payload;
drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
}
/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
* sequence. copy DRM MST allocation to dc */
fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}


@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table);
if (ret)
return ret;
if (skutable->DriverReportedClocks.GameClockAc &&
(dpm_table->dpm_levels[dpm_table->count - 1].value >
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
struct smu_13_0_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
/* uclk dpm table */
dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
/* gfxclk dpm table */
dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
/* socclk dpm table */
dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
/* fclk dpm table */
dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
case SMU_VCLK1:
/* vclk dpm table */
dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
case SMU_DCLK1:
/* dclk dpm table */
dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
dev_err(smu->adev->dev, "Unsupported clock type!\n");
return -EINVAL;
}
if (min)
*min = dpm_table->min;
if (max)
*max = dpm_table->max;
return 0;
}
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
@ -1328,9 +1387,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
if (driver_clocks.GameClockAc &&
(driver_clocks.GameClockAc < gfx_table->max))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
@ -1347,12 +1414,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max;
/*
* For now, just use the mininum clock frequency.
* TODO: update them when the real pstate settings available
*/
pstate_table->gfxclk_pstate.standard = gfx_table->min;
pstate_table->uclk_pstate.standard = mem_table->min;
if (driver_clocks.BaseClockAc &&
driver_clocks.BaseClockAc < gfx_table->max)
pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
else
pstate_table->gfxclk_pstate.standard = gfx_table->max;
pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min;
@ -1675,7 +1742,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_7_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled,


@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
kfree(priv);
return ret;
}


@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
/* Non exact match to match all versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */


@ -299,9 +299,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
/* FIXME: Move all DSS handling to intel_vdsc.c */
if (DISPLAY_VER(dev_priv) >= 12) {
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
} else {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
}
dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@ -322,16 +334,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */


@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
return guid_equal(&driver->id[0].guid,
&device->fw_client->props.protocol_name);
return(device->fw_client ? guid_equal(&driver->id[0].guid,
&device->fw_client->props.protocol_name) : 0);
}
/**


@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = {
.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
};
static const struct cpu_info cpu_skx = {
.reg = &resolved_cores_reg_hsx,
.min_peci_revision = 0x33,
.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};
static const struct cpu_info cpu_icx = {
.reg = &resolved_cores_reg_icx,
.min_peci_revision = 0x40,
@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
},
{
.name = "peci_cpu.cputemp.skx",
.driver_data = (kernel_ulong_t)&cpu_hsx,
.driver_data = (kernel_ulong_t)&cpu_skx,
},
{
.name = "peci_cpu.cputemp.icx",


@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
ctx->pcc_comm_addr = (void __force *)ioremap(
ctx->comm_base_addr,
pcc_chan->shmem_size);
ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
ctx->comm_base_addr,
pcc_chan->shmem_size);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
pcc_chan->shmem_size,
MEMREMAP_WB);
ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
ctx->comm_base_addr,
pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;


@ -314,6 +314,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
max_write == 0)
break;
}
/*
* Disable the TX_EMPTY interrupt after finishing all the messages to
* avoid overwhelming the CPU.
*/
if (ctlr->msg_tx_idx == ctlr->msg_num)
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
}
static irqreturn_t hisi_i2c_irq(int irq, void *context)


@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
if (num == 1 && msgs[0].len == 0)
goto stop;
lpi2c_imx->rx_buf = NULL;
lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);


@ -48,9 +48,9 @@
* SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
* baud clock required to program 'Hold Time' at X KHz.
*/
#define SR_HOLD_TIME_100K_TICKS 133
#define SR_HOLD_TIME_400K_TICKS 20
#define SR_HOLD_TIME_1000K_TICKS 11
#define SR_HOLD_TIME_100K_TICKS 150
#define SR_HOLD_TIME_400K_TICKS 20
#define SR_HOLD_TIME_1000K_TICKS 12
#define SMB_CORE_COMPLETION_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x23)
@ -65,17 +65,17 @@
* the baud clock required to program 'fair idle delay' at X KHz. Fair idle
* delay establishes the MCTP T(IDLE_DELAY) period.
*/
#define FAIR_BUS_IDLE_MIN_100K_TICKS 969
#define FAIR_BUS_IDLE_MIN_400K_TICKS 157
#define FAIR_BUS_IDLE_MIN_1000K_TICKS 157
#define FAIR_BUS_IDLE_MIN_100K_TICKS 992
#define FAIR_BUS_IDLE_MIN_400K_TICKS 500
#define FAIR_BUS_IDLE_MIN_1000K_TICKS 500
/*
* FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
* baud clock required to satisfy the fairness protocol at X KHz.
*/
#define FAIR_IDLE_DELAY_100K_TICKS 1000
#define FAIR_IDLE_DELAY_400K_TICKS 500
#define FAIR_IDLE_DELAY_1000K_TICKS 500
#define FAIR_IDLE_DELAY_100K_TICKS 963
#define FAIR_IDLE_DELAY_400K_TICKS 156
#define FAIR_IDLE_DELAY_1000K_TICKS 156
#define SMB_IDLE_SCALING_100K \
((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
@ -105,7 +105,7 @@
*/
#define BUS_CLK_100K_LOW_PERIOD_TICKS 156
#define BUS_CLK_400K_LOW_PERIOD_TICKS 41
#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
/*
* BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
@ -131,7 +131,7 @@
*/
#define CLK_SYNC_100K 4
#define CLK_SYNC_400K 4
#define CLK_SYNC_1000K 4
#define CLK_SYNC_1000K 4
#define SMB_CORE_DATA_TIMING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x40)
@ -142,25 +142,25 @@
* determines the SCLK hold time following SDAT driven low during the first
* START bit in a transfer.
*/
#define FIRST_START_HOLD_100K_TICKS 22
#define FIRST_START_HOLD_400K_TICKS 16
#define FIRST_START_HOLD_1000K_TICKS 6
#define FIRST_START_HOLD_100K_TICKS 23
#define FIRST_START_HOLD_400K_TICKS 8
#define FIRST_START_HOLD_1000K_TICKS 12
/*
* STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
* required to program 'STOP_SETUP' timer at X KHz. This timer determines the
* SDAT setup time from the rising edge of SCLK for a STOP condition.
*/
#define STOP_SETUP_100K_TICKS 157
#define STOP_SETUP_100K_TICKS 150
#define STOP_SETUP_400K_TICKS 20
#define STOP_SETUP_1000K_TICKS 12
#define STOP_SETUP_1000K_TICKS 12
/*
* RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
* required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
* SDAT setup time from the rising edge of SCLK for a repeated START condition.
*/
#define RESTART_SETUP_100K_TICKS 157
#define RESTART_SETUP_100K_TICKS 156
#define RESTART_SETUP_400K_TICKS 20
#define RESTART_SETUP_1000K_TICKS 12
@ -169,7 +169,7 @@
* required to program 'DATA_HOLD' timer at X KHz. This timer determines the
* SDAT hold time following SCLK driven low.
*/
#define DATA_HOLD_100K_TICKS 2
#define DATA_HOLD_100K_TICKS 12
#define DATA_HOLD_400K_TICKS 2
#define DATA_HOLD_1000K_TICKS 2
@ -190,35 +190,35 @@
* Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
* (BUS_IDLE_MIN_XK_TICKS[7] ? 4,1)
*/
#define BUS_IDLE_MIN_100K_TICKS 167UL
#define BUS_IDLE_MIN_400K_TICKS 139UL
#define BUS_IDLE_MIN_1000K_TICKS 133UL
#define BUS_IDLE_MIN_100K_TICKS 36UL
#define BUS_IDLE_MIN_400K_TICKS 10UL
#define BUS_IDLE_MIN_1000K_TICKS 4UL
/*
* CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
* SMBus Controller Cumulative Time-Out duration =
* CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
*/
#define CTRL_CUM_TIME_OUT_100K_TICKS 159
#define CTRL_CUM_TIME_OUT_400K_TICKS 159
#define CTRL_CUM_TIME_OUT_1000K_TICKS 159
#define CTRL_CUM_TIME_OUT_100K_TICKS 76
#define CTRL_CUM_TIME_OUT_400K_TICKS 76
#define CTRL_CUM_TIME_OUT_1000K_TICKS 76
/*
* TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
* SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
* Baud_Clock_Period x 4096
*/
#define TARGET_CUM_TIME_OUT_100K_TICKS 199
#define TARGET_CUM_TIME_OUT_400K_TICKS 199
#define TARGET_CUM_TIME_OUT_1000K_TICKS 199
#define TARGET_CUM_TIME_OUT_100K_TICKS 95
#define TARGET_CUM_TIME_OUT_400K_TICKS 95
#define TARGET_CUM_TIME_OUT_1000K_TICKS 95
/*
* CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
* Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
*/
#define CLOCK_HIGH_TIME_OUT_100K_TICKS 204
#define CLOCK_HIGH_TIME_OUT_400K_TICKS 204
#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 204
#define CLOCK_HIGH_TIME_OUT_100K_TICKS 97
#define CLOCK_HIGH_TIME_OUT_400K_TICKS 97
#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 97
#define TO_SCALING_100K \
((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \


@ -342,18 +342,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
* ocores_isr(), we just add our polling code around it.
*
* It can run in atomic context
*
* Return: 0 on success, -ETIMEDOUT on timeout
*/
static void ocores_process_polling(struct ocores_i2c *i2c)
static int ocores_process_polling(struct ocores_i2c *i2c)
{
while (1) {
irqreturn_t ret;
int err;
irqreturn_t ret;
int err = 0;
while (1) {
err = ocores_poll_wait(i2c);
if (err) {
i2c->state = STATE_ERROR;
if (err)
break; /* timeout */
}
ret = ocores_isr(-1, i2c);
if (ret == IRQ_NONE)
@ -364,13 +364,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
break;
}
}
return err;
}
static int ocores_xfer_core(struct ocores_i2c *i2c,
struct i2c_msg *msgs, int num,
bool polling)
{
int ret;
int ret = 0;
u8 ctrl;
ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@ -388,15 +390,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
if (polling) {
ocores_process_polling(i2c);
ret = ocores_process_polling(i2c);
} else {
ret = wait_event_timeout(i2c->wait,
(i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ);
if (ret == 0) {
ocores_process_timeout(i2c);
return -ETIMEDOUT;
}
if (wait_event_timeout(i2c->wait,
(i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ) == 0)
ret = -ETIMEDOUT;
}
if (ret) {
ocores_process_timeout(i2c);
return ret;
}
return (i2c->state == STATE_DONE) ? num : -EIO;


@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
struct ib_sa_mcmember_rec rec;
int ret = 0;
if (id_priv->qkey) {
if (qkey && id_priv->qkey != qkey)
return -EINVAL;
return 0;
}
if (qkey) {
id_priv->qkey = qkey;
return 0;
}
switch (id_priv->id.ps) {
case RDMA_PS_UDP:
case RDMA_PS_IB:
@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
if (!qkey ||
(id_priv->qkey && (id_priv->qkey != qkey)))
return -EINVAL;
id_priv->qkey = qkey;
return 0;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (id_priv->id.qp_type == IB_QPT_UD) {
ret = cma_set_qkey(id_priv, 0);
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
@ -4558,7 +4557,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
memset(&rep, 0, sizeof rep);
rep.status = status;
if (status == IB_SIDR_SUCCESS) {
ret = cma_set_qkey(id_priv, qkey);
if (qkey)
ret = cma_set_qkey(id_priv, qkey);
else
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
rep.qp_num = id_priv->qp_num;
@ -4763,9 +4765,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
enum ib_gid_type gid_type;
struct net_device *ndev;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
else
if (status)
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
status);
@ -4793,7 +4793,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
}
event->param.ud.qp_num = 0xFFFFFF;
event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
event->param.ud.qkey = id_priv->qkey;
out:
if (ndev)
@ -4812,8 +4812,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
goto out;
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
if (!ret) {
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
}
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
WARN_ON(ret);
@ -4866,9 +4869,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (ret)
return ret;
ret = cma_set_qkey(id_priv, 0);
if (ret)
return ret;
if (!id_priv->qkey) {
ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
}
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
rec.qkey = cpu_to_be32(id_priv->qkey);
@ -4945,9 +4950,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
ib.rec.pkey = cpu_to_be16(0xffff);
if (id_priv->id.ps == RDMA_PS_UDP)
ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev)
@ -4973,6 +4975,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (err || !ib.rec.mtu)
return err ?: -EINVAL;
if (!id_priv->qkey)
cma_set_default_qkey(id_priv);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&ib.rec.port_gid);
INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
@ -4998,6 +5003,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
return -EINVAL;
if (id_priv->id.qp_type != IB_QPT_UD)
return -EINVAL;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;


@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
else
ret = device->ops.create_ah(ah, &init_attr, NULL);
if (ret) {
if (ah->sgid_attr)
rdma_put_gid_attr(ah->sgid_attr);
kfree(ah);
return ERR_PTR(ret);
}


@ -420,7 +420,7 @@ struct erdma_reg_mr_sqe {
};
/* EQ related. */
#define ERDMA_DEFAULT_EQ_DEPTH 256
#define ERDMA_DEFAULT_EQ_DEPTH 4096
/* ceqe */
#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)


@ -56,7 +56,7 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
{
struct net_device *netdev;
int ret = -ENODEV;
int ret = -EPROBE_DEFER;
/* Already bound to a net_device, so we skip. */
if (dev->netdev)


@ -402,7 +402,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
mr->mem.mtt_nents);
if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
/* Copy SGLs to SQE content to accelerate */
memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,


@ -11,7 +11,7 @@
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 4096
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1


@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
* irdma_find_listener - find a cm node listening on this addr-port pair
* @cm_core: cm's core
* @dst_addr: listener ip addr
* @ipv4: flag indicating IPv4 when true
* @dst_port: listener tcp port num
* @vlan_id: virtual LAN ID
* @listener_state: state to match with listen node's
*/
static struct irdma_cm_listener *
irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
u16 vlan_id, enum irdma_cm_listener_state listener_state)
irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
u16 dst_port, u16 vlan_id,
enum irdma_cm_listener_state listener_state)
{
struct irdma_cm_listener *listen_node;
static const u32 ip_zero[4] = { 0, 0, 0, 0 };
@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry (listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
if (listen_port != dst_port ||
if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
!(listener_state & listen_node->listener_state))
continue;
/* compare node pair, return node handle if a match */
@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
unsigned long flags;
/* cannot have multiple matching listeners */
listener = irdma_find_listener(cm_core, cm_info->loc_addr,
cm_info->loc_port, cm_info->vlan_id,
IRDMA_CM_LISTENER_EITHER_STATE);
listener =
irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
cm_info->loc_port, cm_info->vlan_id,
IRDMA_CM_LISTENER_EITHER_STATE);
if (listener &&
listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
refcount_dec(&listener->refcnt);
@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
listener = irdma_find_listener(cm_core,
cm_info.loc_addr,
cm_info.ipv4,
cm_info.loc_port,
cm_info.vlan_id,
IRDMA_CM_LISTENER_ACTIVE_STATE);


@ -41,7 +41,7 @@
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
#define IRDMA_DEFAULT_RETRANS 8
#define IRDMA_DEFAULT_RETRANS 32
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff


@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_XFFL,
IRDMA_HMC_IW_Q1,
IRDMA_HMC_IW_Q1FL,
IRDMA_HMC_IW_PBLE,
IRDMA_HMC_IW_TIMER,
IRDMA_HMC_IW_FSIMC,
IRDMA_HMC_IW_FSIAV,
@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
info.entry_type = rf->sd_type;
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
continue;
if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;


@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
/* remove the SQ WR by moving SQ tail*/
IRDMA_RING_SET_TAIL(*sq_ring,
sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
kfree(cmpl);
continue;
}
ibdev_dbg(iwqp->iwscq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id);


@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;


@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
return mtd_read(mtd, pos, len, &retlen, buf);
if (!sect_size) {
ret = mtd_read(mtd, pos, len, &retlen, buf);
if (ret && !mtd_is_bitflip(ret))
return ret;
return 0;
}
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
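
A minimal sketch of the pattern the three hunks above apply (hypothetical helper, not code from the driver): mtd_read() reports an ECC-corrected bit-flip as -EUCLEAN, and mtdblock now treats that as valid data instead of failing the request; only genuine errors (e.g. -EBADMSG, -EIO) are propagated. Needs <linux/mtd/mtd.h>.

static int mtdblock_read_tolerant(struct mtd_info *mtd, loff_t pos,
				  size_t len, u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, pos, len, &retlen, buf);
	if (ret && !mtd_is_bitflip(ret))
		return ret;		/* hard error, not a corrected bit-flip */
	if (retlen != len)
		return -EIO;
	return 0;			/* data is usable even if ret == -EUCLEAN */
}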


@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
if (raw) {
len = mtd->writesize + mtd->oobsize;
cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
goto out;
cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
return ret;
cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
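
For context on the mask width: with only 6 bits for the length, any transfer whose size is a multiple of 64 bytes (for example a 2048-byte page plus 64 bytes of OOB) ends up encoded as length 0 in the command word. A standalone check, with GENMASK redefined locally and assuming a 64-bit unsigned long:

#include <stdio.h>

#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	unsigned long len = 2048 + 64;	/* writesize + oobsize on a 2 KiB NAND */

	printf("6-bit mask:  %lu\n", len & GENMASK(5, 0));	/* 0    */
	printf("14-bit mask: %lu\n", len & GENMASK(13, 0));	/* 2112 */
	return 0;
}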


@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
if (conf->timings.mode > 3)
return -EOPNOTSUPP;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;


@ -664,12 +664,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
ubi->vid_hdr_alsize)) {
ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
return -EINVAL;
}
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@ -687,6 +681,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
/*
* Memory allocation for VID header is ubi->vid_hdr_alsize
* which is described in comments in io.c.
* Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
* ubi->vid_hdr_alsize, so that all vid header operations
* won't access memory out of bounds.
*/
if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
" + VID header size(%zu) > VID header aligned size(%d).",
ubi->vid_hdr_offset, ubi->vid_hdr_shift,
UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
return -EINVAL;
}
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
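
Worked example of the new bound (UBI_VID_HDR_SIZE is 64 bytes): with hdrs_min_io_size of 2048, vid_hdr_alsize = ALIGN(64, 2048) = 2048. A vid_hdr_offset of 2048 aligns down to 2048, so vid_hdr_shift is 0 and 0 + 64 <= 2048 passes, whereas the removed check rejected this perfectly valid layout because it compared the absolute offset (2048 + 64 > 2048). A crafted offset such as 2040 aligns down to 0, giving a shift of 2040, and 2040 + 64 = 2104 > 2048 is refused before any VID header operation can run past the 2048-byte buffer.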


@ -575,7 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
* @nested: denotes whether the work_sem is already held in read mode
* @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@ -1131,7 +1131,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);


@ -3266,7 +3266,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
(combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
goto out;
saddr = &combined->ip6.saddr;
@ -3288,7 +3289,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
bond_validate_na(bond, slave, saddr, daddr);
bond_validate_na(bond, slave, daddr, saddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_na(bond, slave, saddr, daddr);


@ -1010,6 +1010,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
#ifdef CONFIG_MACB_USE_HWSTAMP
if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
addr &= ~GEM_BIT(DMA_RXVALID);
#endif
return addr;
}


@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@ -157,15 +155,20 @@ struct iavf_vlan {
u16 tpid;
};
enum iavf_vlan_state_t {
IAVF_VLAN_INVALID,
IAVF_VLAN_ADD, /* filter needs to be added */
IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */
IAVF_VLAN_ACTIVE, /* filter is accepted by PF */
IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */
IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */
IAVF_VLAN_REMOVE, /* filter needs to be removed from list */
};
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
struct {
u8 is_new_vlan:1; /* filter is new, wait for PF answer */
u8 remove:1; /* filter needs to be removed */
u8 add:1; /* filter needs to be added */
u8 padding:5;
};
enum iavf_vlan_state_t state;
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@ -257,6 +260,7 @@ struct iavf_adapter {
wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
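
For orientation, a rough summary of the filter lifecycle implied by the iavf hunks that follow (distilled here, not text from the driver):

/*
 *   iavf_add_vlan()                         ->  IAVF_VLAN_ADD
 *   iavf_add_vlans() sends the add to PF    ->  IAVF_VLAN_IS_NEW
 *   PF accepts the filter                   ->  IAVF_VLAN_ACTIVE
 *   PF rejects it (iavf_vlan_add_reject())  ->  filter freed
 *   iavf_del_vlan()                         ->  IAVF_VLAN_REMOVE
 *   interface goes down                     ->  IAVF_VLAN_DISABLE
 *   iavf_del_vlans() sends the delete       ->  DISABLE becomes IAVF_VLAN_INACTIVE,
 *                                               REMOVE is freed
 *   iavf_restore_filters() on ifup          ->  IAVF_VLAN_INACTIVE back to IAVF_VLAN_ADD
 */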


@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->vlan = vlan;
list_add_tail(&f->list, &adapter->vlan_filter_list);
f->add = true;
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
f->state = IAVF_VLAN_REMOVE;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}
@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
u16 vid;
struct iavf_vlan_filter *f;
/* re-add all VLAN filters */
for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
spin_lock_bh(&adapter->mac_vlan_list_lock);
for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->state == IAVF_VLAN_INACTIVE)
f->state = IAVF_VLAN_ADD;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
/**
@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
*/
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
return adapter->num_vlan_filters;
}
/**
@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
if (proto == cpu_to_be16(ETH_P_8021Q))
clear_bit(vid, adapter->vsi.active_cvlans);
else
clear_bit(vid, adapter->vsi.active_svlans);
return 0;
}
@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
}
}
/* remove all VLAN filters */
/* disable all VLAN filters */
list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
list) {
if (vlf->add) {
list_del(&vlf->list);
kfree(vlf);
} else {
vlf->remove = true;
}
}
list)
vlf->state = IAVF_VLAN_DISABLE;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@ -2905,6 +2899,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
list_del(&fv->list);
kfree(fv);
}
adapter->num_vlan_filters = 0;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@ -3122,9 +3117,6 @@ static void iavf_reset_task(struct work_struct *work)
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some


@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
if (f->vlan.tpid == ETH_P_8021Q)
clear_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
else
clear_bit(f->vlan.vid,
adapter->vsi.active_svlans);
if (f->state == IAVF_VLAN_IS_NEW) {
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
if (f->state == IAVF_VLAN_ADD)
count++;
}
if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add) {
if (f->state == IAVF_VLAN_ADD) {
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->add = false;
f->is_new_vlan = true;
f->state = IAVF_VLAN_IS_NEW;
if (i == count)
break;
}
@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add) {
if (f->state == IAVF_VLAN_ADD) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vlan->tpid = f->vlan.tpid;
i++;
f->add = false;
f->is_new_vlan = true;
f->state = IAVF_VLAN_IS_NEW;
}
}
@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
if (f->state == IAVF_VLAN_REMOVE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
} else if (f->remove) {
adapter->num_vlan_filters--;
} else if (f->state == IAVF_VLAN_DISABLE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
f->state = IAVF_VLAN_INACTIVE;
} else if (f->state == IAVF_VLAN_REMOVE ||
f->state == IAVF_VLAN_DISABLE) {
count++;
}
}
@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->remove) {
if (f->state == IAVF_VLAN_DISABLE) {
vvfl->vlan_id[i] = f->vlan.vid;
f->state = IAVF_VLAN_INACTIVE;
i++;
if (i == count)
break;
} else if (f->state == IAVF_VLAN_REMOVE) {
vvfl->vlan_id[i] = f->vlan.vid;
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
i++;
if (i == count)
break;
}
@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->remove) {
if (f->state == IAVF_VLAN_DISABLE ||
f->state == IAVF_VLAN_REMOVE) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vlan->tci = f->vlan.vid;
vlan->tpid = f->vlan.tpid;
list_del(&f->list);
kfree(f);
if (f->state == IAVF_VLAN_DISABLE) {
f->state = IAVF_VLAN_INACTIVE;
} else {
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
}
i++;
if (i == count)
break;
@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
vlf->add = true;
vlf->state = IAVF_VLAN_ADD;
adapter->aq_required |=
IAVF_FLAG_AQ_ADD_VLAN_FILTER;
@ -2252,7 +2263,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
vlf->add = true;
vlf->state = IAVF_VLAN_ADD;
aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@ -2436,15 +2447,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
else
set_bit(f->vlan.vid,
adapter->vsi.active_svlans);
}
if (f->state == IAVF_VLAN_IS_NEW)
f->state = IAVF_VLAN_ACTIVE;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}


@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
pci_reset_function(dev->pdev);
err = pci_reset_function(dev->pdev);
if (err) {
dev_err(&dev->pdev->dev,
"Adapter reset failed (%d). Please reboot\n",
err);
return err;
}
dev->flags &= ~QLCNIC_NEED_FLR;
}


@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
return err;
goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),


@ -191,7 +191,7 @@
#define MAX_ID_PS 2260U
#define DEFAULT_ID_PS 2000U
#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \
PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
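
On 32-bit builds GENMASK(31, 0) is a 32-bit unsigned long, so the product in PPM_TO_SUBNS_INC() can wrap before div_u64() ever sees a 64-bit value; GENMASK_ULL() keeps the whole multiplication in 64 bits. A standalone illustration with fixed-width types (the ppb value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask32 = 0xffffffffu;		/* GENMASK(31, 0) on 32-bit */
	uint64_t mask64 = 0xffffffffull;	/* GENMASK_ULL(31, 0)       */
	uint32_t ppb = 100;

	printf("32-bit product: %u\n", mask32 * ppb);		/* wraps */
	printf("64-bit product: %llu\n",
	       (unsigned long long)(mask64 * ppb));		/* 429496729500 */
	return 0;
}
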
@ -1337,6 +1337,17 @@ static int nxp_c45_probe(struct phy_device *phydev)
return ret;
}
static void nxp_c45_remove(struct phy_device *phydev)
{
struct nxp_c45_phy *priv = phydev->priv;
if (priv->ptp_clock)
ptp_clock_unregister(priv->ptp_clock);
skb_queue_purge(&priv->tx_queue);
skb_queue_purge(&priv->rx_queue);
}
static struct phy_driver nxp_c45_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
.set_loopback = genphy_c45_loopback,
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
},
};


@ -212,6 +212,12 @@ static const enum gpiod_flags gpio_flags[] = {
#define SFP_PHY_ADDR 22
#define SFP_PHY_ADDR_ROLLBALL 17
/* SFP_EEPROM_BLOCK_SIZE is the size of the chunk read from the EEPROM
* at a time. Some SFP modules and also some Linux I2C drivers do not like
* reads longer than 16 bytes.
*/
#define SFP_EEPROM_BLOCK_SIZE 16
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@ -1928,11 +1934,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
/* Some SFP modules and also some Linux I2C drivers do not like reads
* longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
* a time.
*/
sfp->i2c_block_size = 16;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@ -2615,6 +2617,7 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);


@ -729,7 +729,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
rcu_read_lock();
do {
while (likely(!mvmtxq->stopped &&
while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
&mvmtxq->state) &&
!test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
&mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
@ -754,42 +757,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
/*
* Please note that racing is handled very carefully here:
* mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
* deleted afterwards.
* This means that if:
* mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
* queue is allocated and we can TX.
* mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
* a race, should defer the frame.
* mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
* need to allocate the queue and defer the frame.
* mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
* queue is already scheduled for allocation, no need to allocate,
* should defer the frame.
*/
/* If the queue is allocated, TX and return. */
if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
/*
* Check that list is empty to avoid a race where txq_id is
* already updated, but the queue allocation work wasn't
* finished
*/
if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
return;
if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
!txq->sta) {
iwl_mvm_mac_itxq_xmit(hw, txq);
return;
}
/* The list is being deleted only after the queue is fully allocated. */
if (!list_empty(&mvmtxq->list))
return;
/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
* to handle any packets we leave on the txq now
*/
list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
schedule_work(&mvm->add_stream_wk);
spin_lock_bh(&mvm->add_stream_lock);
/* The list is being deleted only after the queue is fully allocated. */
if (list_empty(&mvmtxq->list) &&
/* recheck under lock */
!test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
schedule_work(&mvm->add_stream_wk);
}
spin_unlock_bh(&mvm->add_stream_lock);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \


@ -729,7 +729,10 @@ struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
bool stopped;
#define IWL_MVM_TXQ_STATE_STOP_FULL 0
#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
#define IWL_MVM_TXQ_STATE_READY 2
unsigned long state;
};
static inline struct iwl_mvm_txq *
@ -827,6 +830,7 @@ struct iwl_mvm {
struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
};
struct work_struct add_stream_wk; /* To add streams to queues */
spinlock_t add_stream_lock;
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;


@ -1184,6 +1184,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
init_waitqueue_head(&mvm->rx_sync_waitq);
@ -1680,7 +1681,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
txq = sta->txq[tid];
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
mvmtxq->stopped = !start;
if (start)
clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
else
set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);


@ -383,8 +383,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_tid(sta, tid);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
spin_unlock_bh(&mvm->add_stream_lock);
}
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@ -478,8 +481,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
spin_unlock_bh(&mvm->add_stream_lock);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@ -692,7 +698,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop the queue and wait for it to empty */
txq->stopped = true;
set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
@ -735,7 +741,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
out:
/* Continue using the queue */
txq->stopped = false;
clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
return ret;
}
@ -1443,12 +1449,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
* a queue in the function itself.
*/
if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
spin_lock_bh(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
spin_unlock_bh(&mvm->add_stream_lock);
continue;
}
list_del_init(&mvmtxq->list);
/* now we're ready, any remaining races/concurrency will be
* handled in iwl_mvm_mac_itxq_xmit()
*/
set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
local_bh_disable();
spin_lock(&mvm->add_stream_lock);
list_del_init(&mvmtxq->list);
spin_unlock(&mvm->add_stream_lock);
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
@ -1862,8 +1878,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct iwl_mvm_txq *mvmtxq =
iwl_mvm_txq_from_mac80211(sta->txq[i]);
spin_lock_bh(&mvm->add_stream_lock);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
list_del_init(&mvmtxq->list);
clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
spin_unlock_bh(&mvm->add_stream_lock);
}
}


@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
.can_ext_scan = true,
};
static const struct of_device_id mwifiex_pcie_of_match_table[] = {
static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
{ }


@ -479,7 +479,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8997" },


@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
return ret;
goto set_mask_fail;
}
ipc_pcie_config_aspm(ipc_pcie);
@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
imem_init_fail:
ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
set_mask_fail:
pci_disable_device(pci);
pci_enable_fail:
kfree(ipc_pcie);


@ -3074,7 +3074,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
else
ctrl->max_zeroes_sectors = 0;
if (nvme_ctrl_limited_cns(ctrl))
if (ctrl->subsys->subtype != NVME_NQN_NVME ||
nvme_ctrl_limited_cns(ctrl))
return 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);


@ -3551,6 +3551,9 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
.driver_data = NVME_QUIRK_BOGUS_NID |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },


@ -865,34 +865,32 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
const struct pin_desc *pd;
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
BIT(WAKE_CNTRL_OFF_S4);
pd = pin_desc_get(gpio_dev->pctrl, pin);
if (!pd)
return;
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + pin * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + pin * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
if (!pd)
continue;
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
int i;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
for (i = 0; i < desc->npins; i++)
amd_gpio_irq_init_pin(gpio_dev, i);
pin_reg = readl(gpio_dev->base + i * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
}
#ifdef CONFIG_PM_SLEEP
@ -945,10 +943,8 @@ static int amd_gpio_resume(struct device *dev)
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
if (!amd_gpio_should_save(gpio_dev, pin)) {
amd_gpio_irq_init_pin(gpio_dev, pin);
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
}
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;


@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
pirq = platform_get_irq(pdev, i);
if (pirq < 0)
continue;
ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
if (ret < 0)
return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);


@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_current_max = 0;
break;
default:
dev_err(dev, "Port %d: default case!\n", port->port_number);
dev_dbg(dev, "Port %d: default case!\n", port->port_number);
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}


@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
if (tmp < 0)
tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
charger->res_div) / 1000;
/*
@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
if (tmp < 0)
tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
bulk_reg, 2);


@ -509,9 +509,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
int i;
struct ses_component *scomp;
if (!edev->component[0].scratch)
return 0;
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
@ -602,8 +599,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
components++,
type_ptr[0],
name);
else
else if (components < edev->components)
ecomp = &edev->component[components++];
else
ecomp = ERR_PTR(-EINVAL);
if (!IS_ERR(ecomp)) {
if (addl_desc_ptr) {
@ -734,11 +733,6 @@ static int ses_intf_add(struct device *cdev,
components += type_ptr[1];
}
if (components == 0) {
sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
goto err_free;
}
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@ -780,9 +774,11 @@ static int ses_intf_add(struct device *cdev,
buf = NULL;
}
page2_not_supported:
scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
if (!scomp)
goto err_free;
if (components > 0) {
scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
if (!scomp)
goto err_free;
}
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
components, &ses_enclosure_callbacks);


@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = fbcon_registered_fb[newidx];
struct fb_info *oldinfo = NULL;
int found, err = 0, show_logo;
int err = 0, show_logo;
WARN_CONSOLE_UNLOCKED();
@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (oldidx != -1)
oldinfo = fbcon_registered_fb[oldidx];
found = search_fb_in_map(newidx);
if (!err && !found) {
if (!search_fb_in_map(newidx)) {
err = con2fb_acquire_newinfo(vc, info, unit);
if (!err)
con2fb_map[unit] = newidx;
if (err)
return err;
fbcon_add_cursor_work(info);
}
con2fb_map[unit] = newidx;
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
if (!err && oldinfo && !search_fb_in_map(oldidx))
if (oldinfo && !search_fb_in_map(oldidx))
con2fb_release_oldinfo(vc, oldinfo, info);
show_logo = (fg_console == 0 && !user &&
logo_shown != FBCON_LOGO_DONTSHOW);
if (!found)
fbcon_add_cursor_work(info);
con2fb_map_boot[unit] = newidx;
con2fb_init_display(vc, info, unit, show_logo);


@ -1116,6 +1116,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
/* only for kernel-internal use */
var.activate &= ~FB_ACTIVATE_KD_TEXT;
console_lock();
lock_fb_info(info);
ret = fbcon_modechange_possible(info, &var);


@ -2347,6 +2347,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
/*
* Check if the checksum implementation is a fast accelerated one.
* As-is this is a bit of a hack and should be replaced once the csum
* implementations provide that information themselves.
*/
switch (csum_type) {
case BTRFS_CSUM_TYPE_CRC32:
if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
break;
default:
break;
}
btrfs_info(fs_info, "using %s (%s) checksum algorithm",
btrfs_super_csum_name(csum_type),
crypto_shash_driver_name(csum_shash));


@ -1824,8 +1824,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
s->s_id);
btrfs_sb(s)->bdev_holder = fs_type;
if (!strstr(crc32c_impl(), "generic"))
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
error = btrfs_fill_super(s, fs_devices, data);
}
if (!error)
@ -1939,6 +1937,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);


@ -588,11 +588,15 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
}
/* If invalid preauth context warn but use what we requested, SHA-512 */
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
{
unsigned int len = le16_to_cpu(ctxt->DataLength);
/* If invalid preauth context warn but use what we requested, SHA-512 */
/*
* Caller checked that DataLength remains within SMB boundary. We still
* need to confirm that one HashAlgorithms member is accounted for.
*/
if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
pr_warn_once("server sent bad preauth context\n");
return;
@ -611,7 +615,11 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
{
unsigned int len = le16_to_cpu(ctxt->DataLength);
/* sizeof compress context is a one element compression capbility struct */
/*
* Caller checked that DataLength remains within SMB boundary. We still
* need to confirm that one CompressionAlgorithms member is accounted
* for.
*/
if (len < 10) {
pr_warn_once("server sent bad compression cntxt\n");
return;
@ -633,6 +641,11 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
unsigned int len = le16_to_cpu(ctxt->DataLength);
cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
/*
* Caller checked that DataLength remains within SMB boundary. We still
* need to confirm that one Cipher flexible array member is accounted
* for.
*/
if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
pr_warn_once("server sent bad crypto ctxt len\n");
return -EINVAL;
@ -679,6 +692,11 @@ static void decode_signing_ctx(struct TCP_Server_Info *server,
{
unsigned int len = le16_to_cpu(pctxt->DataLength);
/*
* Caller checked that DataLength remains within SMB boundary. We still
* need to confirm that one SigningAlgorithms flexible array member is
* accounted for.
*/
if ((len < 4) || (len > 16)) {
pr_warn_once("server sent bad signing negcontext\n");
return;
@ -720,14 +738,19 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
for (i = 0; i < ctxt_cnt; i++) {
int clen;
/* check that offset is not beyond end of SMB */
if (len_of_ctxts == 0)
break;
if (len_of_ctxts < sizeof(struct smb2_neg_context))
break;
pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
clen = le16_to_cpu(pctx->DataLength);
clen = sizeof(struct smb2_neg_context)
+ le16_to_cpu(pctx->DataLength);
/*
* 2.2.4 SMB2 NEGOTIATE Response
* Subsequent negotiate contexts MUST appear at the first 8-byte
* aligned offset following the previous negotiate context.
*/
if (i + 1 != ctxt_cnt)
clen = ALIGN(clen, 8);
if (clen > len_of_ctxts)
break;
@ -748,12 +771,10 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
else
cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
le16_to_cpu(pctx->ContextType));
if (rc)
break;
/* offsets must be 8 byte aligned */
clen = ALIGN(clen, 8);
offset += clen + sizeof(struct smb2_neg_context);
offset += clen;
len_of_ctxts -= clen;
}
return rc;
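
Worked example of the new accounting, assuming the usual 8-byte generic context header (ContextType, DataLength, Reserved): a context advertising DataLength 6 counts as clen = 8 + 6 = 14 bytes, padded to 16 for every context except the last, and that same padded value is both bounds-checked against len_of_ctxts and used to advance offset, so the header and pad bytes can no longer carry the walk past the end of the SMB response.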


@ -872,17 +872,21 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
}
static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
struct smb2_preauth_neg_context *pneg_ctxt)
struct smb2_preauth_neg_context *pneg_ctxt,
int len_of_ctxts)
{
__le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
/*
* sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
* which may not be present. Only check for used HashAlgorithms[1].
*/
if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
return STATUS_INVALID_PARAMETER;
if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
conn->preauth_info->Preauth_HashId =
SMB2_PREAUTH_INTEGRITY_SHA512;
err = STATUS_SUCCESS;
}
if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
return err;
conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
return STATUS_SUCCESS;
}
static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
@ -1010,7 +1014,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
break;
status = decode_preauth_ctxt(conn,
(struct smb2_preauth_neg_context *)pctx);
(struct smb2_preauth_neg_context *)pctx,
len_of_ctxts);
if (status != STATUS_SUCCESS)
break;
} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {


@ -33,6 +33,18 @@ struct trace_array;
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
/**
* trace_array_puts - write a constant string into the trace buffer.
* @tr: The trace array to write to
* @str: The constant string to write
*/
#define trace_array_puts(tr, str) \
({ \
str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1; \
})
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
const char *str, int size);
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,


@ -953,6 +953,7 @@ enum {
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
HCI_CONN_DROP,
HCI_CONN_CANCEL,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
HCI_CONN_SCANNING,


@ -765,13 +765,17 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
#if IS_ENABLED(CONFIG_IPV6)
static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
{
struct in6_addr mcaddr;
int i;
for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
if (ipv6_addr_equal(&targets[i], ip))
for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
addrconf_addr_solict_mult(&targets[i], &mcaddr);
if ((ipv6_addr_equal(&targets[i], ip)) ||
(ipv6_addr_equal(&mcaddr, ip)))
return i;
else if (ipv6_addr_any(&targets[i]))
break;
}
return -1;
}
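
The hunk above makes bond_get_targets_ip6() also match the solicited-node multicast address derived from each NS target, which is what a neighbour solicitation is actually sent to. A small userspace sketch of that derivation (RFC 4291; addrconf_addr_solict_mult() does the equivalent in the kernel):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static void solicited_node(const struct in6_addr *target, struct in6_addr *mc)
{
	/* ff02::1:ff00:0/104 with the low 24 bits copied from the target */
	memset(mc, 0, sizeof(*mc));
	mc->s6_addr[0]  = 0xff;
	mc->s6_addr[1]  = 0x02;
	mc->s6_addr[11] = 0x01;
	mc->s6_addr[12] = 0xff;
	memcpy(&mc->s6_addr[13], &target->s6_addr[13], 3);
}

int main(void)
{
	struct in6_addr target, mc;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8::1", &target);
	solicited_node(&target, &mc);
	inet_ntop(AF_INET6, &mc, buf, sizeof(buf));
	printf("%s\n", buf);	/* ff02::1:ff00:1 */
	return 0;
}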


@ -540,11 +540,15 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_rwsem held. The check can be skipped
* if on default hierarchy.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
static void cpuset_update_task_spread_flags(struct cpuset *cs,
struct task_struct *tsk)
{
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
return;
if (is_spread_page(cs))
task_set_spread_page(tsk);
else
@ -1507,7 +1511,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
spin_unlock_irq(&callback_lock);
if (adding || deleting)
update_tasks_cpumask(parent, tmp->new_cpus);
update_tasks_cpumask(parent, tmp->addmask);
/*
* Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@ -1765,10 +1769,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
/*
* Use the cpumasks in trialcs for tmpmasks when they are pointers
* to allocated cpumasks.
*
* Note that update_parent_subparts_cpumask() uses only addmask &
* delmask, but not new_cpus.
*/
tmp.addmask = trialcs->subparts_cpus;
tmp.delmask = trialcs->effective_cpus;
tmp.new_cpus = trialcs->cpus_allowed;
tmp.new_cpus = NULL;
#endif
retval = validate_change(cs, trialcs);
@ -1834,6 +1841,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
}
spin_unlock_irq(&callback_lock);
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Now trialcs->cpus_allowed is available */
tmp.new_cpus = trialcs->cpus_allowed;
#endif
/* effective_cpus will be updated here */
update_cpumasks_hier(cs, &tmp, false);
@ -2157,7 +2169,7 @@ static void update_tasks_flags(struct cpuset *cs)
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
cpuset_update_task_spread_flag(cs, task);
cpuset_update_task_spread_flags(cs, task);
css_task_iter_end(&it);
}
@ -2441,6 +2453,20 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
/*
* Check to see if a cpuset can accept a new task
* For v1, cpus_allowed and mems_allowed can't be empty.
* For v2, effective_cpus can't be empty.
* Note that in v1, effective_cpus = cpus_allowed.
*/
static int cpuset_can_attach_check(struct cpuset *cs)
{
if (cpumask_empty(cs->effective_cpus) ||
(!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
return -ENOSPC;
return 0;
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
@ -2455,16 +2481,9 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
percpu_down_write(&cpuset_rwsem);
/* allow moving tasks into an empty cpuset if on default hierarchy */
ret = -ENOSPC;
if (!is_in_v2_mode() &&
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
/*
* Task cannot be moved to a cpuset with empty effective cpus.
*/
if (cpumask_empty(cs->effective_cpus))
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
@ -2481,7 +2500,6 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
ret = 0;
out_unlock:
percpu_up_write(&cpuset_rwsem);
return ret;
@ -2490,25 +2508,46 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
css_cs(css)->attach_in_progress--;
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
}
/*
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
static cpumask_var_t cpus_attach;
static nodemask_t cpuset_attach_nodemask_to;
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
{
percpu_rwsem_assert_held(&cpuset_rwsem);
if (cs != &top_cpuset)
guarantee_online_cpus(task, cpus_attach);
else
cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset_update_task_spread_flags(cs, task);
}
static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_rwsem */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
struct cgroup_subsys_state *css;
@ -2523,20 +2562,8 @@ static void cpuset_attach(struct cgroup_taskset *tset)
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cgroup_taskset_for_each(task, css, tset) {
if (cs != &top_cpuset)
guarantee_online_cpus(task, cpus_attach);
else
cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset_update_task_spread_flag(cs, task);
}
cgroup_taskset_for_each(task, css, tset)
cpuset_attach_task(cs, task);
/*
* Change mm for all threadgroup leaders. This is expensive and may
@ -3218,6 +3245,68 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
percpu_up_write(&cpuset_rwsem);
}
/*
* In case the child is cloned into a cpuset different from its parent,
* additional checks are done to see if the move is allowed.
*/
static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
int ret;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return 0;
lockdep_assert_held(&cgroup_mutex);
percpu_down_write(&cpuset_rwsem);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
ret = task_can_attach(task, cs->effective_cpus);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
percpu_up_write(&cpuset_rwsem);
return ret;
}
static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
bool same_cs;
rcu_read_lock();
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs)
return;
percpu_down_write(&cpuset_rwsem);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
}
/*
* Make sure the new task conform to the current state of its parent,
* which could have been changed by cpuset just after it inherits the
@ -3225,11 +3314,33 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
*/
static void cpuset_fork(struct task_struct *task)
{
if (task_css_is_root(task, cpuset_cgrp_id))
return;
struct cpuset *cs;
bool same_cs;
set_cpus_allowed_ptr(task, current->cpus_ptr);
task->mems_allowed = current->mems_allowed;
rcu_read_lock();
cs = task_cs(task);
same_cs = (cs == task_cs(current));
rcu_read_unlock();
if (same_cs) {
if (cs == &top_cpuset)
return;
set_cpus_allowed_ptr(task, current->cpus_ptr);
task->mems_allowed = current->mems_allowed;
return;
}
/* CLONE_INTO_CGROUP */
percpu_down_write(&cpuset_rwsem);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cpuset_attach_task(cs, task);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
}
struct cgroup_subsys cpuset_cgrp_subsys = {
@ -3242,6 +3353,8 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.attach = cpuset_attach,
.post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.can_fork = cpuset_can_fork,
.cancel_fork = cpuset_cancel_fork,
.fork = cpuset_fork,
.legacy_cftypes = legacy_files,
.dfl_cftypes = dfl_files,
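
A userspace sketch of the path the new can_fork/cancel_fork hooks cover: clone3() with CLONE_INTO_CGROUP places the child directly into another cgroup, and therefore possibly another cpuset, without a later migration that cpuset_can_attach() could veto. Assumes kernel headers >= 5.7; the cgroup path is hypothetical.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/sched.h>	/* struct clone_args, CLONE_INTO_CGROUP */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int cgfd = open("/sys/fs/cgroup/test", O_RDONLY | O_DIRECTORY);
	struct clone_args args;
	long pid;

	if (cgfd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_INTO_CGROUP;
	args.exit_signal = SIGCHLD;
	args.cgroup = cgfd;

	/* cpuset_can_fork() may now refuse this if the target cpuset is empty */
	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid == 0)
		_exit(0);

	printf("clone3 returned %ld\n", pid);
	return pid < 0;
}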


@ -22,6 +22,7 @@
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
/*
* A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
@ -350,7 +351,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
if (freeze) {
if (!(freezer->state & CGROUP_FREEZING))
static_branch_inc(&freezer_active);
static_branch_inc_cpuslocked(&freezer_active);
freezer->state |= state;
freeze_cgroup(freezer);
} else {
@ -361,7 +362,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
if (!(freezer->state & CGROUP_FREEZING)) {
freezer->state &= ~CGROUP_FROZEN;
if (was_freezing)
static_branch_dec(&freezer_active);
static_branch_dec_cpuslocked(&freezer_active);
unfreeze_cgroup(freezer);
}
}
@ -379,6 +380,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
{
struct cgroup_subsys_state *pos;
cpus_read_lock();
/*
* Update all its descendants in pre-order traversal. Each
* descendant will try to inherit its parent's FREEZING state as
@ -407,6 +409,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
}
rcu_read_unlock();
mutex_unlock(&freezer_mutex);
cpus_read_unlock();
}
static ssize_t freezer_write(struct kernfs_open_file *of,


@ -457,9 +457,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
struct task_cputime *cputime = &bstat->cputime;
int i;
cputime->stime = 0;
cputime->utime = 0;
cputime->sum_exec_runtime = 0;
memset(bstat, 0, sizeof(*bstat));
for_each_possible_cpu(i) {
struct kernel_cpustat kcpustat;
u64 *cpustat = kcpustat.cpustat;


@ -10133,6 +10133,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
sds->total_capacity;
/*
* If the local group is more loaded than the average system
* load, don't try to pull any tasks.
*/
if (local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return;
}
}
/*

Some files were not shown because too many files have changed in this diff.