This is the 5.10.71 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmFdqxMACgkQONu9yGCS
 aT4n8BAAt6WBtGY6OmnqqVDriJQxYPmF5oL+rpREdBRks97sinOCI4sAQS6NRb1T
 J8GUzwv1A2KbDOW+iky+XUhYV6wF6RFaiUnYbEAz0hbg+FEbJYBLcO98naJpReTr
 GnyjVEyMQ/NO/xDuJlguI3+6UHl6LPXmqoYR2XD77cwQiXEZW588VtbhtYoK4M8k
 r/Fh0bIbhS5CkWF7TYnzUD3ceSwHWq7N4yGK86s+yrkaeMJ0BsKeisOe4PW5JI3f
 iiqB4FJMbnNe412SdmYoPKfDcNWQbirJ4UnS1hdVslZMCyPktMiI2sRiVr1Euz45
 zh221ObMIqyFK4attV809C2dtyqdI2Zt3maMCwtJWgOJOrpdeUpjyQ91cZ0WJcW0
 2d0ZW0AqpkMpERFsHtcZNtkCBzLNcIgPu+yYJRlimG/Sh95VQWtMbtFsS0W5ZI5D
 F+2PC8cluXwGFLgHvxfkpas/KXVhv2w3m9x0xEgaWxZis31lKzQ4vRVzLewNqhJ9
 C5S7Qb6qEVjRzY9CzT07AV66+faai2RZp1UtC0Lf+mbh4nW4JN0jDc2uxggZWGMb
 inTxl9LfIFFK0apCt6xvuEDPYvMwySKumeNJK3VMP2F3Py/PuZ4SW5Z/OH09+0/S
 liA2dMFBOp8h/AivWQ7qV7B/qGcpasn5ZRabIkLYiaF6zftpUmo=
 =ZCXg
 -----END PGP SIGNATURE-----

Merge 5.10.71 into android12-5.10-lts

Changes in 5.10.71
	tty: Fix out-of-bound vmalloc access in imageblit
	cpufreq: schedutil: Use kobject release() method to free sugov_tunables
	scsi: qla2xxx: Changes to support kdump kernel for NVMe BFS
	cpufreq: schedutil: Destroy mutex before kobject_put() frees the memory
	usb: cdns3: fix race condition before setting doorbell
	ALSA: hda/realtek: Quirks to enable speaker output for Lenovo Legion 7i 15IMHG05, Yoga 7i 14ITL5/15ITL5, and 13s Gen2 laptops.
	ACPI: NFIT: Use fallback node id when numa info in NFIT table is incorrect
	fs-verity: fix signed integer overflow with i_size near S64_MAX
	hwmon: (tmp421) handle I2C errors
	hwmon: (w83793) Fix NULL pointer dereference by removing unnecessary structure field
	hwmon: (w83792d) Fix NULL pointer dereference by removing unnecessary structure field
	hwmon: (w83791d) Fix NULL pointer dereference by removing unnecessary structure field
	gpio: pca953x: do not ignore i2c errors
	scsi: ufs: Fix illegal offset in UPIU event trace
	mac80211: fix use-after-free in CCMP/GCMP RX
	x86/kvmclock: Move this_cpu_pvti into kvmclock.h
	KVM: x86: Fix stack-out-of-bounds memory access from ioapic_write_indirect()
	KVM: x86: nSVM: don't copy virt_ext from vmcb12
	KVM: nVMX: Filter out all unsupported controls when eVMCS was activated
	KVM: rseq: Update rseq when processing NOTIFY_RESUME on xfer to KVM guest
	media: ir_toy: prevent device from hanging during transmit
	RDMA/cma: Do not change route.addr.src_addr.ss_family
	drm/amd/display: Pass PCI deviceid into DC
	drm/amdgpu: correct initial cp_hqd_quantum for gfx9
	ipvs: check that ip_vs_conn_tab_bits is between 8 and 20
	bpf: Handle return value of BPF_PROG_TYPE_STRUCT_OPS prog
	IB/cma: Do not send IGMP leaves for sendonly Multicast groups
	RDMA/cma: Fix listener leak in rdma_cma_listen_on_all() failure
	bpf, mips: Validate conditional branch offsets
	hwmon: (mlxreg-fan) Return non-zero value when fan current state is enforced from sysfs
	mac80211: Fix ieee80211_amsdu_aggregate frag_tail bug
	mac80211: limit injected vht mcs/nss in ieee80211_parse_tx_radiotap
	mac80211: mesh: fix potentially unaligned access
	mac80211-hwsim: fix late beacon hrtimer handling
	sctp: break out if skb_header_pointer returns NULL in sctp_rcv_ootb
	mptcp: don't return sockets in foreign netns
	hwmon: (tmp421) report /PVLD condition as fault
	hwmon: (tmp421) fix rounding for negative values
	net: enetc: fix the incorrect clearing of IF_MODE bits
	net: ipv4: Fix rtnexthop len when RTA_FLOW is present
	smsc95xx: fix stalled rx after link change
	drm/i915/request: fix early tracepoints
	dsa: mv88e6xxx: 6161: Use chip wide MAX MTU
	dsa: mv88e6xxx: Fix MTU definition
	dsa: mv88e6xxx: Include tagger overhead when setting MTU for DSA and CPU ports
	e100: fix length calculation in e100_get_regs_len
	e100: fix buffer overrun in e100_get_regs
	RDMA/hns: Fix inaccurate prints
	bpf: Exempt CAP_BPF from checks against bpf_jit_limit
	selftests, bpf: Fix makefile dependencies on libbpf
	selftests, bpf: test_lwt_ip_encap: Really disable rp_filter
	net: ks8851: fix link error
	Revert "block, bfq: honor already-setup queue merges"
	scsi: csiostor: Add module softdep on cxgb4
	ixgbe: Fix NULL pointer dereference in ixgbe_xdp_setup
	net: hns3: do not allow call hns3_nic_net_open repeatedly
	net: hns3: keep MAC pause mode when multiple TCs are enabled
	net: hns3: fix mixed flag HCLGE_FLAG_MQPRIO_ENABLE and HCLGE_FLAG_DCB_ENABLE
	net: hns3: fix show wrong state when add existing uc mac address
	net: hns3: fix prototype warning
	net: hns3: reconstruct function hns3_self_test
	net: hns3: fix always enable rx vlan filter problem after selftest
	net: phy: bcm7xxx: Fixed indirect MMD operations
	net: sched: flower: protect fl_walk() with rcu
	af_unix: fix races in sk_peer_pid and sk_peer_cred accesses
	perf/x86/intel: Update event constraints for ICX
	hwmon: (pmbus/mp2975) Add missed POUT attribute for page 1 mp2975 controller
	nvme: add command id quirk for apple controllers
	elf: don't use MAP_FIXED_NOREPLACE for elf interpreter mappings
	debugfs: debugfs_create_file_size(): use IS_ERR to check for error
	ipack: ipoctal: fix stack information leak
	ipack: ipoctal: fix tty registration race
	ipack: ipoctal: fix tty-registration error handling
	ipack: ipoctal: fix missing allocation-failure check
	ipack: ipoctal: fix module reference leak
	ext4: fix loff_t overflow in ext4_max_bitmap_size()
	ext4: limit the number of blocks in one ADD_RANGE TLV
	ext4: fix reserved space counter leakage
	ext4: add error checking to ext4_ext_replay_set_iblocks()
	ext4: fix potential infinite loop in ext4_dx_readdir()
	HID: u2fzero: ignore incomplete packets without data
	net: udp: annotate data race around udp_sk(sk)->corkflag
	ASoC: dapm: use component prefix when checking widget names
	usb: hso: remove the bailout parameter
	crypto: ccp - fix resource leaks in ccp_run_aes_gcm_cmd()
	HID: betop: fix slab-out-of-bounds Write in betop_probe
	netfilter: ipset: Fix oversized kvmalloc() calls
	mm: don't allow oversized kvmalloc() calls
	HID: usbhid: free raw_report buffers in usbhid_stop
	KVM: x86: Handle SRCU initialization failure during page track init
	netfilter: conntrack: serialize hash resizes and cleanups
	netfilter: nf_tables: Fix oversized kvmalloc() calls
	Linux 5.10.71

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I238c3de739c3d4ba0a04a484460356161899f222
Committed by Greg Kroah-Hartman on 2021-10-06 17:33:06 +02:00
commit c23269dad5
114 changed files with 1204 additions and 564 deletions

diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 70
+SUBLEVEL = 71
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
 	 func##_positive)
 
+static bool is_bad_offset(int b_off)
+{
+	return b_off > 0x1ffff || b_off < -0x20000;
+}
+
 static int build_body(struct jit_ctx *ctx)
 {
 	const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ static int build_body(struct jit_ctx *ctx)
 			/* Load return register on DS for failures */
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* Return with error */
-			emit_b(b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ static int build_body(struct jit_ctx *ctx)
 			emit_jalr(MIPS_R_RA, r_s0, ctx);
 			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
 			/* Check the error value */
-			emit_bcond(MIPS_COND_NE, r_ret, 0,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* We are good */
 			/* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ static int build_body(struct jit_ctx *ctx)
 			/* A /= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_div(r_A, r_X, ctx);
 			break;
@@ -864,8 +876,10 @@ static int build_body(struct jit_ctx *ctx)
 			/* A %= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_mod(r_A, r_X, ctx);
 			break;
@@ -926,7 +940,10 @@ static int build_body(struct jit_ctx *ctx)
 			break;
 		case BPF_JMP | BPF_JA:
 			/* pc += K */
-			emit_b(b_imm(i + k + 1, ctx), ctx);
+			b_off = b_imm(i + k + 1, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ static int build_body(struct jit_ctx *ctx)
 			break;
 		case BPF_RET | BPF_A:
 			ctx->flags |= SEEN_A;
-			if (i != prog->len - 1)
+			if (i != prog->len - 1) {
 				/*
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
+			}
 			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
 			break;
 		case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ static int build_body(struct jit_ctx *ctx)
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
 				emit_nop(ctx);
 			}
 			break;
@@ -1133,8 +1157,10 @@ static int build_body(struct jit_ctx *ctx)
 			/* Load *dev pointer */
 			emit_load_ptr(r_s0, r_skb, off, ctx);
 			/* error (0) in the delay slot */
-			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
 				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
 	/* Generate the actual JIT code */
 	build_prologue(&ctx);
-	build_body(&ctx);
+	if (build_body(&ctx)) {
+		module_memfree(ctx.target);
+		goto out;
+	}
 	build_epilogue(&ctx);
 
 	/* Update the icache */

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 	EVENT_CONSTRAINT_END
 };

diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
 			    struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);

diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */

diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
@@ -50,18 +50,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may

diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	unsigned index;
 	bool mask_before, mask_after;
 	union kvm_ioapic_redirect_entry *e;
-	unsigned long vcpu_bitmap;
 	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 
 	switch (ioapic->ioregsel) {
 	case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		irq.shorthand = APIC_DEST_NOSHORT;
 		irq.dest_id = e->fields.dest_id;
 		irq.msi_redir_hint = false;
-		bitmap_zero(&vcpu_bitmap, 16);
+		bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 		kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-					 &vcpu_bitmap);
+					 vcpu_bitmap);
 		if (old_dest_mode != e->fields.dest_mode ||
 		    old_dest_id != e->fields.dest_id) {
 			/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 				kvm_lapic_irq_dest_mode(
 					!!e->fields.dest_mode);
 			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-						 &vcpu_bitmap);
+						 vcpu_bitmap);
 		}
 		kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-						  &vcpu_bitmap);
+						  vcpu_bitmap);
 	} else {
 		kvm_make_scan_ioapic_request(ioapic->kvm);
 	}

diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
@@ -163,13 +163,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
 	cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
 
 	head = &kvm->arch.track_notifier_head;
-	init_srcu_struct(&head->track_srcu);
 	INIT_HLIST_HEAD(&head->track_notifier_list);
+	return init_srcu_struct(&head->track_srcu);
 }
 
 /*

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
@@ -447,7 +447,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
 		(svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);
 
-	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
 	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
 	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
 	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;

diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
@@ -352,14 +352,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
 	switch (msr_index) {
 	case MSR_IA32_VMX_EXIT_CTLS:
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-		ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
 		break;
 	case MSR_IA32_VMX_ENTRY_CTLS:
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-		ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
-		ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+		break;
+	case MSR_IA32_VMX_PINBASED_CTLS:
+		ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+		break;
+	case MSR_IA32_VMX_VMFUNC:
+		ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
 		break;
 	}

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
@@ -1867,10 +1867,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 					&msr_info->data))
 			return 1;
 		/*
-		 * Enlightened VMCS v1 doesn't have certain fields, but buggy
-		 * Hyper-V versions are still trying to use corresponding
-		 * features when they are exposed. Filter out the essential
-		 * minimum.
+		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
+		 * instead of just ignoring the features, different Hyper-V
+		 * versions are either trying to use them and fail or do some
+		 * sanity checking and refuse to boot. Filter all unsupported
+		 * features out.
 		 */
 		if (!msr_info->host_initiated &&
 		    vmx->nested.enlightened_vmcs_enabled)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -10392,9 +10392,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret;
+
 	if (type)
 		return -EINVAL;
 
+	ret = kvm_page_track_init(kvm);
+	if (ret)
+		return ret;
+
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -10421,7 +10427,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
 	kvm_hv_init_vm(kvm);
-	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
 	return kvm_x86_ops.vm_init(kvm);

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
@@ -1544,7 +1544,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-			   struct bpf_prog *p, int stack_size, bool mod_ret)
+			   struct bpf_prog *p, int stack_size, bool save_ret)
 {
 	u8 *prog = *pprog;
 	int cnt = 0;
@@ -1570,11 +1570,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (emit_call(&prog, p->bpf_func, prog))
 		return -EINVAL;
 
-	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+	/*
+	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
 	 * of the previous call which is then passed on the stack to
 	 * the next BPF program.
+	 *
+	 * BPF_TRAMP_FENTRY trampoline may need to return the return
+	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
 	 */
-	if (mod_ret)
+	if (save_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
 	if (p->aux->sleepable) {
@@ -1642,13 +1646,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_tramp_progs *tp, int stack_size)
+		      struct bpf_tramp_progs *tp, int stack_size,
+		      bool save_ret)
 {
 	int i;
 	u8 *prog = *pprog;
 
 	for (i = 0; i < tp->nr_progs; i++) {
-		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+				    save_ret))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -1691,6 +1697,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	return 0;
 }
 
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+		return false;
+
+	/*
+	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+	 * and it must be used alone.
+	 */
+	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+		return false;
+
+	return true;
+}
+
 /* Example:
  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
  * its 'struct btf_func_model' will be nr_args=2
@@ -1763,17 +1786,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
 	u8 **branches = NULL;
 	u8 *prog;
+	bool save_ret;
 
 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
 	if (nr_args > 6)
 		return -ENOTSUPP;
 
-	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+	if (!is_valid_bpf_tramp_flags(flags))
 		return -EINVAL;
 
-	if (flags & BPF_TRAMP_F_CALL_ORIG)
-		stack_size += 8; /* room for return value of orig_call */
+	/* room for return value of orig_call or fentry prog */
+	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+	if (save_ret)
+		stack_size += 8;
 
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip patched call instruction and point orig_call to actual
@@ -1800,7 +1825,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	}
 
 	if (fentry->nr_progs)
-		if (invoke_bpf(m, &prog, fentry, stack_size))
+		if (invoke_bpf(m, &prog, fentry, stack_size,
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
 			return -EINVAL;
 
 	if (fmod_ret->nr_progs) {
@@ -1847,7 +1873,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	}
 
 	if (fexit->nr_progs)
-		if (invoke_bpf(m, &prog, fexit, stack_size)) {
+		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -1867,9 +1893,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 			ret = -EINVAL;
 			goto cleanup;
 		}
-		/* restore original return value back into RAX */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 	}
 
+	/* restore return value of orig_call or fentry prog back into RAX */
+	if (save_ret)
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+
 	EMIT1(0x5B); /* pop rbx */
 	EMIT1(0xC9); /* leave */

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
@@ -2526,15 +2526,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
-	/*
-	 * The above assignment schedules the following redirections:
-	 * each time some I/O for bfqq arrives, the process that
-	 * generated that I/O is disassociated from bfqq and
-	 * associated with new_bfqq. Here we increases new_bfqq->ref
-	 * in advance, adding the number of processes that are
-	 * expected to be associated with new_bfqq as they happen to
-	 * issue I/O.
-	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2594,10 +2585,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
-	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	/*
 	 * Do not perform queue merging if the device is non
 	 * rotational and performs internal queueing. In fact, such a
@@ -2652,6 +2639,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;

diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
@@ -3017,6 +3017,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 		ndr_desc->target_node = NUMA_NO_NODE;
 	}
 
+	/* Fallback to address based numa information if node lookup failed */
+	if (ndr_desc->numa_node == NUMA_NO_NODE) {
+		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+	if (ndr_desc->target_node == NUMA_NO_NODE) {
+		ndr_desc->target_node = phys_to_target_node(spa->address);
+		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+
 	/*
 	 * Persistence domain bits are hierarchical, if
 	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then

diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
 	if (count)
 		return count;
 
-	kobject_put(&attr_set->kobj);
 	mutex_destroy(&attr_set->update_lock);
+	kobject_put(&attr_set->kobj);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			    in_place ? DMA_BIDIRECTIONAL
 				     : DMA_TO_DEVICE);
 	if (ret)
-		goto e_ctx;
+		goto e_aad;
 
 	if (in_place) {
 		dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		op.u.aes.size = 0;
 		ret = cmd_q->ccp->vdata->perform->aes(&op);
 		if (ret)
-			goto e_dst;
+			goto e_final_wa;
 
 		if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 			/* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 						   DMA_BIDIRECTIONAL);
 			if (ret)
-				goto e_tag;
+				goto e_final_wa;
 			ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-			if (ret)
-				goto e_tag;
+			if (ret) {
+				ccp_dm_free(&tag);
+				goto e_final_wa;
+			}
 
 			ret = crypto_memneq(tag.address, final_wa.address,
 					    authsize) ? -EBADMSG : 0;
 			ccp_dm_free(&tag);
 		}
 
-e_tag:
+e_final_wa:
 	ccp_dm_free(&final_wa);
 
 e_dst:

diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
@@ -467,15 +467,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 	mutex_lock(&chip->i2c_lock);
 	ret = regmap_read(chip->regmap, inreg, &reg_val);
 	mutex_unlock(&chip->i2c_lock);
-	if (ret < 0) {
-		/*
-		 * NOTE:
-		 * diagnostic already emitted; that's all we should
-		 * do unless gpio_*_value_cansleep() calls become different
-		 * from their nonsleeping siblings (and report faults).
-		 */
-		return 0;
-	}
+	if (ret < 0)
+		return ret;
 
 	return !!(reg_val & bit);
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3542,7 +3542,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 	/* set static priority for a queue/ring */
 	gfx_v9_0_mqd_set_priority(ring, mqd);
-	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
 
 	/* map_queues packet doesn't need activate the queue,
 	 * so only kiq need set this field.

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -951,6 +951,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+	init_data.asic_id.chip_id = adev->pdev->device;
 
 	init_data.asic_id.vram_width = adev->gmc.vram_width;
 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
@@ -776,8 +776,6 @@ static void __i915_request_ctor(void *arg)
 	i915_sw_fence_init(&rq->submit, submit_notify);
 	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 
-	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
 	rq->capture_list = NULL;
 
 	init_llist_head(&rq->execute_cb);
@@ -840,17 +838,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
 
-	kref_init(&rq->fence.refcount);
-	rq->fence.flags = 0;
-	rq->fence.error = 0;
-	INIT_LIST_HEAD(&rq->fence.cb_list);
-
 	ret = intel_timeline_get_seqno(tl, rq, &seqno);
 	if (ret)
 		goto err_free;
 
-	rq->fence.context = tl->fence_context;
-	rq->fence.seqno = seqno;
+	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+		       tl->fence_context, seqno);
 
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);

diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
 {
 	struct betopff_device *betopff;
 	struct hid_report *report;
-	struct hid_input *hidinput =
-		list_first_entry(&hid->inputs, struct hid_input, list);
+	struct hid_input *hidinput;
 	struct list_head *report_list =
 			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
-	struct input_dev *dev = hidinput->input;
+	struct input_dev *dev;
 	int field_count = 0;
 	int error;
 	int i, j;
 
+	if (list_empty(&hid->inputs)) {
+		hid_err(hid, "no inputs found\n");
+		return -ENODEV;
+	}
+
+	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+	dev = hidinput->input;
+
 	if (list_empty(report_list)) {
 		hid_err(hid, "no output reports found\n");
 		return -ENODEV;

diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
 	}
 
 	ret = u2fzero_recv(dev, &req, &resp);
-	if (ret < 0)
+
+	/* ignore errors or packets without data */
+	if (ret < offsetof(struct u2f_hid_msg, init.data))
 		return 0;
 
 	/* only take the minimum amount of data it is safe to take */

diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
@@ -503,7 +503,7 @@ static void hid_ctrl(struct urb *urb)
 
 	if (unplug) {
 		usbhid->ctrltail = usbhid->ctrlhead;
-	} else {
+	} else if (usbhid->ctrlhead != usbhid->ctrltail) {
 		usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
 
 		if (usbhid->ctrlhead != usbhid->ctrltail &&
@@ -1221,9 +1221,20 @@ static void usbhid_stop(struct hid_device *hid)
 	mutex_lock(&usbhid->mutex);
 
 	clear_bit(HID_STARTED, &usbhid->iofl);
+
 	spin_lock_irq(&usbhid->lock);	/* Sync with error and led handlers */
 	set_bit(HID_DISCONNECTED, &usbhid->iofl);
+	while (usbhid->ctrltail != usbhid->ctrlhead) {
+		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) {
+			kfree(usbhid->ctrl[usbhid->ctrltail].raw_report);
+			usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
+		}
+
+		usbhid->ctrltail = (usbhid->ctrltail + 1) &
+			(HID_CONTROL_FIFO_SIZE - 1);
+	}
 	spin_unlock_irq(&usbhid->lock);
+
 	usb_kill_urb(usbhid->urbin);
 	usb_kill_urb(usbhid->urbout);
 	usb_kill_urb(usbhid->urbctrl);

diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
@@ -291,8 +291,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 {
 	struct mlxreg_fan *fan = cdev->devdata;
 	unsigned long cur_state;
+	int i, config = 0;
 	u32 regval;
-	int i;
 	int err;
 
 	/*
@@ -305,6 +305,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 	 * overwritten.
 	 */
 	if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+		/*
+		 * This is configuration change, which is only supported through sysfs.
+		 * For configuration non-zero value is to be returned to avoid thermal
+		 * statistics update.
+		 */
+		config = 1;
 		state -= MLXREG_FAN_MAX_STATE;
 		for (i = 0; i < state; i++)
 			fan->cooling_levels[i] = state;
@@ -319,7 +325,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 		cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
 		if (state < cur_state)
-			return 0;
+			return config;
 
 		state = cur_state;
 	}
@@ -335,7 +341,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 		dev_err(fan->dev, "Failed to write PWM duty\n");
 		return err;
 	}
-	return 0;
+	return config;
 }
 
 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {

diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
@@ -54,7 +54,7 @@
 
 #define MP2975_RAIL2_FUNC	(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
 				 PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
-				 PMBUS_PHASE_VIRTUAL)
+				 PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
 
 struct mp2975_data {
 	struct pmbus_driver_info info;

diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
@@ -100,71 +100,81 @@ struct tmp421_data {
 	s16 temp[4];
 };
 
-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
 {
 	/* Mask out status bits */
 	int temp = reg & ~0xf;
 
-	return (temp * 1000 + 128) / 256;
-}
-
-static int temp_from_u16(u16 reg)
-{
-	/* Mask out status bits */
-	int temp = reg & ~0xf;
-
-	/* Add offset for extended temperature range. */
-	temp -= 64 * 256;
+	if (extended)
+		temp = temp - 64 * 256;
+	else
+		temp = (s16)temp;
 
-	return (temp * 1000 + 128) / 256;
+	return DIV_ROUND_CLOSEST(temp * 1000, 256);
 }
 
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
 {
-	struct tmp421_data *data = dev_get_drvdata(dev);
 	struct i2c_client *client = data->client;
+	int ret = 0;
 	int i;
 
 	mutex_lock(&data->update_lock);
 
 	if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
 	    !data->valid) {
-		data->config = i2c_smbus_read_byte_data(client,
-			TMP421_CONFIG_REG_1);
+		ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+		if (ret < 0)
+			goto exit;
+		data->config = ret;
 
 		for (i = 0; i < data->channels; i++) {
-			data->temp[i] = i2c_smbus_read_byte_data(client,
-				TMP421_TEMP_MSB[i]) << 8;
-			data->temp[i] |= i2c_smbus_read_byte_data(client,
-				TMP421_TEMP_LSB[i]);
+			ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+			if (ret < 0)
+				goto exit;
+			data->temp[i] = ret << 8;
+
+			ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+			if (ret < 0)
+				goto exit;
+			data->temp[i] |= ret;
 		}
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
 
+exit:
 	mutex_unlock(&data->update_lock);
 
-	return data;
+	if (ret < 0) {
+		data->valid = 0;
+		return ret;
+	}
+
+	return 0;
 }
 
 static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
 		       u32 attr, int channel, long *val)
 {
-	struct tmp421_data *tmp421 = tmp421_update_device(dev);
+	struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = tmp421_update_device(tmp421);
+	if (ret)
+		return ret;
 
 	switch (attr) {
 	case hwmon_temp_input:
-		if (tmp421->config & TMP421_CONFIG_RANGE)
-			*val = temp_from_u16(tmp421->temp[channel]);
-		else
-			*val = temp_from_s16(tmp421->temp[channel]);
+		*val = temp_from_raw(tmp421->temp[channel],
+				     tmp421->config & TMP421_CONFIG_RANGE);
 		return 0;
 	case hwmon_temp_fault:
 		/*
-		 * The OPEN bit signals a fault. This is bit 0 of the temperature
-		 * register (low byte).
+		 * Any of OPEN or /PVLD bits indicate a hardware mulfunction
+		 * and the conversion result may be incorrect
 		 */
-		*val = tmp421->temp[channel] & 0x01;
+		*val = !!(tmp421->temp[channel] & 0x03);
 		return 0;
 	default:
 		return -EOPNOTSUPP;
@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
 {
 	switch (attr) {
 	case hwmon_temp_fault:
-		if (channel == 0)
-			return 0;
-		return 0444;
 	case hwmon_temp_input:
 		return 0444;
 	default:

diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
@@ -273,9 +273,6 @@ struct w83791d_data {
 	char valid;			/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
 
-	/* array of 2 pointers to subclients */
-	struct i2c_client *lm75[2];
-
 	/* volts */
 	u8 in[NUMBER_OF_VIN];		/* Register value */
 	u8 in_max[NUMBER_OF_VIN];	/* Register value */
@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
 static int w83791d_detect_subclients(struct i2c_client *client)
 {
 	struct i2c_adapter *adapter = client->adapter;
-	struct w83791d_data *data = i2c_get_clientdata(client);
 	int address = client->addr;
 	int i, id;
 	u8 val;
@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
 	}
 
 	val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
-	if (!(val & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + (val & 0x7));
-	if (!(val & 0x80)) {
-		if (!IS_ERR(data->lm75[0]) &&
-				((val & 0x7) == ((val >> 4) & 0x7))) {
-			dev_err(&client->dev,
-				"duplicate addresses 0x%x, "
-				"use force_subclient\n",
-				data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + ((val >> 4) & 0x7));
+
+	if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+		dev_err(&client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+		return -ENODEV;
 	}
 
+	if (!(val & 0x08))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+	if (!(val & 0x80))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
 	return 0;
 }

diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
@@ -264,9 +264,6 @@ struct w83792d_data {
 	char valid;		/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */
 
-	/* array of 2 pointers to subclients */
-	struct i2c_client *lm75[2];
-
 	u8 in[9];		/* Register value */
 	u8 in_max[9];		/* Register value */
 	u8 in_min[9];		/* Register value */
@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 	int address = new_client->addr;
 	u8 val;
 	struct i2c_adapter *adapter = new_client->adapter;
-	struct w83792d_data *data = i2c_get_clientdata(new_client);
 
 	id = i2c_adapter_id(adapter);
 	if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 	}
 
 	val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
-	if (!(val & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-							  0x48 + (val & 0x7));
-	if (!(val & 0x80)) {
-		if (!IS_ERR(data->lm75[0]) &&
-			((val & 0x7) == ((val >> 4) & 0x7))) {
-			dev_err(&new_client->dev,
-				"duplicate addresses 0x%x, use force_subclient\n",
-				data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-							  0x48 + ((val >> 4) & 0x7));
+
+	if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+		dev_err(&new_client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+		return -ENODEV;
 	}
 
+	if (!(val & 0x08))
+		devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+	if (!(val & 0x80))
+		devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
 	return 0;
 }

diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
 }
 
 struct w83793_data {
-	struct i2c_client *lm75[2];
 	struct device *hwmon_dev;
 	struct mutex update_lock;
 	char valid;			/* !=0 if following fields are valid */
@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
 	int address = client->addr;
 	u8 tmp;
 	struct i2c_adapter *adapter = client->adapter;
-	struct w83793_data *data = i2c_get_clientdata(client);
 
 	id = i2c_adapter_id(adapter);
 	if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
 	}
 
 	tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
-	if (!(tmp & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + (tmp & 0x7));
-	if (!(tmp & 0x80)) {
-		if (!IS_ERR(data->lm75[0])
-		    && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
-			dev_err(&client->dev,
-				"duplicate addresses 0x%x, "
-				"use force_subclients\n", data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + ((tmp >> 4) & 0x7));
+
+	if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+		dev_err(&client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+		return -ENODEV;
 	}
 
+	if (!(tmp & 0x08))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+	if (!(tmp & 0x80))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
 	return 0;
 }

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
@@ -1750,15 +1750,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
 	}
 }
 
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
 {
 	struct rdma_id_private *dev_id_priv;
 
+	lockdep_assert_held(&lock);
+
 	/*
 	 * Remove from listen_any_list to prevent added devices from spawning
 	 * additional listen requests.
 	 */
-	mutex_lock(&lock);
 	list_del(&id_priv->list);
 
 	while (!list_empty(&id_priv->listen_list)) {
@@ -1772,6 +1773,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 		rdma_destroy_id(&dev_id_priv->id);
 		mutex_lock(&lock);
 	}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+	mutex_lock(&lock);
+	_cma_cancel_listens(id_priv);
 	mutex_unlock(&lock);
 }
 
@@ -1814,6 +1821,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
 static void destroy_mc(struct rdma_id_private *id_priv,
 		       struct cma_multicast *mc)
 {
+	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
 	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
 		ib_sa_free_multicast(mc->sa_mc);
 
@@ -1830,7 +1839,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
 			cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
 				     &mgid);
-			cma_igmp_send(ndev, &mgid, false);
+
+			if (!send_only)
+				cma_igmp_send(ndev, &mgid, false);
+
 			dev_put(ndev);
 		}
 
@@ -2577,7 +2589,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
 	return 0;
 
 err_listen:
-	list_del(&id_priv->list);
+	_cma_cancel_listens(id_priv);
 	mutex_unlock(&lock);
 	if (to_destroy)
 		rdma_destroy_id(&to_destroy->id);
@@ -3732,9 +3744,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+		struct sockaddr_in any_in = {
+			.sin_family = AF_INET,
+			.sin_addr.s_addr = htonl(INADDR_ANY),
+		};
+
 		/* For a well behaved ULP state will be RDMA_CM_IDLE */
-		id->route.addr.src_addr.ss_family = AF_INET;
-		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
 		if (ret)
 			return ret;
 		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,

diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -240,7 +240,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 	end = start + buf_cnt;
 	if (end > buf->npages) {
 		dev_err(hr_dev->dev,
-			"Failed to check kmem bufs, end %d + %d total %d!\n",
+			"failed to check kmem bufs, end %d + %d total %u!\n",
 			start, buf_cnt, buf->npages);
 		return -EINVAL;
 	}
@@ -262,7 +262,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 	u64 addr;
 
 	if (page_shift < HNS_HW_PAGE_SHIFT) {
-		dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
+		dev_err(hr_dev->dev, "failed to check umem page shift %u!\n",
 			page_shift);
 		return -EINVAL;
 	}

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -50,29 +50,29 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
 				&dma_handle);
-	if (ret < 1) {
-		ibdev_err(ibdev, "Failed to find CQ mtr\n");
+	if (!ret) {
+		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
 		return -EINVAL;
 	}
 
 	cq_table = &hr_dev->cq_table;
 	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
+		ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
 		return ret;
 	}
 
 	/* Get CQC memory HEM(Hardware Entry Memory) table */
 	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
 			  hr_cq->cqn, ret);
 		goto err_out;
 	}
 
 	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
 	if (ret) {
-		ibdev_err(ibdev, "Failed to xa_store CQ\n");
+		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
 		goto err_put;
 	}
@@ -91,7 +91,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
 		ibdev_err(ibdev,
-			  "Failed to send create cmd for CQ(0x%lx), err %d\n",
+			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
 			  hr_cq->cqn, ret);
 		goto err_xa;
 	}
@@ -147,7 +147,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int err;
+	int ret;
 
 	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
 	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
@@ -155,13 +155,13 @@
 	buf_attr.region_count = 1;
 	buf_attr.fixed_page = true;
 
-	err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
-				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
-				  udata, addr);
-	if (err)
-		ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+				  udata, addr);
+	if (ret)
+		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
 
-	return err;
+	return ret;
 }
 
 static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
@@ -252,13 +252,13 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	int ret;
 
 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
-		ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
 			  cq_entries, hr_dev->caps.max_cqes);
 		return -EINVAL;
 	}
 
 	if (vector >= hr_dev->caps.num_comp_vectors) {
-		ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+		ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
 			  vector, hr_dev->caps.num_comp_vectors);
 		return -EINVAL;
 	}
@@ -276,7 +276,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		ret = ib_copy_from_udata(&ucmd, udata,
 					 min(udata->inlen, sizeof(ucmd)));
 		if (ret) {
-			ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+			ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
 				  ret);
 			return ret;
 		}
@@ -286,19 +286,20 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
 		return ret;
 	}
 
 	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
 		goto err_cq_buf;
 	}
 
 	ret = alloc_cqc(hr_dev, hr_cq);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+		ibdev_err(ibdev,
+			  "failed to alloc CQ context, ret = %d.\n", ret);
 		goto err_cq_db;
 	}

diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -184,7 +184,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
 		mhop->hop_num = hr_dev->caps.srqc_hop_num;
 		break;
 	default:
-		dev_err(dev, "Table %d not support multi-hop addressing!\n",
+		dev_err(dev, "table %u not support multi-hop addressing!\n",
 			type);
 		return -EINVAL;
 	}
@@ -232,8 +232,8 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 		mhop->l0_idx = table_idx;
 		break;
 	default:
-		dev_err(dev, "Table %d not support hop_num = %d!\n",
-			table->type, mhop->hop_num);
+		dev_err(dev, "table %u not support hop_num = %u!\n",
+			table->type, mhop->hop_num);
 		return -EINVAL;
 	}
 	if (mhop->l0_idx >= mhop->ba_l0_num)
@@ -438,13 +438,13 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev,
 		index->buf = l0_idx;
 		break;
 	default:
-		ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
+		ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
 			  table->type, mhop->hop_num);
 		return -EINVAL;
 	}
 
 	if (unlikely(index->buf >= table->num_hem)) {
-		ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
+		ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n",
 			  table->type, index->buf, table->num_hem);
 		return -EINVAL;
 	}
@@ -714,15 +714,15 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
 		step_idx = hop_num;
 
 	if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
-		ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num);
+		ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
 
 	if (index->inited & HEM_INDEX_L1)
 		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
-			ibdev_warn(ibdev, "Clear HEM step 1 failed.\n");
+			ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
 
 	if (index->inited & HEM_INDEX_L0)
 		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
-			ibdev_warn(ibdev, "Clear HEM step 0 failed.\n");
+			ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
 	}
 }
@@ -1234,7 +1234,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
 	}
 
 	if (offset < r->offset) {
-		dev_err(hr_dev->dev, "invalid offset %d,min %d!\n",
+		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
 			offset, r->offset);
 		return -EINVAL;
 	}


@@ -361,7 +361,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
 	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
 		   hr_qp->state == IB_QPS_INIT ||
 		   hr_qp->state == IB_QPS_RTR)) {
-		ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
+		ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
 			  hr_qp->state);
 		return -EINVAL;
 	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
@@ -665,7 +665,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
 
 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
-			ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
+			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
 				  wr->num_sge, qp->sq.max_gs);
 			ret = -EINVAL;
 			*bad_wr = wr;
@@ -750,7 +750,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
 
 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
-			ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
+			ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
 				  wr->num_sge, hr_qp->rq.max_gs);
 			ret = -EINVAL;
 			*bad_wr = wr;
@@ -1920,8 +1920,8 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
 		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
 		break;
 	default:
-		pr_err("Table %d not support hop_num = %d!\n", hem_type,
-		       hop_num);
+		pr_err("table %u not support hop_num = %u!\n", hem_type,
+		       hop_num);
 		return;
 	}
@@ -3562,7 +3562,7 @@ static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
 		break;
 	default:
 		dev_warn(hr_dev->dev,
-			 "Table %d not to be written by mailbox!\n", type);
+			 "table %u not to be written by mailbox!\n", type);
 		return -EINVAL;
 	}
@@ -3681,7 +3681,7 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
 		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
 		break;
 	default:
-		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
+		dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
 			 table->type);
 		return 0;
 	}
@@ -4318,7 +4318,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 	ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
 	if (ret) {
-		ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret);
+		ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
 		return ret;
 	}
@@ -4804,7 +4804,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 	/* SW pass context to HW */
 	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
 	if (ret) {
-		ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
+		ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
 		goto out;
 	}
@@ -4897,7 +4897,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
 	if (ret) {
-		ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
+		ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -5018,7 +5018,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 						    hr_qp->state, IB_QPS_RESET);
 		if (ret)
 			ibdev_err(ibdev,
-				  "failed to modify QP to RST, ret = %d\n",
+				  "failed to modify QP to RST, ret = %d.\n",
 				  ret);
 	}
@@ -5057,7 +5057,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
 	if (ret)
 		ibdev_err(&hr_dev->ib_dev,
-			  "failed to destroy QP 0x%06lx, ret = %d\n",
+			  "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
 			  hr_qp->qpn, ret);
 
 	hns_roce_qp_destroy(hr_dev, hr_qp, udata);
@@ -5080,7 +5080,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
 	if (ret) {
-		ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
+		ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
 		goto out;
 	}
@@ -5090,7 +5090,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
 	clr->qpn = cpu_to_le32(hr_qp->qpn);
 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
 	if (ret) {
-		ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
+		ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
 		goto out;
 	}
@@ -5339,7 +5339,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret)
 		ibdev_err(&hr_dev->ib_dev,
-			  "failed to process cmd when modifying CQ, ret = %d\n",
+			  "failed to process cmd when modifying CQ, ret = %d.\n",
 			  ret);
 
 	return ret;


@@ -185,14 +185,14 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
 	else
 		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
 	if (ret) {
-		dev_err(dev, "Write mtpt fail!\n");
+		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
 		goto err_page;
 	}
 
 	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
 				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
 	if (ret) {
-		dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
+		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
 		goto err_page;
 	}
@@ -495,7 +495,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
 	if (ret < 1) {
-		ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
+		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
 			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
 		goto err_page_list;
 	}
@@ -862,7 +862,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		if (r->offset + r->count > page_cnt) {
 			err = -EINVAL;
 			ibdev_err(ibdev,
-				  "Failed to check mtr%d end %d + %d, max %d\n",
+				  "failed to check mtr%u end %u + %u, max %u.\n",
 				  i, r->offset, r->count, page_cnt);
 			return err;
 		}
@@ -870,7 +870,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
 		if (err) {
 			ibdev_err(ibdev,
-				  "Failed to map mtr%d offset %d, err %d\n",
+				  "failed to map mtr%u offset %u, ret = %d.\n",
 				  i, r->offset, err);
 			return err;
 		}


@@ -65,7 +65,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
 	if (ret) {
-		ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
+		ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
 		return ret;
 	}


@@ -452,12 +452,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
 	/* Sanity check SQ size before proceeding */
 	if (ucmd->log_sq_stride > max_sq_stride ||
 	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
+		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
 		return -EINVAL;
 	}
 
 	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
-		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
+		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
 			  cap->max_send_sge);
 		return -EINVAL;
 	}
@@ -563,7 +563,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
 	if (cnt > hr_dev->caps.max_wqes) {
-		ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
+		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
 			  cnt);
 		return -EINVAL;
 	}
@@ -736,7 +736,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 					   &hr_qp->sdb);
 			if (ret) {
 				ibdev_err(ibdev,
-					  "Failed to map user SQ doorbell\n");
+					  "failed to map user SQ doorbell, ret = %d.\n",
+					  ret);
 				goto err_out;
 			}
 			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
@@ -747,7 +748,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 					   &hr_qp->rdb);
 			if (ret) {
 				ibdev_err(ibdev,
-					  "Failed to map user RQ doorbell\n");
+					  "failed to map user RQ doorbell, ret = %d.\n",
+					  ret);
 				goto err_sdb;
 			}
 			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
@@ -763,7 +765,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
 			if (ret) {
 				ibdev_err(ibdev,
-					  "Failed to alloc kernel RQ doorbell\n");
+					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
+					  ret);
 				goto err_out;
 			}
 			*hr_qp->rdb.db_record = 0;
@@ -806,14 +809,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
 	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
 	if (ZERO_OR_NULL_PTR(sq_wrid)) {
-		ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
+		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
 		return -ENOMEM;
 	}
 
 	if (hr_qp->rq.wqe_cnt) {
 		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
 		if (ZERO_OR_NULL_PTR(rq_wrid)) {
-			ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
+			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
 			ret = -ENOMEM;
 			goto err_sq;
 		}
@@ -873,7 +876,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
 		if (ret)
-			ibdev_err(ibdev, "Failed to set user SQ size\n");
+			ibdev_err(ibdev,
+				  "failed to set user SQ size, ret = %d.\n",
+				  ret);
 	} else {
 		if (init_attr->create_flags &
 		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
@@ -888,7 +893,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
 		if (ret)
-			ibdev_err(ibdev, "Failed to set kernel SQ size\n");
+			ibdev_err(ibdev,
+				  "failed to set kernel SQ size, ret = %d.\n",
+				  ret);
 	}
 
 	return ret;
@@ -914,45 +921,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to set QP param\n");
+		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
 		return ret;
 	}
 
 	if (!udata) {
 		ret = alloc_kernel_wrid(hr_dev, hr_qp);
 		if (ret) {
-			ibdev_err(ibdev, "Failed to alloc wrid\n");
+			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
+				  ret);
 			return ret;
 		}
 	}
 
 	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
+		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+			  ret);
 		goto err_wrid;
 	}
 
 	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc QP buffer\n");
+		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
 		goto err_db;
 	}
 
 	ret = alloc_qpn(hr_dev, hr_qp);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc QPN\n");
+		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
 		goto err_buf;
 	}
 
 	ret = alloc_qpc(hr_dev, hr_qp);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc QP context\n");
+		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
+			  ret);
 		goto err_qpn;
 	}
 
 	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to store QP\n");
+		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
 		goto err_qpc;
 	}
@@ -1098,9 +1108,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if ((attr_mask & IB_QP_PORT) &&
 	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
-		ibdev_err(&hr_dev->ib_dev,
-			  "attr port_num invalid.attr->port_num=%d\n",
-			  attr->port_num);
+		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
+			  attr->port_num);
 		return -EINVAL;
 	}
@@ -1108,8 +1117,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
 		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
 			ibdev_err(&hr_dev->ib_dev,
-				  "attr pkey_index invalid.attr->pkey_index=%d\n",
+				  "invalid attr, pkey_index = %u.\n",
 				  attr->pkey_index);
 			return -EINVAL;
 		}
 	}
@@ -1117,16 +1126,16 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
 		ibdev_err(&hr_dev->ib_dev,
-			  "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+			  "invalid attr, max_rd_atomic = %u.\n",
 			  attr->max_rd_atomic);
 		return -EINVAL;
 	}
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
 	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
 		ibdev_err(&hr_dev->ib_dev,
-			  "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+			  "invalid attr, max_dest_rd_atomic = %u.\n",
 			  attr->max_dest_rd_atomic);
 		return -EINVAL;
 	}


@@ -93,7 +93,8 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
 				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
 	if (ret < 1) {
-		ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
+		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+			  ret);
 		return -ENOBUFS;
 	}
@@ -101,32 +102,34 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
 				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
 	if (ret < 1) {
-		ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
+		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+			  ret);
 		return -ENOBUFS;
 	}
 
 	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ number, ret = %d.\n", ret);
 		return -ENOMEM;
 	}
 
 	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
+		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
 		goto err_out;
 	}
 
 	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
 	if (ret) {
-		ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
+		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
 		goto err_put;
 	}
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR_OR_NULL(mailbox)) {
 		ret = -ENOMEM;
-		ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
+		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
 		goto err_xa;
 	}
@@ -137,7 +140,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
+		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
 		goto err_xa;
 	}
@@ -198,7 +201,8 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 				  hr_dev->caps.srqwqe_ba_pg_sz +
 				  HNS_HW_PAGE_SHIFT, udata, addr);
 	if (err)
-		ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);
 
 	return err;
 }
@@ -229,14 +233,15 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
 				  udata, addr);
 	if (err) {
-		ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
 		return err;
 	}
 
 	if (!udata) {
 		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
 		if (!idx_que->bitmap) {
-			ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
+			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
 			err = -ENOMEM;
 			goto err_idx_mtr;
 		}
@@ -303,7 +308,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 		ret = ib_copy_from_udata(&ucmd, udata,
 					 min(udata->inlen, sizeof(ucmd)));
 		if (ret) {
-			ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
+			ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
 				  ret);
 			return ret;
 		}
@@ -311,20 +316,21 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
 		return ret;
 	}
 
 	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
+		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
 		goto err_buf_alloc;
 	}
 
 	if (!udata) {
 		ret = alloc_srq_wrid(hr_dev, srq);
 		if (ret) {
-			ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
+			ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
 				  ret);
 			goto err_idx_alloc;
 		}
@@ -336,7 +342,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
 	if (ret) {
-		ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ context, ret = %d.\n", ret);
 		goto err_wrid_alloc;
 	}


@@ -35,6 +35,7 @@ struct ipoctal_channel {
 	unsigned int			pointer_read;
 	unsigned int			pointer_write;
 	struct tty_port			tty_port;
+	bool				tty_registered;
 	union scc2698_channel __iomem	*regs;
 	union scc2698_block __iomem	*block_regs;
 	unsigned int			board_id;
@@ -83,22 +84,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
 	return 0;
 }
 
-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
 {
 	struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
 	struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
-	int err;
-
-	tty->driver_data = channel;
+	int res;
 
 	if (!ipack_get_carrier(ipoctal->dev))
 		return -EBUSY;
 
-	err = tty_port_open(&channel->tty_port, tty, file);
-	if (err)
-		ipack_put_carrier(ipoctal->dev);
+	res = tty_standard_install(driver, tty);
+	if (res)
+		goto err_put_carrier;
+
+	tty->driver_data = channel;
+
+	return 0;
+
+err_put_carrier:
+	ipack_put_carrier(ipoctal->dev);
+
+	return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+	struct ipoctal_channel *channel = tty->driver_data;
 
-	return err;
+	return tty_port_open(&channel->tty_port, tty, file);
 }
 
 static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -266,7 +279,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	int res;
 	int i;
 	struct tty_driver *tty;
-	char name[20];
 	struct ipoctal_channel *channel;
 	struct ipack_region *region;
 	void __iomem *addr;
@@ -357,8 +369,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	/* Fill struct tty_driver with ipoctal data */
 	tty->owner = THIS_MODULE;
 	tty->driver_name = KBUILD_MODNAME;
-	sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
-	tty->name = name;
+	tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+	if (!tty->name) {
+		res = -ENOMEM;
+		goto err_put_driver;
+	}
 	tty->major = 0;
 
 	tty->minor_start = 0;
@@ -374,8 +389,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	res = tty_register_driver(tty);
 	if (res) {
 		dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
-		put_tty_driver(tty);
-		return res;
+		goto err_free_name;
 	}
 
 	/* Save struct tty_driver for use it when uninstalling the device */
@@ -386,7 +400,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 		channel = &ipoctal->channel[i];
 		tty_port_init(&channel->tty_port);
-		tty_port_alloc_xmit_buf(&channel->tty_port);
+		res = tty_port_alloc_xmit_buf(&channel->tty_port);
+		if (res)
+			continue;
 		channel->tty_port.ops = &ipoctal_tty_port_ops;
 		ipoctal_reset_stats(&channel->stats);
@@ -394,13 +410,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 		spin_lock_init(&channel->lock);
 		channel->pointer_read = 0;
 		channel->pointer_write = 0;
-		tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+		tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+							i, NULL, channel, NULL);
 		if (IS_ERR(tty_dev)) {
 			dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+			tty_port_free_xmit_buf(&channel->tty_port);
 			tty_port_destroy(&channel->tty_port);
 			continue;
 		}
-		dev_set_drvdata(tty_dev, channel);
+		channel->tty_registered = true;
 	}
 
 	/*
@@ -412,6 +430,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 			   ipoctal_irq_handler, ipoctal);
 
 	return 0;
+
+err_free_name:
+	kfree(tty->name);
+err_put_driver:
+	put_tty_driver(tty);
+
+	return res;
 }
 
 static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -652,6 +677,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
 
 static const struct tty_operations ipoctal_fops = {
 	.ioctl =		NULL,
+	.install =		ipoctal_install,
 	.open =			ipoctal_open,
 	.close =		ipoctal_close,
 	.write =		ipoctal_write_tty,
@@ -694,12 +720,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
 	for (i = 0; i < NR_CHANNELS; i++) {
 		struct ipoctal_channel *channel = &ipoctal->channel[i];
 
+		if (!channel->tty_registered)
+			continue;
+
 		tty_unregister_device(ipoctal->tty_drv, i);
 		tty_port_free_xmit_buf(&channel->tty_port);
 		tty_port_destroy(&channel->tty_port);
 	}
 
 	tty_unregister_driver(ipoctal->tty_drv);
+	kfree(ipoctal->tty_drv->name);
 	put_tty_driver(ipoctal->tty_drv);
 	kfree(ipoctal);
 }
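
Note: the point of moving the driver-data assignment into a .install hook is ordering; install runs once when the tty is first instantiated, before any open() can observe tty->driver_data. A minimal sketch of the pairing (hypothetical function name, assuming the port device carries the channel as its drvdata, as tty_port_register_device_attr() arranges above):

	static int example_install(struct tty_driver *drv, struct tty_struct *tty)
	{
		struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
		int res = tty_standard_install(drv, tty);	/* link tty into the driver */

		if (res)
			return res;

		tty->driver_data = channel;	/* visible before any open() can run */
		return 0;
	}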


@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
 // End transmit and repeat reset command so we exit sump mode
 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
 static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
 
 #define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
 		buf[i] = cpu_to_be16(v);
 	}
 
-	buf[count] = cpu_to_be16(0xffff);
+	buf[count] = 0xffff;
 
 	irtoy->tx_buf = buf;
 	irtoy->tx_len = size;
 	irtoy->emitted = 0;
 
+	// There is an issue where if the unit is receiving IR while the
+	// first TXSTART command is sent, the device might end up hanging
+	// with its led on. It does not respond to any command when this
+	// happens. To work around this, re-enter sample mode.
+	err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+			    sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+	if (err) {
+		dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+		return err;
+	}
+
+	err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+			    sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+	if (err) {
+		dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+		return err;
+	}
+
 	err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
 			    STATE_TX);
 	kfree(buf);


@@ -2613,8 +2613,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 	if (err)
 		return err;
 
-	/* Port Control 2: don't force a good FCS, set the maximum frame size to
-	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+	/* Port Control 2: don't force a good FCS, set the MTU size to
+	 * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
 	 * untagged frames on this port, do a destination address lookup on all
 	 * received packets as usual, disable ARP mirroring and don't send a
 	 * copy of all transmitted/received frames on this port to the CPU.
@@ -2633,7 +2633,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 		return err;
 
 	if (chip->info->ops->port_set_jumbo_size) {
-		err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+		err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
 		if (err)
 			return err;
 	}
@@ -2718,10 +2718,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
 	struct mv88e6xxx_chip *chip = ds->priv;
 
 	if (chip->info->ops->port_set_jumbo_size)
-		return 10240;
+		return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 	else if (chip->info->ops->set_max_frame_size)
-		return 1632;
-	return 1522;
+		return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+	return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 }
 
 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -2729,6 +2729,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int ret = 0;
 
+	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+		new_mtu += EDSA_HLEN;
+
 	mv88e6xxx_reg_lock(chip);
 	if (chip->info->ops->port_set_jumbo_size)
 		ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
@@ -3455,7 +3458,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
 	.port_set_frame_mode = mv88e6351_port_set_frame_mode,
 	.port_set_egress_floods = mv88e6352_port_set_egress_floods,
 	.port_set_ether_type = mv88e6351_port_set_ether_type,
-	.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_limit = mv88e6097_port_pause_limit,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3480,6 +3482,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
 	.avb_ops = &mv88e6165_avb_ops,
 	.ptp_ops = &mv88e6165_ptp_ops,
 	.phylink_validate = mv88e6185_phylink_validate,
+	.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6165_ops = {


@@ -18,6 +18,7 @@
 #include <linux/timecounter.h>
 #include <net/dsa.h>
 
+#define EDSA_HLEN		8
 #define MV88E6XXX_N_FID		4096
 
 /* PVT limits for 4-bit port and 5-bit switch */


@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
 	u16 val;
 	int err;
 
+	mtu += ETH_HLEN + ETH_FCS_LEN;
+
 	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
 	if (err)
 		return err;


@@ -1082,6 +1082,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
 	u16 reg;
 	int err;
 
+	size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
 	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
 	if (err)
 		return err;
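
Note: taken together, the mv88e6xxx hunks make the advertised MTU and the programmed frame size describe the same on-wire limit. A quick check of the arithmetic, assuming the usual header sizes (VLAN_ETH_HLEN = 18, EDSA_HLEN = 8, ETH_FCS_LEN = 4):

	max_mtu   = 10240 - 18 - 8 - 4;	/* = 10210 bytes of payload reported to DSA */
	frame_len = 10218 + 18 + 4;	/* = 10240 bytes programmed at setup time   */

The 8-byte EDSA tag is added back in mv88e6xxx_change_mtu() only for DSA and CPU ports, since only those ports carry it.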


@@ -504,8 +504,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
 	if (phy_interface_mode_is_rgmii(phy_mode)) {
 		val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
-		val &= ~ENETC_PM0_IFM_EN_AUTO;
-		val &= ENETC_PM0_IFM_IFMODE_MASK;
+		val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
 		val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
 		enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
 	}
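
Note: the enetc bug is a classic mask inversion; clearing bits requires the complement of the mask. With the names from the hunk:

	val &= ENETC_PM0_IFM_IFMODE_MASK;	/* old: keeps ONLY the IFMODE bits */
	val &= ~ENETC_PM0_IFM_IFMODE_MASK;	/* fixed: clears the IFMODE bits   */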


@@ -444,6 +444,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	if (hns3_nic_resetting(netdev))
 		return -EBUSY;
 
+	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+		netdev_warn(netdev, "net open repeatedly!\n");
+		return 0;
+	}
+
 	netif_carrier_off(netdev);
 
 	ret = hns3_nic_set_real_num_queue(netdev);


@@ -300,33 +300,8 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
 	return ret_val;
 }
 
-/**
- * hns3_nic_self_test - self test
- * @ndev: net device
- * @eth_test: test cmd
- * @data: test result
- */
-static void hns3_self_test(struct net_device *ndev,
-			   struct ethtool_test *eth_test, u64 *data)
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
 {
-	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hnae3_handle *h = priv->ae_handle;
-	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
-	bool if_running = netif_running(ndev);
-	int test_index = 0;
-	u32 i;
-
-	if (hns3_nic_resetting(ndev)) {
-		netdev_err(ndev, "dev resetting!");
-		return;
-	}
-
-	/* Only do offline selftest, or pass by default */
-	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
-		return;
-
-	netif_dbg(h, drv, ndev, "self test start");
-
 	st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
 	st_param[HNAE3_LOOP_APP][1] =
 		h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -343,13 +318,26 @@ static void hns3_self_test(struct net_device *ndev,
 	st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
 	st_param[HNAE3_LOOP_PHY][1] =
 		h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev,
+				  bool if_running, int (*st_param)[2])
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (netif_msg_ifdown(h))
+		netdev_info(ndev, "self test start\n");
+
+	hns3_set_selftest_param(h, st_param);
 
 	if (if_running)
 		ndev->netdev_ops->ndo_stop(ndev);
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 	/* Disable the vlan filter for selftest does not support it */
-	if (h->ae_algo->ops->enable_vlan_filter)
+	if (h->ae_algo->ops->enable_vlan_filter &&
+	    ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
 		h->ae_algo->ops->enable_vlan_filter(h, false);
 #endif
 
@@ -361,6 +349,36 @@ static void hns3_self_test(struct net_device *ndev,
 		h->ae_algo->ops->halt_autoneg(h, true);
 
 	set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+	if (h->ae_algo->ops->halt_autoneg)
+		h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+	if (h->ae_algo->ops->enable_vlan_filter &&
+	    ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+	if (if_running)
+		ndev->netdev_ops->ndo_open(ndev);
+
+	if (netif_msg_ifdown(h))
+		netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+			     struct ethtool_test *eth_test, u64 *data)
+{
+	int test_index = 0;
+	u32 i;
 
 	for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
 		enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
@@ -379,21 +397,32 @@ static void hns3_self_test(struct net_device *ndev,
 
 		test_index++;
 	}
+}
 
-	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
-
-	if (h->ae_algo->ops->halt_autoneg)
-		h->ae_algo->ops->halt_autoneg(h, false);
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-	if (h->ae_algo->ops->enable_vlan_filter)
-		h->ae_algo->ops->enable_vlan_filter(h, true);
-#endif
-
-	if (if_running)
-		ndev->netdev_ops->ndo_open(ndev);
+/**
+ * hns3_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+			   struct ethtool_test *eth_test, u64 *data)
+{
+	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+	bool if_running = netif_running(ndev);
 
-	netif_dbg(h, drv, ndev, "self test end\n");
+	if (hns3_nic_resetting(ndev)) {
+		netdev_err(ndev, "dev resetting!");
+		return;
+	}
+
+	/* Only do offline selftest, or pass by default */
+	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+		return;
+
+	hns3_selftest_prepare(ndev, if_running, st_param);
+	hns3_do_selftest(ndev, st_param, eth_test, data);
+	hns3_selftest_restore(ndev, if_running);
 }
 
 static int hns3_get_sset_count(struct net_device *netdev, int stringset)


@@ -248,6 +248,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 	}
 
 	hclge_tm_schd_info_update(hdev, num_tc);
+	if (num_tc > 1)
+		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+	else
+		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 
 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
 	if (ret)
@@ -313,8 +317,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 	u8 i, j, pfc_map, *prio_tc;
 	int ret;
 
-	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
-	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;
 
 	if (pfc->pfc_en == hdev->tm_info.pfc_en)


@@ -7581,15 +7581,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 	}
 
 	/* check if we just hit the duplicate */
-	if (!ret) {
-		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
-			 vport->vport_id, addr);
-		return 0;
-	}
-
-	dev_err(&hdev->pdev->dev,
-		"PF failed to add unicast entry(%pM) in the MAC table\n",
-		addr);
+	if (!ret)
+		return -EEXIST;
 
 	return ret;
 }
@@ -7743,7 +7736,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
 		} else {
 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
 				&vport->state);
-			break;
+
+			/* If one unicast mac address is existing in hardware,
+			 * we need to try whether other unicast mac addresses
+			 * are new addresses that can be added.
+			 */
+			if (ret != -EEXIST)
+				break;
 		}
 	}
 }
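
Note: a duplicate unicast entry used to be collapsed into "success", which made the sync loop stop early. Returning a distinguishable -EEXIST lets the caller keep going; the control flow reduces to something like this sketch (not the literal loop body):

	ret = sync(vport, mac_node->mac_addr);
	if (ret && ret != -EEXIST)
		break;	/* real failure: leave the rest for the next sync pass */
	/* on -EEXIST the entry is already in hardware; try the next address */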


@@ -646,14 +646,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
 		hdev->tm_info.prio_tc[i] =
 			(i >= hdev->tm_info.num_tc) ? 0 : i;
-
-	/* DCB is enabled if we have more than 1 TC or pfc_en is
-	 * non-zero.
-	 */
-	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }
 
 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -682,12 +674,12 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 	}
 }
 
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 {
-	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
 			dev_warn(&hdev->pdev->dev,
-				 "DCB is disable, but last mode is FC_PFC\n");
+				 "Only 1 tc used, but last mode is FC_PFC\n");
 
 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -700,6 +692,27 @@ static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 	}
 }
 
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
+{
+	if (!hdev->tm_info.pfc_en) {
+		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+		return;
+	}
+
+	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+	}
+}
+
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+		hclge_update_fc_mode(hdev);
+	else
+		hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
 {
 	hclge_tm_pg_info_init(hdev);
@@ -708,7 +721,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
 
 	hclge_tm_vport_info_update(hdev);
 
-	hclge_pfc_info_init(hdev);
+	hclge_tm_pfc_info_update(hdev);
 }
 
 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1444,19 +1457,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 	hclge_tm_schd_info_init(hdev);
 }
 
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
-	/* DCB is enabled if we have more than 1 TC or pfc_en is
-	 * non-zero.
-	 */
-	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
-	hclge_pfc_info_init(hdev);
-}
-
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
 	int ret;
@@ -1502,7 +1502,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
 	if (ret)
 		return ret;
 
-	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
 		return 0;
 
 	return hclge_tm_bp_setup(hdev);


@@ -2431,11 +2431,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
 		sizeof(info->bus_info));
 }
 
-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
+
 static int e100_get_regs_len(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
-	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+	/* We know the number of registers, and the size of the dump buffer.
+	 * Calculate the total size in bytes.
+	 */
+	return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
 }
 
 static void e100_get_regs(struct net_device *netdev,
@@ -2449,14 +2453,18 @@
 	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
 		ioread8(&nic->csr->scb.cmd_lo) << 16 |
 		ioread16(&nic->csr->scb.status);
-	for (i = E100_PHY_REGS; i >= 0; i--)
-		buff[1 + E100_PHY_REGS - i] =
-			mdio_read(netdev, nic->mii.phy_id, i);
+	for (i = 0; i < E100_PHY_REGS; i++)
+		/* Note that we read the registers in reverse order. This
+		 * ordering is the ABI apparently used by ethtool and other
+		 * applications.
+		 */
+		buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+					E100_PHY_REGS - 1 - i);
 	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
 	e100_exec_cb(nic, NULL, e100_dump);
 	msleep(10);
-	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
+	memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
 	       sizeof(nic->mem->dump_buf));
 }
 
 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
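
Note: the e100 sizing is easy to check by hand. ethtool's get_regs buffer is an array of u32, so the length callback must count bytes, and the MII register file spans 0x00..0x1C, i.e. 29 registers:

	/* old: 1 + 0x1C + sizeof(dump_buf)                 =  29 + dump bytes */
	/* new: (1 + 0x1D) * sizeof(u32) + sizeof(dump_buf) = 120 + dump bytes */

The old value both under-reported the buffer by nearly a factor of four and counted one PHY register too few.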


@@ -3216,7 +3216,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
 		max_combined = ixgbe_max_rss_indices(adapter);
 	}
 
-	return max_combined;
+	return min_t(int, max_combined, num_online_cpus());
 }
 
 static void ixgbe_get_channels(struct net_device *dev,


@@ -10123,6 +10123,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct bpf_prog *old_prog;
 	bool need_reset;
+	int num_queues;
 
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		return -EINVAL;
@@ -10172,11 +10173,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	/* Kick start the NAPI context if there is an AF_XDP socket open
 	 * on that queue id. This so that receiving will start.
 	 */
-	if (need_reset && prog)
-		for (i = 0; i < adapter->num_rx_queues; i++)
+	if (need_reset && prog) {
+		num_queues = min_t(int, adapter->num_rx_queues,
+				   adapter->num_xdp_queues);
+		for (i = 0; i < num_queues; i++)
 			if (adapter->xdp_ring[i]->xsk_pool)
 				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
 						       XDP_WAKEUP_RX);
+	}
 
 	return 0;
 }
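
Note: adapter->xdp_ring[] only has num_xdp_queues entries, so on a device with more RX queues than XDP rings the old loop dereferenced past the end of the array. The guard is just a clamp:

	num_queues = min_t(int, adapter->num_rx_queues, adapter->num_xdp_queues);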


@@ -4,8 +4,6 @@
 #
 
 obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
 obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o


@ -1031,6 +1031,7 @@ int ks8851_suspend(struct device *dev)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(ks8851_suspend);
int ks8851_resume(struct device *dev) int ks8851_resume(struct device *dev)
{ {
@ -1044,6 +1045,7 @@ int ks8851_resume(struct device *dev)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(ks8851_resume);
#endif #endif
int ks8851_probe_common(struct net_device *netdev, struct device *dev, int ks8851_probe_common(struct net_device *netdev, struct device *dev,
@ -1175,6 +1177,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
err_reg_io: err_reg_io:
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(ks8851_probe_common);
int ks8851_remove_common(struct device *dev) int ks8851_remove_common(struct device *dev)
{ {
@ -1191,3 +1194,8 @@ int ks8851_remove_common(struct device *dev)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(ks8851_remove_common);
MODULE_DESCRIPTION("KS8851 Network driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");


@ -27,7 +27,12 @@
#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400 #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
 #define MII_BCM7XXX_SHD_3_AN_STAT	0xb
@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
 	return genphy_config_aneg(phydev);
 }
 
-static int phy_set_clr_bits(struct phy_device *dev, int location,
-			    int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+			      int set_mask, int clr_mask)
 {
 	int v, ret;
 
-	v = phy_read(dev, location);
+	v = __phy_read(dev, location);
 	if (v < 0)
 		return v;
 
 	v &= ~clr_mask;
 	v |= set_mask;
 
-	ret = phy_write(dev, location, v);
+	ret = __phy_write(dev, location, v);
 	if (ret < 0)
 		return ret;
 
 	return v;
 }
 
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+			    int set_mask, int clr_mask)
+{
+	int ret;
+
+	mutex_lock(&dev->mdio.bus->mdio_lock);
+	ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+	mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+
 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
 {
 	int ret;
@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
 	return bcm7xxx_28nm_ephy_apd_enable(phydev);
 }
 
+#define MII_BCM7XXX_REG_INVALID	0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+	switch (regnum) {
+	case MDIO_CTRL1:
+		return MII_BCM7XXX_SHD_3_PCS_CTRL;
+	case MDIO_STAT1:
+		return MII_BCM7XXX_SHD_3_PCS_STATUS;
+	case MDIO_PCS_EEE_ABLE:
+		return MII_BCM7XXX_SHD_3_EEE_CAP;
+	case MDIO_AN_EEE_ADV:
+		return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+	case MDIO_AN_EEE_LPABLE:
+		return MII_BCM7XXX_SHD_3_EEE_LP;
+	case MDIO_PCS_EEE_WK_ERR:
+		return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+	default:
+		return MII_BCM7XXX_REG_INVALID;
+	}
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+	return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+				      int devnum, u16 regnum)
+{
+	u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+	int ret;
+
+	if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+	    shd == MII_BCM7XXX_REG_INVALID)
+		return -EOPNOTSUPP;
+
+	/* set shadow mode 2 */
+	ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+				 MII_BCM7XXX_SHD_MODE_2, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Access the desired shadow register address */
+	ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+	if (ret < 0)
+		goto reset_shadow_mode;
+
+	ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+	/* reset shadow mode 2 */
+	__phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+			   MII_BCM7XXX_SHD_MODE_2);
+	return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+				       int devnum, u16 regnum, u16 val)
+{
+	u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+	int ret;
+
+	if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+	    shd == MII_BCM7XXX_REG_INVALID)
+		return -EOPNOTSUPP;
+
+	/* set shadow mode 2 */
+	ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+				 MII_BCM7XXX_SHD_MODE_2, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Access the desired shadow register address */
+	ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+	if (ret < 0)
+		goto reset_shadow_mode;
+
+	/* Write the desired value in the shadow register */
+	__phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+	/* reset shadow mode 2 */
+	return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+				  MII_BCM7XXX_SHD_MODE_2);
+}
+
 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
 {
 	int ret;
@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
 	.get_stats	= bcm7xxx_28nm_get_phy_stats,	\
 	.probe		= bcm7xxx_28nm_probe,		\
 	.remove		= bcm7xxx_28nm_remove,		\
+	.read_mmd	= bcm7xxx_28nm_ephy_read_mmd,	\
+	.write_mmd	= bcm7xxx_28nm_ephy_write_mmd,	\
 }
 
 #define BCM7XXX_40NM_EPHY(_oui, _name)	\
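The bcm7xxx change above splits the register helper into a lock-free __phy_set_clr_bits(), for callers that already hold the MDIO bus lock (the new read_mmd/write_mmd hooks must keep their multi-step shadow-register sequences atomic), and a locking wrapper for everyone else. As a minimal userspace sketch of that locked/unlocked split — pthreads, with invented names standing in for the MDIO accessors:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fake_reg;	/* stands in for a device register */

/* Caller must hold bus_lock: read-modify-write without re-locking. */
static unsigned int __reg_set_clr_bits(unsigned int set, unsigned int clr)
{
	fake_reg = (fake_reg & ~clr) | set;
	return fake_reg;
}

/* Convenience wrapper for callers that do not hold the lock. */
static unsigned int reg_set_clr_bits(unsigned int set, unsigned int clr)
{
	unsigned int v;

	pthread_mutex_lock(&bus_lock);
	v = __reg_set_clr_bits(set, clr);
	pthread_mutex_unlock(&bus_lock);
	return v;
}

int main(void)
{
	unsigned int v;

	/* A multi-step sequence stays atomic by taking the lock once. */
	pthread_mutex_lock(&bus_lock);
	__reg_set_clr_bits(0x1, 0x0);	/* e.g. enter shadow mode */
	__reg_set_clr_bits(0x0, 0x1);	/* e.g. leave shadow mode */
	pthread_mutex_unlock(&bus_lock);

	/* Single-shot callers use the locking wrapper. */
	v = reg_set_clr_bits(0x8, 0x0);
	printf("reg = %#x\n", v);
	return 0;
}

The double-underscore prefix follows the kernel convention that the caller owns the lock; the wrapper exists so one-shot callers cannot forget to take it.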


@@ -2354,7 +2354,7 @@ static int remove_net_device(struct hso_device *hso_dev)
 }
 
 /* Frees our network device */
-static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
+static void hso_free_net_device(struct hso_device *hso_dev)
 {
 	int i;
 	struct hso_net *hso_net = dev2net(hso_dev);
@@ -2377,7 +2377,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
 	kfree(hso_net->mux_bulk_tx_buf);
 	hso_net->mux_bulk_tx_buf = NULL;
 
-	if (hso_net->net && !bailout)
+	if (hso_net->net)
 		free_netdev(hso_net->net);
 
 	kfree(hso_dev);
@@ -3137,7 +3137,7 @@ static void hso_free_interface(struct usb_interface *interface)
 				rfkill_unregister(rfk);
 				rfkill_destroy(rfk);
 			}
-			hso_free_net_device(network_table[i], false);
+			hso_free_net_device(network_table[i]);
 		}
 	}
 }


@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 static void smsc95xx_handle_link_change(struct net_device *net)
 {
+	struct usbnet *dev = netdev_priv(net);
+
 	phy_print_status(net->phydev);
+	usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
 }
 
 static int smsc95xx_start_phy(struct usbnet *dev)


@@ -1799,8 +1799,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
 		bcn_int -= data->bcn_delta;
 		data->bcn_delta = 0;
 	}
-	hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
-			ns_to_ktime(bcn_int * NSEC_PER_USEC));
+	hrtimer_forward_now(&data->beacon_timer,
+			    ns_to_ktime(bcn_int * NSEC_PER_USEC));
 	return HRTIMER_RESTART;
 }


@@ -831,6 +831,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	blk_status_t ret = BLK_STS_OK;
 
 	nvme_clear_nvme_request(req);
@@ -877,7 +878,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		return BLK_STS_IOERR;
 	}
 
-	nvme_req(req)->genctr++;
+	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+		nvme_req(req)->genctr++;
 	cmd->common.command_id = nvme_cid(req);
 	trace_nvme_setup_cmd(req, cmd);
 	return ret;


@@ -144,6 +144,12 @@ enum nvme_quirks {
 	 * NVMe 1.3 compliance.
 	 */
 	NVME_QUIRK_NO_NS_DESC_LIST	= (1 << 15),
+
+	/*
+	 * The controller requires the command_id value to be limited, so skip
+	 * encoding the generation sequence number.
+	 */
+	NVME_QUIRK_SKIP_CID_GEN		= (1 << 17),
 };
 
 /*


@@ -3259,7 +3259,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
-				NVME_QUIRK_SHARED_TAGS },
+				NVME_QUIRK_SHARED_TAGS |
+				NVME_QUIRK_SKIP_CID_GEN },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }


@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
 MODULE_VERSION(CSIO_DRV_VERSION);
 MODULE_FIRMWARE(FW_FNAME_T5);
 MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");


@@ -3913,7 +3913,6 @@ struct qla_hw_data {
 		uint32_t	scm_supported_f:1;
 		/* Enabled in Driver */
 		uint32_t	scm_enabled:1;
-		uint32_t	max_req_queue_warned:1;
 		uint32_t	plogi_template_valid:1;
 	} flags;


@@ -4158,6 +4158,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 		ql_dbg(ql_dbg_init, vha, 0x0125,
 		    "INTa mode: Enabled.\n");
 		ha->flags.mr_intr_valid = 1;
+		/* Set max_qpairs to 0, as MSI-X and MSI are not enabled */
+		ha->max_qpairs = 0;
 	}
 
 clear_risc_ints:


@@ -109,19 +109,24 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
 		return -EINVAL;
 	}
 
-	if (ha->queue_pair_map[qidx]) {
-		*handle = ha->queue_pair_map[qidx];
-		ql_log(ql_log_info, vha, 0x2121,
-		    "Returning existing qpair of %p for idx=%x\n",
-		    *handle, qidx);
-		return 0;
-	}
+	/* Use base qpair if max_qpairs is 0 */
+	if (!ha->max_qpairs) {
+		qpair = ha->base_qpair;
+	} else {
+		if (ha->queue_pair_map[qidx]) {
+			*handle = ha->queue_pair_map[qidx];
+			ql_log(ql_log_info, vha, 0x2121,
+			    "Returning existing qpair of %p for idx=%x\n",
+			    *handle, qidx);
+			return 0;
+		}
 
-	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
-	if (qpair == NULL) {
-		ql_log(ql_log_warn, vha, 0x2122,
-		    "Failed to allocate qpair\n");
-		return -EINVAL;
+		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+		if (!qpair) {
+			ql_log(ql_log_warn, vha, 0x2122,
+			    "Failed to allocate qpair\n");
+			return -EINVAL;
+		}
 	}
 	*handle = qpair;
 
@@ -715,18 +720,9 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
 
 	WARN_ON(vha->nvme_local_port);
 
-	if (ha->max_req_queues < 3) {
-		if (!ha->flags.max_req_queue_warned)
-			ql_log(ql_log_info, vha, 0x2120,
-			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
-			    __func__, ha->max_req_queues);
-		ha->flags.max_req_queue_warned = 1;
-		return ret;
-	}
-
 	qla_nvme_fc_transport.max_hw_queues =
 	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
-		(uint8_t)(ha->max_req_queues - 2));
+		(uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
 
 	pinfo.node_name = wwn_to_u64(vha->node_name);
 	pinfo.port_name = wwn_to_u64(vha->port_name);


@@ -324,8 +324,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 		const char *str)
 {
-	int off = (int)tag - hba->nutrs;
-	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
+	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
 
 	trace_android_vh_ufs_send_tm_command(hba, tag, str);
 	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,


@@ -1220,8 +1220,25 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 	new_row_size = new_cols << 1;
 	new_screen_size = new_row_size * new_rows;
 
-	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
-		return 0;
+	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) {
+		/*
+		 * This function is being called here to cover the case
+		 * where the userspace calls FBIOPUT_VSCREENINFO twice,
+		 * passing the same fb_var_screeninfo containing the fields
+		 * yres/xres equal to a number non-multiple of vc_font.height
+		 * and yres_virtual/xres_virtual equal to a number lesser than
+		 * vc_font.height and yres/xres.
+		 * In the second call, the struct fb_var_screeninfo isn't
+		 * being modified by the underlying driver because of the
+		 * if above, and this causes the fbcon_display->vrows to become
+		 * negative and it eventually leads to out-of-bound
+		 * access by the imageblit function.
+		 * To give the correct values to the struct and to not have
+		 * to deal with possible errors from the code below, we call
+		 * the resize_screen here as well.
+		 */
+		return resize_screen(vc, new_cols, new_rows, user);
+	}
 
 	if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
 		return -EINVAL;


@@ -1101,6 +1101,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
 	return 0;
 }
 
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+	if (priv_dev->dev_ver < DEV_VER_V3)
+		return;
+
+	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+	}
+}
+
 /**
  * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
  * @priv_ep: endpoint object
@@ -1352,6 +1365,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
 		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
 		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+		cdns3_rearm_drdy_if_needed(priv_ep);
 		trace_cdns3_doorbell_epx(priv_ep->name,
 					 readl(&priv_dev->regs->ep_traddr));
 	}


@@ -627,7 +627,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			vaddr = eppnt->p_vaddr;
 			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
-				elf_type |= MAP_FIXED_NOREPLACE;
+				elf_type |= MAP_FIXED;
 			else if (no_base && interp_elf_ex->e_type == ET_DYN)
 				load_addr = -vaddr;


@@ -524,7 +524,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
 {
 	struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
 
-	if (de)
+	if (!IS_ERR(de))
 		d_inode(de)->i_size = file_size;
 }
 EXPORT_SYMBOL_GPL(debugfs_create_file_size);


@@ -556,7 +556,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 	struct dir_private_info *info = file->private_data;
 	struct inode *inode = file_inode(file);
 	struct fname *fname;
-	int ret;
+	int ret = 0;
 
 	if (!info) {
 		info = ext4_htree_create_dir_info(file, ctx->pos);
@@ -604,7 +604,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 						   info->curr_minor_hash,
 						   &info->next_hash);
 			if (ret < 0)
-				return ret;
+				goto finished;
 			if (ret == 0) {
 				ctx->pos = ext4_get_htree_eof(file);
 				break;
@@ -635,7 +635,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 	}
 finished:
 	info->last_pos = ctx->pos;
-	return 0;
+	return ret < 0 ? ret : 0;
 }
 
 static int ext4_release_dir(struct inode *inode, struct file *filp)


@@ -5907,7 +5907,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
 }
 
 /* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
 {
 	int ret;
 	struct ext4_map_blocks map;
@@ -5916,9 +5916,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
 	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
 
 	ret = ext4_map_blocks(NULL, inode, &map, 0);
+	if (ret < 0)
+		return ret;
 	if (ret != 0)
-		return;
+		return 0;
 	*cur = *cur + map.m_len;
+	return 0;
 }
 
 /* Count number of blocks used by this inode and update i_blocks */
@@ -5967,7 +5970,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
 	 * iblocks by total number of differences found.
 	 */
 	cur = 0;
-	skip_hole(inode, &cur);
+	ret = skip_hole(inode, &cur);
+	if (ret < 0)
+		goto out;
 	path = ext4_find_extent(inode, cur, NULL, 0);
 	if (IS_ERR(path))
 		goto out;
@@ -5986,8 +5991,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
 		}
 		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
 					ext4_ext_get_actual_len(ex));
-		skip_hole(inode, &cur);
-
+		ret = skip_hole(inode, &cur);
+		if (ret < 0) {
+			ext4_ext_drop_refs(path);
+			kfree(path);
+			break;
+		}
 		path2 = ext4_find_extent(inode, cur, NULL, 0);
 		if (IS_ERR(path2)) {
 			ext4_ext_drop_refs(path);


@@ -832,6 +832,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
 					    sizeof(lrange), (u8 *)&lrange, crc))
 				return -ENOSPC;
 		} else {
+			unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+				EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+			/* Limit the number of blocks in one extent */
+			map.m_len = min(max, map.m_len);
+
 			fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
 			ex = (struct ext4_extent *)&fc_ext.fc_ex;
 			ex->ee_block = cpu_to_le32(map.m_lblk);


@@ -1654,6 +1654,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	int ret;
 	bool allocated = false;
+	bool reserved = false;
 
 	/*
 	 * If the cluster containing lblk is shared with a delayed,
@@ -1670,6 +1671,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
 		ret = ext4_da_reserve_space(inode);
 		if (ret != 0)   /* ENOSPC */
 			goto errout;
+		reserved = true;
 	} else {   /* bigalloc */
 		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
 			if (!ext4_es_scan_clu(inode,
@@ -1682,6 +1684,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
 				ret = ext4_da_reserve_space(inode);
 				if (ret != 0)   /* ENOSPC */
 					goto errout;
+				reserved = true;
 			} else {
 				allocated = true;
 			}
@@ -1692,6 +1695,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
 	}
 
 	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+	if (ret && reserved)
+		ext4_da_release_space(inode, 1);
 
 errout:
 	return ret;
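The reserved flag in the hunk above makes sure the error path releases only a reservation that this call actually took. A standalone sketch of the same track-then-unwind idiom (hypothetical names, not ext4 code):

#include <stdbool.h>
#include <stdio.h>

static int space = 2;	/* pretend pool with two free units */

static int reserve_space(void)
{
	if (space == 0)
		return -1;	/* ENOSPC */
	space--;
	return 0;
}

static void release_space(void)
{
	space++;
}

static int insert_block(bool need_reservation, bool insert_fails)
{
	bool reserved = false;
	int ret = 0;

	if (need_reservation) {
		ret = reserve_space();
		if (ret != 0)
			goto out;
		reserved = true;
	}

	ret = insert_fails ? -1 : 0;	/* stands in for the real insert */

	/* Undo the reservation only if *we* took it and the insert failed. */
	if (ret && reserved)
		release_space();
out:
	return ret;
}

int main(void)
{
	printf("ok path: %d, space=%d\n", insert_block(true, false), space);
	printf("err path: %d, space=%d\n", insert_block(true, true), space);
	return 0;
}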


@@ -1356,6 +1356,12 @@ static void ext4_destroy_inode(struct inode *inode)
 				true);
 		dump_stack();
 	}
+
+	if (EXT4_I(inode)->i_reserved_data_blocks)
+		ext4_msg(inode->i_sb, KERN_ERR,
+			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+			 inode->i_ino, EXT4_I(inode),
+			 EXT4_I(inode)->i_reserved_data_blocks);
 }
 
 static void init_once(void *foo)
@@ -3194,17 +3200,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
  */
 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 {
-	loff_t res = EXT4_NDIR_BLOCKS;
+	unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
 	int meta_blocks;
-	loff_t upper_limit;
-	/* This is calculated to be the largest file size for a dense, block
+
+	/*
+	 * This is calculated to be the largest file size for a dense, block
 	 * mapped file such that the file's total number of 512-byte sectors,
 	 * including data and all indirect blocks, does not exceed (2^48 - 1).
 	 *
 	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
 	 * number of 512-byte sectors of the file.
 	 */
 	if (!has_huge_files) {
 		/*
 		 * !has_huge_files or implies that the inode i_block field
@@ -3247,7 +3253,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 	if (res > MAX_LFS_FILESIZE)
 		res = MAX_LFS_FILESIZE;
 
-	return res;
+	return (loff_t)res;
 }
 
 static ext4_fsblk_t descriptor_loc(struct super_block *sb,


@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
 	 * (level 0) and ascending to the root node (level 'num_levels - 1').
 	 * Then at the end (level 'num_levels'), calculate the root hash.
 	 */
-	blocks = (inode->i_size + params->block_size - 1) >>
+	blocks = ((u64)inode->i_size + params->block_size - 1) >>
 		 params->log_blocksize;
 	for (level = 0; level <= params->num_levels; level++) {
 		err = build_merkle_tree_level(filp, level, blocks, params,


@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
 	 */
 
 	/* Compute number of levels and the number of blocks in each level */
-	blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+	blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
 	pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
 	while (blocks > 1) {
 		if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
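Both fs-verity hunks make the same fix: with i_size close to S64_MAX, the signed addition i_size + block_size - 1 can overflow, which is undefined behaviour, while casting to u64 first keeps the sum in unsigned arithmetic where it still fits. A small worked example of the round-up-to-blocks computation (plain C, invented helper name):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Round a byte size up to units of (1 << log_blocksize)-byte blocks. */
static uint64_t size_to_blocks(int64_t i_size, unsigned int log_blocksize)
{
	uint64_t block_size = 1ULL << log_blocksize;

	/* The cast keeps the addition in unsigned 64-bit arithmetic. */
	return ((uint64_t)i_size + block_size - 1) >> log_blocksize;
}

int main(void)
{
	/* 8192 bytes in 4 KiB blocks -> exactly 2 blocks. */
	printf("%" PRIu64 "\n", size_to_blocks(8192, 12));
	/* 8193 bytes -> rounds up to 3 blocks. */
	printf("%" PRIu64 "\n", size_to_blocks(8193, 12));
	/*
	 * Near S64_MAX the sum no longer fits in a signed 64-bit value,
	 * but it fits comfortably in a u64, so the shift stays correct.
	 */
	printf("%" PRIu64 "\n", size_to_blocks(INT64_MAX - 100, 12));
	return 0;
}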


@@ -534,6 +534,8 @@ struct btf_func_model {
  * programs only. Should not be used with normal calls and indirect calls.
  */
 #define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)
 
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2


@@ -553,5 +553,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
 		     u8 rt_family, unsigned char *flags, bool skip_oif);
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
-		    int nh_weight, u8 rt_family);
+		    int nh_weight, u8 rt_family, u32 nh_tclassid);
 #endif  /* _NET_FIB_H */


@@ -211,7 +211,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
 		struct fib_nh_common *nhc = &nhi->fib_nhc;
 		int weight = nhg->nh_entries[i].weight;
 
-		if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+		if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
 			return -EMSGSIZE;
 	}


@@ -481,8 +481,10 @@ struct sock {
 	u32			sk_ack_backlog;
 	u32			sk_max_ack_backlog;
 	kuid_t			sk_uid;
+	spinlock_t		sk_peer_lock;
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
+
 	long			sk_rcvtimeo;
 	ktime_t			sk_stamp;
 #if BITS_PER_LONG==32


@@ -368,6 +368,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		const struct btf_type *mtype, *ptype;
 		struct bpf_prog *prog;
 		u32 moff;
+		u32 flags;
 
 		moff = btf_member_bit_offset(t, member) / 8;
 		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -431,10 +432,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 
 		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
 		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+		flags = st_ops->func_models[i].ret_size > 0 ?
+			BPF_TRAMP_F_RET_FENTRY_RET : 0;
 		err = arch_prepare_bpf_trampoline(NULL, image,
 						  st_map->image + PAGE_SIZE,
-						  &st_ops->func_models[i], 0,
-						  tprogs, NULL);
+						  &st_ops->func_models[i],
+						  flags, tprogs, NULL);
 		if (err < 0)
 			goto reset_unlock;


@@ -833,7 +833,7 @@ int bpf_jit_charge_modmem(u32 pages)
 {
 	if (atomic_long_add_return(pages, &bpf_jit_current) >
 	    (bpf_jit_limit >> PAGE_SHIFT)) {
-		if (!capable(CAP_SYS_ADMIN)) {
+		if (!bpf_capable()) {
 			atomic_long_sub(pages, &bpf_jit_current);
 			return -EPERM;
 		}


@@ -16,8 +16,10 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 		if (ti_work & _TIF_NEED_RESCHED)
 			schedule();
 
-		if (ti_work & _TIF_NOTIFY_RESUME)
+		if (ti_work & _TIF_NOTIFY_RESUME) {
 			tracehook_notify_resume(NULL);
+			rseq_handle_notify_resume(NULL, NULL);
+		}
 
 		ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
 		if (ret)


@@ -268,9 +268,16 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 		return;
 	if (unlikely(!access_ok(t->rseq, sizeof(*t->rseq))))
 		goto error;
-	ret = rseq_ip_fixup(regs);
-	if (unlikely(ret < 0))
-		goto error;
+	/*
+	 * regs is NULL if and only if the caller is in a syscall path. Skip
+	 * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
+	 * kill a misbehaving userspace on debug kernels.
+	 */
+	if (regs) {
+		ret = rseq_ip_fixup(regs);
+		if (unlikely(ret < 0))
+			goto error;
+	}
 	if (unlikely(rseq_update_cpu_id(t)))
 		goto error;
 	return;


@@ -618,9 +618,17 @@ static struct attribute *sugov_attrs[] = {
 };
 ATTRIBUTE_GROUPS(sugov);
 
+static void sugov_tunables_free(struct kobject *kobj)
+{
+	struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);
+
+	kfree(to_sugov_tunables(attr_set));
+}
+
 static struct kobj_type sugov_tunables_ktype = {
 	.default_groups = sugov_groups,
 	.sysfs_ops = &governor_sysfs_ops,
+	.release = &sugov_tunables_free,
 };
 
 /********************** cpufreq governor interface *********************/
@@ -720,12 +728,10 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
 	return tunables;
 }
 
-static void sugov_tunables_free(struct sugov_tunables *tunables)
+static void sugov_clear_global_tunables(void)
 {
 	if (!have_governor_per_policy())
 		global_tunables = NULL;
-
-	kfree(tunables);
 }
 
 static int sugov_init(struct cpufreq_policy *policy)
@@ -788,7 +794,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 fail:
 	kobject_put(&tunables->attr_set.kobj);
 	policy->governor_data = NULL;
-	sugov_tunables_free(tunables);
+	sugov_clear_global_tunables();
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
@@ -815,7 +821,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
 	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
 	policy->governor_data = NULL;
 	if (!count)
-		sugov_tunables_free(tunables);
+		sugov_clear_global_tunables();
 
 	mutex_unlock(&global_tunables_lock);
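The schedutil fix frees the tunables from the kobject's release() callback, so the memory stays valid until the last sysfs reference is dropped; sugov_clear_global_tunables() now only clears the global pointer. A miniature sketch of that release-callback ownership rule (hypothetical types in plain C; the real kobject machinery is far more involved):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct kobj {
	int refcount;
	void (*release)(struct kobj *kobj);
};

struct tunables {
	struct kobj kobj;	/* embedded, like gov_attr_set.kobj */
	unsigned int rate_limit_us;
};

/* container_of in miniature: recover the outer struct from the kobj. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void tunables_release(struct kobj *kobj)
{
	/* Freeing happens here, only once the last reference is gone. */
	free(container_of(kobj, struct tunables, kobj));
	puts("released");
}

static void kobj_put(struct kobj *kobj)
{
	if (--kobj->refcount == 0)
		kobj->release(kobj);
}

int main(void)
{
	struct tunables *t = malloc(sizeof(*t));

	t->kobj.refcount = 2;	/* e.g. two policies share the tunables */
	t->kobj.release = tunables_release;
	t->rate_limit_us = 500;

	kobj_put(&t->kobj);	/* still one user: no free yet */
	kobj_put(&t->kobj);	/* last put triggers release() */
	return 0;
}

Freeing eagerly instead (the old code) would leave the sysfs side holding a pointer to freed memory, which is exactly the race the stable fix closes.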


@@ -586,6 +586,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	if (ret || size <= PAGE_SIZE)
 		return ret;
 
+	/* Don't even allow crazy sizes */
+	if (WARN_ON_ONCE(size > INT_MAX))
+		return NULL;
+
 	return __vmalloc_node(size, 1, flags, node,
 			__builtin_return_address(0));
 }


@@ -1257,6 +1257,16 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(sock_setsockopt);
 
+static const struct cred *sk_get_peer_cred(struct sock *sk)
+{
+	const struct cred *cred;
+
+	spin_lock(&sk->sk_peer_lock);
+	cred = get_cred(sk->sk_peer_cred);
+	spin_unlock(&sk->sk_peer_lock);
+
+	return cred;
+}
 
 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
 			  struct ucred *ucred)
@@ -1430,7 +1440,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		struct ucred peercred;
 		if (len > sizeof(peercred))
 			len = sizeof(peercred);
+
+		spin_lock(&sk->sk_peer_lock);
 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+		spin_unlock(&sk->sk_peer_lock);
+
 		if (copy_to_user(optval, &peercred, len))
 			return -EFAULT;
 		goto lenout;
@@ -1438,20 +1452,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 	case SO_PEERGROUPS:
 	{
+		const struct cred *cred;
 		int ret, n;
 
-		if (!sk->sk_peer_cred)
+		cred = sk_get_peer_cred(sk);
+		if (!cred)
 			return -ENODATA;
 
-		n = sk->sk_peer_cred->group_info->ngroups;
+		n = cred->group_info->ngroups;
 		if (len < n * sizeof(gid_t)) {
 			len = n * sizeof(gid_t);
+			put_cred(cred);
 			return put_user(len, optlen) ? -EFAULT : -ERANGE;
 		}
 		len = n * sizeof(gid_t);
 
-		ret = groups_to_user((gid_t __user *)optval,
-				     sk->sk_peer_cred->group_info);
+		ret = groups_to_user((gid_t __user *)optval, cred->group_info);
+		put_cred(cred);
 		if (ret)
 			return ret;
 		goto lenout;
@@ -1794,9 +1811,10 @@ static void __sk_destruct(struct rcu_head *head)
 		sk->sk_frag.page = NULL;
 	}
 
-	if (sk->sk_peer_cred)
-		put_cred(sk->sk_peer_cred);
+	/* We do not need to acquire sk->sk_peer_lock, we are the last user. */
+	put_cred(sk->sk_peer_cred);
+
 	put_pid(sk->sk_peer_pid);
+
 	if (likely(sk->sk_net_refcnt))
 		put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
@@ -3016,6 +3034,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_peer_pid		=	NULL;
 	sk->sk_peer_cred	=	NULL;
+	spin_lock_init(&sk->sk_peer_lock);
+
 	sk->sk_write_pending	=	0;
 	sk->sk_rcvlowat		=	1;
 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
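The sock.c hunks close a use-after-free: readers of sk_peer_cred now take sk_peer_lock and obtain their own reference before dropping the lock, so a concurrent connect() swapping the peer credentials cannot free them mid-use. A userspace sketch of the take-ref-under-lock pattern (pthreads, invented types; the kernel's put_cred() uses atomic refcounts rather than this lock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cred {
	int usage;	/* reference count, protected by peer_lock here */
	int uid;
};

static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cred *peer_cred;	/* may be swapped by another thread */

static struct cred *get_peer_cred(void)
{
	struct cred *cred;

	pthread_mutex_lock(&peer_lock);
	cred = peer_cred;
	if (cred)
		cred->usage++;	/* our own reference, valid after unlock */
	pthread_mutex_unlock(&peer_lock);
	return cred;
}

static void put_cred(struct cred *cred)
{
	pthread_mutex_lock(&peer_lock);
	if (--cred->usage == 0)
		free(cred);
	pthread_mutex_unlock(&peer_lock);
}

int main(void)
{
	struct cred *cred;

	peer_cred = calloc(1, sizeof(*peer_cred));
	peer_cred->usage = 1;
	peer_cred->uid = 1000;

	cred = get_peer_cred();
	if (cred) {
		/* Safe to dereference even if peer_cred is replaced now. */
		printf("peer uid: %d\n", cred->uid);
		put_cred(cred);
	}
	put_cred(peer_cred);	/* drop the original reference */
	return 0;
}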


@@ -1663,7 +1663,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
-		    int nh_weight, u8 rt_family)
+		    int nh_weight, u8 rt_family, u32 nh_tclassid)
 {
 	const struct net_device *dev = nhc->nhc_dev;
 	struct rtnexthop *rtnh;
@@ -1681,6 +1681,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
 
 	rtnh->rtnh_flags = flags;
 
+	if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
+		goto nla_put_failure;
+
 	/* length of rtnetlink header + attributes */
 	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
@@ -1708,14 +1711,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
 	}
 
 	for_nexthops(fi) {
-		if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
-				    AF_INET) < 0)
-			goto nla_put_failure;
+		u32 nh_tclassid = 0;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-		if (nh->nh_tclassid &&
-		    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
-			goto nla_put_failure;
+		nh_tclassid = nh->nh_tclassid;
 #endif
+		if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+				    AF_INET, nh_tclassid) < 0)
+			goto nla_put_failure;
 	} endfor_nexthops(fi);
 
 mp_end:
mp_end: mp_end:


@@ -1036,7 +1036,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	__be16 dport;
 	u8 tos;
 	int err, is_udplite = IS_UDPLITE(sk);
-	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 	struct sk_buff *skb;
 	struct ip_options_data opt_copy;
@@ -1345,7 +1345,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	}
 
 	up->len += size;
-	if (!(up->corkflag || (flags&MSG_MORE)))
+	if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
 		ret = udp_push_pending_frames(sk);
 	if (!ret)
 		ret = size;
@@ -2612,9 +2612,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 	switch (optname) {
 	case UDP_CORK:
 		if (val != 0) {
-			up->corkflag = 1;
+			WRITE_ONCE(up->corkflag, 1);
 		} else {
-			up->corkflag = 0;
+			WRITE_ONCE(up->corkflag, 0);
 			lock_sock(sk);
 			push_pending_frames(sk);
 			release_sock(sk);
@@ -2737,7 +2737,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
 	switch (optname) {
 	case UDP_CORK:
-		val = up->corkflag;
+		val = READ_ONCE(up->corkflag);
 		break;
 	case UDP_ENCAP:
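corkflag is now accessed with READ_ONCE()/WRITE_ONCE() because setsockopt() can flip it while udp_sendmsg() reads it without the socket lock; the annotations keep the compiler from tearing, fusing, or refetching the accesses. In portable userspace C, relaxed C11 atomics express the same intentional, unordered single-flag race (a sketch, not the kernel macros):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* One flag written by a control thread and read by a data-path thread. */
static atomic_int corkflag;

static void *control(void *arg)
{
	(void)arg;
	/* WRITE_ONCE analogue: one untorn store, no ordering implied. */
	atomic_store_explicit(&corkflag, 1, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, control, NULL);

	/* READ_ONCE analogue: each iteration performs a real load. */
	while (!atomic_load_explicit(&corkflag, memory_order_relaxed))
		;	/* spin until the flag flips */

	pthread_join(t, NULL);
	puts("corked");
	return 0;
}

With a plain int, the compiler would be entitled to hoist the load out of the loop and spin forever — the same class of miscompilation READ_ONCE() prevents in the kernel.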


@@ -5543,14 +5543,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 			goto nla_put_failure;
 
 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
-				    rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
+				    rt->fib6_nh->fib_nh_weight, AF_INET6,
+				    0) < 0)
 			goto nla_put_failure;
 
 		list_for_each_entry_safe(sibling, next_sibling,
 					 &rt->fib6_siblings, fib6_siblings) {
 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
 					    sibling->fib6_nh->fib_nh_weight,
-					    AF_INET6) < 0)
+					    AF_INET6, 0) < 0)
 				goto nla_put_failure;
 		}


@@ -1288,7 +1288,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int addr_len = msg->msg_namelen;
 	bool connected = false;
 	int ulen = len;
-	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);


@@ -2,6 +2,7 @@
 /*
  * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
  * Copyright 2012-2013, cozybit Inc.
+ * Copyright (C) 2021 Intel Corporation
  */
 
 #include "mesh.h"
@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
 
 	/* only transmit to PS STA with announced, non-zero awake window */
 	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
-	    (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+	    (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
 		return;
 
 	if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))


@@ -2177,7 +2177,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
 			}
 
 			vht_mcs = iterator.this_arg[4] >> 4;
+			if (vht_mcs > 11)
+				vht_mcs = 0;
 			vht_nss = iterator.this_arg[4] & 0xF;
+			if (!vht_nss || vht_nss > 8)
+				vht_nss = 1;
 			break;
 
 		/*
@@ -3365,6 +3369,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
 		goto out;
 
+	/* If n == 2, the "while (*frag_tail)" loop above didn't execute
+	 * and frag_tail should be &skb_shinfo(head)->frag_list.
+	 * However, ieee80211_amsdu_prepare_head() can reallocate it.
+	 * Reload frag_tail to have it pointing to the correct place.
+	 */
+	if (n == 2)
+		frag_tail = &skb_shinfo(head)->frag_list;
+
 	/*
 	 * Pad out the previous subframe to a multiple of 4 by adding the
 	 * padding to the next one, that's being added. Note that head->len
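The two mac80211 fixes above (and the CCMP/GCMP one that follows) are the same bug class: a helper may reallocate the underlying buffer (the skb head), leaving previously derived pointers dangling, so those pointers must be reloaded afterwards. The classic userspace form of the bug and its fix, using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *hdr;

	if (!buf)
		return 1;
	strcpy(buf, "hdr:payload");
	hdr = buf;	/* pointer derived from the buffer base */

	/* Growing the buffer may move it; 'hdr' would then dangle. */
	buf = realloc(buf, 4096);
	if (!buf)
		return 1;

	/* Reload the derived pointer from the (possibly new) base. */
	hdr = buf;

	printf("%s\n", hdr);
	free(buf);
	return 0;
}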


@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
 			return RX_DROP_UNUSABLE;
 	}
 
+	/* reload hdr - skb might have been reallocated */
+	hdr = (void *)rx->skb->data;
+
 	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
 	if (!rx->sta || data_len < 0)
 		return RX_DROP_UNUSABLE;
@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
 			return RX_DROP_UNUSABLE;
 	}
 
+	/* reload hdr - skb might have been reallocated */
+	hdr = (void *)rx->skb->data;
+
 	data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
 	if (!rx->sta || data_len < 0)
 		return RX_DROP_UNUSABLE;


@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
 	struct sock *sk;
 
 	net = sock_net(in_skb->sk);
-	msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+	msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
 	if (!msk)
 		goto out_nosk;


@@ -424,7 +424,7 @@ int mptcp_token_new_connect(struct sock *sk);
 void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
 			struct mptcp_sock *msk);
 bool mptcp_token_exists(u32 token);
-struct mptcp_sock *mptcp_token_get_sock(u32 token);
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
 struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
 					 long *s_num);
 void mptcp_token_destroy(struct mptcp_sock *msk);


@@ -69,7 +69,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
 	struct mptcp_sock *msk;
 	int local_id;
 
-	msk = mptcp_token_get_sock(subflow_req->token);
+	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
 	if (!msk) {
 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
 		return NULL;

Some files were not shown because too many files have changed in this diff.