Merge "Merge android12-5.10.8 (f11e175) into msm-5.10"

Authored by qctecmdr on 2021-01-20 11:26:55 -08:00; committed by Gerrit - the friendly Code Review server.
commit d06697e749
164 changed files with 188741 additions and 13728 deletions


@@ -91,9 +91,9 @@ The ECM function provides these attributes in its function directory:
 
 and after creating the functions/ecm.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
 
 Testing the ECM function
 ------------------------
@@ -131,9 +131,9 @@ The ECM subset function provides these attributes in its function directory:
 
 and after creating the functions/ecm.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
 
 Testing the ECM subset function
 -------------------------------
@@ -171,9 +171,9 @@ The EEM function provides these attributes in its function directory:
 
 and after creating the functions/eem.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
 
 Testing the EEM function
 ------------------------
@@ -453,9 +453,9 @@ The NCM function provides these attributes in its function directory:
 
 and after creating the functions/ncm.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
 
 Testing the NCM function
 ------------------------
@@ -591,9 +591,9 @@ The RNDIS function provides these attributes in its function directory:
 
 and after creating the functions/rndis.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
 
 Testing the RNDIS function
 --------------------------
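As a concrete illustration of the new ifname semantics described in the five documentation hunks above, the sketch below writes an interface pattern to the attribute before the function is bound. The gadget name "g1" and instance name "usb0" are assumptions for the example, not fixed paths.

```c
/*
 * Minimal user-space sketch of the documented ifname behaviour: write an
 * interface pattern (e.g. "usb%d") to functions/<func>.<instance>/ifname
 * while the function is not yet bound. The configfs path below is an
 * assumed example layout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/config/usb_gadget/g1/functions/ecm.usb0/ifname";
	const char *pattern = "usb%d";	/* net core picks the next free usbX */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open ifname");
		return 1;
	}
	if (write(fd, pattern, strlen(pattern)) < 0)
		perror("write ifname");	/* fails once the function is bound */
	close(fd);
	return 0;
}
```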


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 7
+SUBLEVEL = 8
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -917,6 +917,27 @@ KBUILD_CFLAGS += $(CC_FLAGS_LTO)
 export CC_FLAGS_LTO
 endif
 
+ifdef CONFIG_CFI_CLANG
+CC_FLAGS_CFI := -fsanitize=cfi \
+                -fsanitize-cfi-cross-dso \
+                -fno-sanitize-cfi-canonical-jump-tables \
+                -fno-sanitize-blacklist
+
+ifdef CONFIG_CFI_PERMISSIVE
+CC_FLAGS_CFI += -fsanitize-recover=cfi \
+                -fno-sanitize-trap=cfi
+else
+ifndef CONFIG_UBSAN_TRAP
+CC_FLAGS_CFI += -ftrap-function=__ubsan_handle_cfi_check_fail_abort
+endif
+endif
+
+# If LTO flags are filtered out, we must also filter out CFI.
+CC_FLAGS_LTO += $(CC_FLAGS_CFI)
+KBUILD_CFLAGS += $(CC_FLAGS_CFI)
+export CC_FLAGS_CFI
+endif
+
 ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B
 KBUILD_CFLAGS += -falign-functions=32
 endif
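For context on what these flags enforce: -fsanitize=cfi instruments every indirect call so that a call through a function pointer of the wrong type is rejected at run time (or, with CONFIG_CFI_PERMISSIVE, reported and recovered from). A hypothetical user-space illustration of the bug class it catches:

```c
/*
 * Hypothetical illustration of the bug class Clang CFI catches: an
 * indirect call through a pointer whose type does not match the callee.
 * Built with clang -flto -fsanitize=cfi -fvisibility=hidden, the call
 * below is rejected at run time instead of being executed.
 */
#include <stdio.h>

static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	/* cast away the real prototype: int (*)(int) -> void (*)(void) */
	void (*fp)(void) = (void (*)(void))add_one;

	fp();	/* CFI failure: mismatched indirect-call type */
	printf("only reached in permissive mode or without CFI\n");
	return 0;
}
```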

android/abi_gki_aarch64.xml (new file, 96414 lines): file diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -704,6 +704,29 @@ config LTO_CLANG_THIN
 	  If unsure, say Y.
 endchoice
 
+config CFI_CLANG
+	bool "Use Clang's Control Flow Integrity (CFI)"
+	depends on LTO_CLANG && KALLSYMS
+	help
+	  This option enables Clang's Control Flow Integrity (CFI), which adds
+	  runtime checking for indirect function calls.
+
+config CFI_CLANG_SHADOW
+	bool "Use CFI shadow to speed up cross-module checks"
+	default y
+	depends on CFI_CLANG && MODULES
+	help
+	  If you select this option, the kernel builds a fast look-up table of
+	  CFI check functions in loaded modules to reduce overhead.
+
+config CFI_PERMISSIVE
+	bool "Use CFI in permissive mode"
+	depends on CFI_CLANG
+	help
+	  When selected, Control Flow Integrity (CFI) violations result in a
+	  warning instead of a kernel panic. This option is useful for finding
+	  CFI violations during development.
+
 config HAVE_ARCH_WITHIN_STACK_FRAMES
 	bool
 	help
@@ -1143,6 +1166,12 @@ config ARCH_WANT_LD_ORPHAN_WARN
 	  by the linker, since the locations of such sections can change between linker
 	  versions.
 
+config ARCH_SPLIT_ARG64
+	bool
+	help
+	  If a 32-bit architecture requires 64-bit arguments to be split into
+	  pairs of 32-bit arguments, select this option.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
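The ARCH_SPLIT_ARG64 help text is terse; the sketch below shows, in plain user-space C with invented names, what "split into pairs of 32-bit arguments" means for a 64-bit parameter on a 32-bit ABI.

```c
/*
 * Illustration of the ARCH_SPLIT_ARG64 situation, with invented names:
 * on a 32-bit ABI a u64 argument is passed as two 32-bit halves that
 * the wrapper must reassemble before calling the real implementation.
 */
#include <stdint.h>
#include <stdio.h>

static long do_op(uint64_t offset)	/* what the C prototype promises */
{
	printf("offset = 0x%llx\n", (unsigned long long)offset);
	return 0;
}

/* what the architecture actually delivers: low and high 32-bit halves */
static long sys_op_split(uint32_t lo, uint32_t hi)
{
	return do_op(((uint64_t)hi << 32) | lo);
}

int main(void)
{
	uint64_t off = 0x123456789abcULL;

	return (int)sys_op_split((uint32_t)off, (uint32_t)(off >> 32));
}
```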


@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
 		break;
 	case BUS_NOTIFY_BIND_DRIVER:
 		od = to_omap_device(pdev);
-		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
-		    pm_runtime_status_suspended(dev)) {
+		if (od) {
 			od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
-			pm_runtime_set_active(dev);
+			if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+			    pm_runtime_status_suspended(dev)) {
+				pm_runtime_set_active(dev);
+			}
 		}
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:


@@ -1658,6 +1658,8 @@ config ARM64_BTI_KERNEL
 	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
 	depends on !CC_IS_GCC || GCC_VERSION >= 100100
 	depends on !(CC_IS_CLANG && GCOV_KERNEL)
+	# https://bugs.llvm.org/show_bug.cgi?id=46258
+	depends on !CFI_CLANG || CLANG_VERSION >= 120000
 	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Build the kernel with Branch Target Identification annotations


@@ -158,7 +158,7 @@ extern void *__vhe_undefined_symbol;
 		val = lm_alias((ptr));					\
 	val;								\
  })
-#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(__va_function(kvm_nvhe_sym(sym)))
 
 struct kvm;
 struct kvm_vcpu;


@@ -300,6 +300,24 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_to_pfn(x)	__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
 #define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))
 
+/*
+ * With non-canonical CFI jump tables, the compiler replaces function
+ * address references with the address of the function's CFI jump
+ * table entry. This results in __pa_symbol(function) returning the
+ * physical address of the jump table entry, which can lead to address
+ * space confusion since the jump table points to the function's
+ * virtual address. Therefore, use inline assembly to ensure we are
+ * always taking the address of the actual function.
+ */
+#define __va_function(x) ({						\
+	void *addr;							\
+	asm("adrp %0, " __stringify(x) "\n\t"				\
+	    "add  %0, %0, :lo12:" __stringify(x) : "=r" (addr));	\
+	addr;								\
+})
+
+#define __pa_function(x)	__pa_symbol(__va_function(x))
+
 /*
  * virt_to_page(x)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(x)	indicates whether a virtual address is valid


@@ -137,7 +137,7 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -158,7 +158,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 		ttbr1 |= TTBR_CNP_BIT;
 	}
 
-	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(ttbr1);


@@ -97,7 +97,8 @@
 #endif /* CONFIG_ARM64_FORCE_52BIT */
 
 extern phys_addr_t arm64_dma_phys_limit;
-#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
+extern phys_addr_t arm64_dma32_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT	((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
 
 struct debug_info {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT


@@ -132,8 +132,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
 	} while (cur += d_size, cur < end);
 }
 
-static void __apply_alternatives(void *alt_region, bool is_module,
-				 unsigned long *feature_mask)
+static void __nocfi __apply_alternatives(void *alt_region, bool is_module,
+					 unsigned long *feature_mask)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;


@@ -13,16 +13,16 @@
 void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
 	unsigned long arg0, unsigned long arg1, unsigned long arg2);
 
-static inline void __noreturn cpu_soft_restart(unsigned long entry,
-					       unsigned long arg0,
-					       unsigned long arg1,
-					       unsigned long arg2)
+static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
+						       unsigned long arg0,
+						       unsigned long arg1,
+						       unsigned long arg2)
 {
 	typeof(__cpu_soft_restart) *restart;
 
 	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
 		is_hyp_mode_available();
-	restart = (void *)__pa_symbol(__cpu_soft_restart);
+	restart = (void *)__pa_function(__cpu_soft_restart);
 
 	cpu_install_idmap();
 	restart(el2_switch, entry, arg0, arg1, arg2);


@@ -1398,7 +1398,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 }
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-static void
+static void __nocfi
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
@@ -1415,7 +1415,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	if (arm64_use_ng_mappings)
 		return;
 
-	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+	remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
 
 	cpu_install_idmap();
 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
@@ -2571,7 +2571,7 @@ static void verify_hyp_capabilities(void)
 	int parange, ipa_max;
 	unsigned int safe_vmid_bits, vmid_bits;
 
-	if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+	if (!IS_ENABLED(CONFIG_KVM))
 		return;
 
 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);


@@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+				  __pa_function(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);


@@ -88,7 +88,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	 * boot-loader's endianness before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_function(secondary_holding_pen), release_addr);
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));


@@ -4,6 +4,7 @@
 #
 
 ccflags-y += -I $(srctree)/$(src)
+CFLAGS_REMOVE_debug.o += $(CC_FLAGS_CFI)
 
 KVM=../../../virt/kvm


@@ -11,3 +11,6 @@ subdir-ccflags-y := -I$(incdir) \
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
+
+# Disable LTO+CFI for the files in this directory
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))


@@ -663,6 +663,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
 
+	/* No PMU available, PMCR_EL0 may UNDEF... */
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
 	pmcr = read_sysreg(pmcr_el0);
 	/*
 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN


@@ -60,7 +60,7 @@ EXPORT_SYMBOL(memstart_addr);
  * bit addressable memory area.
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
+phys_addr_t arm64_dma32_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
 /*


@@ -262,10 +262,19 @@ __secondary_hold_acknowledge:
 MachineCheck:
 	EXCEPTION_PROLOG_0
 #ifdef CONFIG_PPC_CHRP
+#ifdef CONFIG_VMAP_STACK
+	mr	r11, r1
+	mfspr	r1, SPRN_SPRG_THREAD
+	lwz	r1, RTAS_SP(r1)
+	cmpwi	cr1, r1, 0
+	bne	cr1, 7f
+	mr	r1, r11
+#else
 	mfspr	r11, SPRN_SPRG_THREAD
 	lwz	r11, RTAS_SP(r11)
 	cmpwi	cr1, r11, 0
 	bne	cr1, 7f
+#endif
 #endif /* CONFIG_PPC_CHRP */
 	EXCEPTION_PROLOG_1 for_rtas=1
 7:	EXCEPTION_PROLOG_2


@@ -18,6 +18,7 @@ config X86_32
 	select MODULES_USE_ELF_REL
 	select OLD_SIGACTION
 	select GENERIC_VDSO_32
+	select ARCH_SPLIT_ARG64
 
 config X86_64
 	def_bool y


@@ -8,6 +8,7 @@
 #include <asm/extable.h>
 
 extern char __brk_base[], __brk_limit[];
+extern char __cfi_jt_start[], __cfi_jt_end[];
 extern char __end_rodata_aligned[];
 
 #if defined(CONFIG_X86_64)


@@ -628,23 +628,10 @@ extern struct paravirt_patch_site __start_parainstructions[],
  *
  * See entry_{32,64}.S for more details.
  */
-
-/*
- * We define the int3_magic() function in assembly to control the calling
- * convention such that we can 'call' it from assembly.
- */
-extern void int3_magic(unsigned int *ptr); /* defined in asm */
-
-asm (
-"	.pushsection	.init.text, \"ax\", @progbits\n"
-"	.type		int3_magic, @function\n"
-"int3_magic:\n"
-"	movl	$1, (%" _ASM_ARG1 ")\n"
-"	ret\n"
-"	.size		int3_magic, .-int3_magic\n"
-"	.popsection\n"
-);
+static void __init __no_sanitize_address notrace int3_magic(unsigned int *ptr)
+{
+	*ptr = 1;
+}
 
 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */


@@ -103,6 +103,14 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 asm (
 			".pushsection .rodata\n"
 			"optprobe_template_func:\n"
+			".pushsection .discard.func_stack_frame_non_standard\n"
+			"__func_stack_frame_non_standard_optprobe_template_func:\n"
+#ifdef CONFIG_64BIT
+			".quad optprobe_template_func\n"
+#else
+			".long optprobe_template_func\n"
+#endif
+			".popsection\n"
 			".global optprobe_template_entry\n"
 			"optprobe_template_entry:\n"
 #ifdef CONFIG_X86_64
@@ -154,9 +162,6 @@ asm (
 			"optprobe_template_end:\n"
 			".popsection\n");
 
-void optprobe_template_func(void);
-STACK_FRAME_NON_STANDARD(optprobe_template_func);
-
 #define TMPL_CLAC_IDX \
 	((long)optprobe_template_clac - (long)optprobe_template_entry)
 #define TMPL_MOVE_IDX \


@@ -196,6 +196,10 @@ static int __apply_relocate_add(Elf64_Shdr *sechdrs,
 			val -= (u64)loc;
 			write(loc, &val, 8);
 			break;
+		case R_X86_64_8:
+			if (!strncmp(strtab + sym->st_name, "__typeid__", 10))
+				break;
+			/* fallthrough */
 		default:
 			pr_err("%s: Unknown rela relocation: %llu\n",
 			       me->name, ELF64_R_TYPE(rel[i].r_info));


@@ -490,3 +490,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 	   "kexec control code size is too big");
 #endif
 
+#ifdef CONFIG_CFI_CLANG
+. = ASSERT((__cfi_jt_end - __cfi_jt_start > 0),
+	   "CFI jump table is empty");
+#endif


@@ -155,6 +155,7 @@ enum handler_type ex_get_fault_handler_type(unsigned long ip)
 		return EX_HANDLER_OTHER;
 }
 
+__nocfi
 int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
 		    unsigned long fault_addr)
 {


@@ -498,6 +498,15 @@ static void pti_clone_entry_text(void)
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
 			  PTI_CLONE_PMD);
+
+	/*
+	 * If CFI is enabled, also map jump tables, so the entry code can
+	 * make indirect calls.
+	 */
+	if (IS_ENABLED(CONFIG_CFI_CLANG))
+		pti_clone_pgtable((unsigned long) __cfi_jt_start,
+				  (unsigned long) __cfi_jt_end,
+				  PTI_CLONE_PMD);
 }
 
 /*


@@ -425,7 +425,7 @@ static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
 	return AE_OK;
 }
 
-static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used)
+static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
 {
 	struct resource mcfg_res;
 
@@ -442,7 +442,7 @@ static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used)
 	return mcfg_res.flags;
 }
 
-typedef bool (*check_reserved_t)(u64 start, u64 end, unsigned type);
+typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
 
 static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
 				     struct pci_mmcfg_region *cfg,


@@ -31,7 +31,7 @@ KCOV_INSTRUMENT := n
 # These are adjustments to the compiler flags used for objects that
 # make up the standalone purgatory.ro
 
-PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel $(CC_FLAGS_CFI)
 PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
 PURGATORY_CFLAGS += -fno-stack-protector


@@ -48,6 +48,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	"^(xen_irq_disable_direct_reloc$|"
 	"xen_save_fl_direct_reloc$|"
 	"VDSO|"
+	"__typeid__|"
 	"__crc_)",
 
 /*
@@ -808,6 +809,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 			symname);
 		break;
 
+	case R_X86_64_8:
+		if (!shn_abs || !is_reloc(S_ABS, symname))
+			die("Non-whitelisted %s relocation: %s\n",
+			    rel_type(r_type), symname);
+		break;
+
 	case R_X86_64_32:
 	case R_X86_64_32S:
 	case R_X86_64_64:


@@ -256,14 +256,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
 		part = rcu_dereference(ptbl->part[piter->idx]);
 		if (!part)
 			continue;
+		get_device(part_to_dev(part));
+		piter->part = part;
 		if (!part_nr_sects_read(part) &&
 		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
 		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-		      piter->idx == 0))
+		      piter->idx == 0)) {
+			put_device(part_to_dev(part));
+			piter->part = NULL;
 			continue;
+		}
 
-		get_device(part_to_dev(part));
-		piter->part = part;
 		piter->idx += inc;
 		break;
 	}
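The reordering above follows a general rule worth spelling out: take the reference before any check that dereferences the object, and drop it on the skip path. A generic sketch of that pattern with illustrative names (not the block-layer API):

```c
/*
 * Generic sketch of the pattern applied above, with illustrative names:
 * pin the object before inspecting it, and unpin on the skip path, so
 * no check ever runs on an object that could be freed underneath it.
 */
struct item;

extern struct item *slot_lookup(int idx);	/* illustrative helpers */
extern void item_get(struct item *it);
extern void item_put(struct item *it);
extern int item_is_empty(struct item *it);

struct item *iter_next(int idx)
{
	struct item *it = slot_lookup(idx);

	if (!it)
		return NULL;

	item_get(it);			/* reference first ... */
	if (item_is_empty(it)) {	/* ... then it is safe to inspect */
		item_put(it);		/* undo on the skip path */
		return NULL;
	}
	return it;			/* caller owns the reference */
}
```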


@@ -2,6 +2,7 @@
 . ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
 . ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
 
+ABI_DEFINITION=android/abi_gki_aarch64.xml
 KMI_SYMBOL_LIST=android/abi_gki_aarch64
 ADDITIONAL_KMI_SYMBOL_LISTS="
 android/abi_gki_aarch64_db845c


@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
 		devname = dev_name(map->dev);
 
 	if (name) {
-		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+		if (!map->debugfs_name) {
+			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
 					      devname, name);
+			if (!map->debugfs_name)
+				return;
+		}
 		name = map->debugfs_name;
 	} else {
 		name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
 	if (!strcmp(name, "dummy")) {
 		kfree(map->debugfs_name);
-
 		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
 					      dummy_index);
+		if (!map->debugfs_name)
+			return;
 		name = map->debugfs_name;
 		dummy_index++;
 	}
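The fix above makes the debugfs init path safe to run more than once for the same map: the name is allocated only while it is still NULL, and both kasprintf() failures now return early. A user-space sketch of the same allocate-once rule (illustrative types, not the regmap API):

```c
/*
 * User-space sketch of the allocate-once rule used above; names are
 * illustrative. A second init call reuses the stored name instead of
 * leaking the first allocation, and allocation failure bails out
 * instead of dereferencing NULL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct map {
	char *debugfs_name;
};

static int map_debugfs_init(struct map *m, const char *dev, const char *name)
{
	if (!m->debugfs_name) {		/* allocate only on first init */
		size_t len = strlen(dev) + 1 + strlen(name) + 1;

		m->debugfs_name = malloc(len);
		if (!m->debugfs_name)
			return -1;
		snprintf(m->debugfs_name, len, "%s-%s", dev, name);
	}
	printf("registering %s\n", m->debugfs_name);
	return 0;
}

int main(void)
{
	struct map m = { 0 };

	map_debugfs_init(&m, "dev0", "cfg");
	map_debugfs_init(&m, "dev0", "cfg");	/* reuses the first name */
	free(m.debugfs_name);
	return 0;
}
```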


@@ -451,6 +451,7 @@ config BLK_DEV_RBD
 config BLK_DEV_RSXX
 	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
 	depends on PCI
+	select CRC32
 	help
 	  Device driver for IBM's high speed PCIe SSD
 	  storage device: Flash Adapter 900GB Full Height.


@@ -1671,7 +1671,8 @@ static void rnbd_destroy_sessions(void)
 	 */
 	list_for_each_entry_safe(sess, sn, &sess_list, list) {
-		WARN_ON(!rnbd_clt_get_sess(sess));
+		if (!rnbd_clt_get_sess(sess))
+			continue;
 		close_rtrs(sess);
 		list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
 			/*


@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
 
 /* Take a frequency, and issue the fid/vid transition command */
 static int transition_frequency_fidvid(struct powernow_k8_data *data,
-		unsigned int index)
+		unsigned int index,
+		struct cpufreq_policy *policy)
 {
-	struct cpufreq_policy *policy;
 	u32 fid = 0;
 	u32 vid = 0;
 	int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	policy = cpufreq_cpu_get(smp_processor_id());
-	cpufreq_cpu_put(policy);
-
 	cpufreq_freq_transition_begin(policy, &freqs);
 	res = transition_fid_vid(data, fid, vid);
 	cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	ret = transition_frequency_fidvid(data, newstate);
+	ret = transition_frequency_fidvid(data, newstate, pol);
 	if (ret) {
 		pr_err("transition frequency failed\n");


@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 
 	if (desc->chunk) {
 		/* Create and add new element into the linked list */
-		desc->chunks_alloc++;
-		list_add_tail(&chunk->list, &desc->chunk->list);
 		if (!dw_edma_alloc_burst(chunk)) {
 			kfree(chunk);
 			return NULL;
 		}
+		desc->chunks_alloc++;
+		list_add_tail(&chunk->list, &desc->chunk->list);
 	} else {
 		/* List head */
 		chunk->burst = NULL;


@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 	return 0;
 
 err_free:
+	mtk_hsdma_hw_deinit(hsdma);
 	of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
 	dma_async_device_unregister(dd);


@@ -351,7 +351,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
 	ret = dma_async_device_register(ddev);
 	if (ret)
-		return ret;
+		goto disable_xdmac;
 
 	ret = of_dma_controller_register(dev->of_node,
 					 of_dma_simple_xlate, mdev);
@@ -364,6 +364,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
 unregister_dmac:
 	dma_async_device_unregister(ddev);
+disable_xdmac:
+	disable_xdmac(mdev);
 	return ret;
 }
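The two-line fix above restores the usual probe() unwind discipline: every failure path undoes exactly what had already succeeded, in reverse order. A compact sketch of the idiom with illustrative helper names:

```c
/*
 * Sketch of the goto-ladder unwind idiom the fix above restores;
 * helper names are illustrative. Each label undoes one successful
 * step, and control falls through the labels in reverse order.
 */
extern int  enable_hw(void);
extern void disable_hw(void);
extern int  register_engine(void);
extern void unregister_engine(void);
extern int  register_controller(void);

int probe(void)
{
	int ret;

	ret = enable_hw();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = register_engine();
	if (ret)
		goto err_disable_hw;

	ret = register_controller();
	if (ret)
		goto err_unregister_engine;

	return 0;

err_unregister_engine:
	unregister_engine();
err_disable_hw:
	disable_hw();
	return ret;
}
```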


@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		has_dre = false;
 
 	if (!has_dre)
-		xdev->common.copy_align = fls(width - 1);
+		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
 
 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
 				  struct device_node *node)
 {
-	int ret, i, nr_channels = 1;
+	int ret, i;
+	u32 nr_channels = 1;
 
 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	}
 
 	/* Register the DMA engine with the core */
-	dma_async_device_register(&xdev->common);
+	err = dma_async_device_register(&xdev->common);
+	if (err) {
+		dev_err(xdev->dev, "failed to register the dma device\n");
+		goto error;
+	}
 
 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
 					 xdev);


@@ -1382,6 +1382,9 @@ struct intel_dp {
 		bool ycbcr_444_to_420;
 	} dfp;
 
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
 	/* Display stream compression testing */
 	bool force_dsc_en;


@@ -1411,7 +1411,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	 * lowest possible wakeup latency and so prevent the cpu from going into
 	 * deep sleep states.
 	 */
-	cpu_latency_qos_update_request(&i915->pm_qos, 0);
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
 
 	intel_dp_check_edp(intel_dp);
@@ -1544,7 +1544,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 
 	ret = recv_bytes;
 out:
-	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 
 	if (vdd)
 		edp_panel_vdd_off(intel_dp, false);
@@ -1776,6 +1776,9 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
 static void
 intel_dp_aux_fini(struct intel_dp *intel_dp)
 {
+	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
 	kfree(intel_dp->aux.name);
 }
 
@@ -1818,6 +1821,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
 		       aux_ch_name(dig_port->aux_ch),
 		       port_name(encoder->port));
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
+	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 }
 
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)


@@ -577,8 +577,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
 	pci_set_master(pdev);
 
-	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
 	intel_gt_init_workarounds(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -623,7 +621,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 err_msi:
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);
-	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 err_mem_regions:
 	intel_memory_regions_driver_release(dev_priv);
 err_ggtt:
@@ -645,8 +642,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);
-
-	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 }
 
 /**


@@ -892,9 +892,6 @@ struct drm_i915_private {
 
 	bool display_irqs_enabled;
 
-	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
-	struct pm_qos_request pm_qos;
-
 	/* Sideband mailbox protection */
 	struct mutex sb_lock;
 	struct pm_qos_request sb_qos;
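Taken together, the i915 hunks above move the CPU-latency QoS request from the driver-wide private structure into struct intel_dp and bound it to the aux-transfer window. The cpu_latency_qos_* calls are the real <linux/pm_qos.h> API; the device structure below is an illustrative stand-in:

```c
/*
 * Sketch of the per-device PM QoS lifecycle adopted above. The
 * cpu_latency_qos_* functions are the real kernel API; struct my_dev
 * and the helpers are illustrative.
 */
#include <linux/pm_qos.h>

struct my_dev {
	struct pm_qos_request pm_qos;
};

static void my_dev_init(struct my_dev *d)
{
	cpu_latency_qos_add_request(&d->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_dev_irq_driven_xfer(struct my_dev *d)
{
	/* forbid deep idle states only for the latency-critical window */
	cpu_latency_qos_update_request(&d->pm_qos, 0);
	/* ... perform the irq-driven transfer ... */
	cpu_latency_qos_update_request(&d->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void my_dev_fini(struct my_dev *d)
{
	if (cpu_latency_qos_request_active(&d->pm_qos))
		cpu_latency_qos_remove_request(&d->pm_qos);
}
```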


@@ -617,6 +617,8 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 	}
 
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
+		mutex_init(&js->queue[j].lock);
+
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
 
 		ret = drm_sched_init(&js->queue[j].sched,
@@ -647,8 +649,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
 	job_write(pfdev, JOB_INT_MASK, 0);
 
-	for (j = 0; j < NUM_JOB_SLOTS; j++)
+	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		drm_sched_fini(&js->queue[j].sched);
+		mutex_destroy(&js->queue[j].lock);
+	}
 }
@@ -660,7 +664,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
-		mutex_init(&js->queue[i].lock);
-
 		sched = &js->queue[i].sched;
 		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
 					    DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -673,14 +676,10 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
 {
-	struct panfrost_device *pfdev = panfrost_priv->pfdev;
-	struct panfrost_job_slot *js = pfdev->js;
 	int i;
 
-	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
-		mutex_destroy(&js->queue[i].lock);
-	}
 }
 
 int panfrost_job_is_idle(struct panfrost_device *pfdev)


@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
 				       group);
 }
 
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+	struct kfifo_rec_ptr_2 *devres = res;
+
+	kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+	int error;
+
+	pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+				sizeof(struct kfifo_rec_ptr_2),
+				GFP_KERNEL);
+	if (!pen_fifo)
+		return -ENOMEM;
+
+	error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+	if (error) {
+		devres_free(pen_fifo);
+		return error;
+	}
+
+	devres_add(&wacom->hdev->dev, pen_fifo);
+
+	return 0;
+}
+
 enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
 {
 	struct wacom *wacom = led->wacom;
@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
 	if (features->check_for_hid_type && features->hid_type != hdev->type)
 		return -ENODEV;
 
-	error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+	error = wacom_devm_kfifo_alloc(wacom);
 	if (error)
 		return error;
 
@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
 
 	if (wacom->wacom_wac.features.type != REMOTE)
 		wacom_release_resources(wacom);
-
-	kfifo_free(&wacom_wac->pen_fifo);
 }
 
 #ifdef CONFIG_PM


@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
 
 	/* Register GPIO descriptor lookup table */
 	lookup = devm_kzalloc(dev,
-			      struct_size(lookup, table, mux_config->n_gpios),
+			      struct_size(lookup, table, mux_config->n_gpios + 1),
 			      GFP_KERNEL);
 	if (!lookup)
 		return -ENOMEM;
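The "+ 1" matters because a gpiod lookup table is terminated by an all-zero entry rather than an explicit count, so the allocation must reserve one extra zeroed slot. A user-space sketch of the same arithmetic (illustrative types; the kernel's struct_size() additionally checks for overflow):

```c
/*
 * User-space sketch of the "+ 1 for the sentinel" allocation above;
 * types are illustrative. Scanners stop at the first all-zero entry,
 * so n real entries need n + 1 zero-initialized slots.
 */
#include <stdlib.h>

struct entry {
	const char *key;
	unsigned int idx;
};

struct lookup_table {
	const char *dev_id;
	struct entry table[];	/* flexible array, sentinel-terminated */
};

static struct lookup_table *alloc_lookup(size_t n_entries)
{
	/* kernel: devm_kzalloc(dev, struct_size(lookup, table, n + 1), ...) */
	return calloc(1, sizeof(struct lookup_table) +
			 (n_entries + 1) * sizeof(struct entry));
}
```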


@@ -38,6 +38,7 @@
 #define I2C_IO_CONFIG_OPEN_DRAIN	0x0003
 #define I2C_IO_CONFIG_PUSH_PULL		0x0000
 #define I2C_SOFT_RST			0x0001
+#define I2C_HANDSHAKE_RST		0x0020
 #define I2C_FIFO_ADDR_CLR		0x0001
 #define I2C_DELAY_LEN			0x0002
 #define I2C_TIME_CLR_VALUE		0x0000
@@ -45,6 +46,7 @@
 #define I2C_WRRD_TRANAC_VALUE		0x0002
 #define I2C_RD_TRANAC_VALUE		0x0001
 #define I2C_SCL_MIS_COMP_VALUE		0x0000
+#define I2C_CHN_CLR_FLAG		0x0000
 
 #define I2C_DMA_CON_TX			0x0000
 #define I2C_DMA_CON_RX			0x0001
@@ -54,7 +56,9 @@
 #define I2C_DMA_START_EN		0x0001
 #define I2C_DMA_INT_FLAG_NONE		0x0000
 #define I2C_DMA_CLR_FLAG		0x0000
+#define I2C_DMA_WARM_RST		0x0001
 #define I2C_DMA_HARD_RST		0x0002
+#define I2C_DMA_HANDSHAKE_RST		0x0004
 
 #define MAX_SAMPLE_CNT_DIV		8
 #define MAX_STEP_CNT_DIV		64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
 	u16 control_reg;
 
-	writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
-	udelay(50);
-	writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
-	mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+	if (i2c->dev_comp->dma_sync) {
+		writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+		udelay(10);
+		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+		udelay(10);
+		writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+		       i2c->pdmabase + OFFSET_RST);
+		mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+			       OFFSET_SOFTRESET);
+		udelay(10);
+		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+		mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+	} else {
+		writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+		udelay(50);
+		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+		mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+	}
 
 	/* Set ioconfig */
 	if (i2c->use_push_pull)


@@ -72,6 +72,8 @@
 
 /* timeout (ms) for pm runtime autosuspend */
 #define SPRD_I2C_PM_TIMEOUT	1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT	1000
 
 /* SPRD i2c data structure */
 struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
 			       struct i2c_msg *msg, bool is_last_msg)
 {
 	struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+	unsigned long time_left;
 
 	i2c_dev->msg = msg;
 	i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
 
 	sprd_i2c_opt_start(i2c_dev);
 
-	wait_for_completion(&i2c_dev->complete);
+	time_left = wait_for_completion_timeout(&i2c_dev->complete,
+				msecs_to_jiffies(I2C_XFER_TIMEOUT));
+	if (!time_left)
+		return -ETIMEDOUT;
 
 	return i2c_dev->err;
 }
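The conversion above is the standard guard against a transfer that never completes: a lost or never-raised interrupt would otherwise block wait_for_completion() forever. A condensed sketch of the pattern; the completion API is real, while struct xfer and start_transfer() are illustrative stand-ins for the driver specifics:

```c
/*
 * Condensed sketch of the timeout pattern adopted above. The
 * completion API is the real <linux/completion.h> interface;
 * struct xfer and start_transfer() are illustrative.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define XFER_TIMEOUT_MS	1000

struct xfer {
	struct completion done;
	int err;
};

extern void start_transfer(struct xfer *x);	/* illustrative helper */

static int do_one_xfer(struct xfer *x)
{
	unsigned long time_left;

	reinit_completion(&x->done);
	start_transfer(x);		/* IRQ handler calls complete() */

	time_left = wait_for_completion_timeout(&x->done,
					msecs_to_jiffies(XFER_TIMEOUT_MS));
	if (!time_left)
		return -ETIMEDOUT;	/* hardware never signalled */

	return x->err;
}
```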


@@ -36,9 +36,6 @@
 #include <rdma/ib_cache.h>
 #include "hns_roce_device.h"
 
-#define VLAN_SL_MASK 7
-#define VLAN_SL_SHIFT 13
-
 static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
 {
 	u32 fl = ah_attr->grh.flow_label;
@@ -81,18 +78,12 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 
 	/* HIP08 needs to record vlan info in Address Vector */
 	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
-		ah->av.vlan_en = 0;
-
 		ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
 					      &ah->av.vlan_id, NULL);
 		if (ret)
 			return ret;
 
-		if (ah->av.vlan_id < VLAN_N_VID) {
-			ah->av.vlan_en = 1;
-			ah->av.vlan_id |= (rdma_ah_get_sl(ah_attr) & VLAN_SL_MASK) <<
-					  VLAN_SL_SHIFT;
-		}
+		ah->av.vlan_en = ah->av.vlan_id < VLAN_N_VID;
 	}
 
 	return ret;


@@ -99,6 +99,7 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
 		if (!dn || !of_device_is_available(dn)) {
 			dev_warn(dev, "Missing property %s, skip scaling %s\n",
 				 adj->phandle_name, node->name);
+			of_node_put(dn);
 			return 0;
 		}


@@ -54,13 +54,23 @@ config INTERCONNECT_QCOM_QCS404
 	  This is a driver for the Qualcomm Network-on-Chip on qcs404-based
 	  platforms.
 
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+	tristate
+	default INTERCONNECT_QCOM
+	depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+	depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+	depends on OF || COMPILE_TEST
+	help
+	  Compile-testing RPMH drivers is possible on other platforms,
+	  but in order to avoid link failures, drivers must not be built-in
+	  when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules
+
 config INTERCONNECT_QCOM_RPMH
 	tristate
 
 config INTERCONNECT_QCOM_SC7180
 	tristate "Qualcomm SC7180 interconnect driver"
-	depends on INTERCONNECT_QCOM
-	depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+	depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
 	select INTERCONNECT_QCOM_RPMH
 	select INTERCONNECT_QCOM_BCM_VOTER
 	help
@@ -69,8 +79,7 @@ config INTERCONNECT_QCOM_SC7180
 
 config INTERCONNECT_QCOM_SDM845
 	tristate "Qualcomm SDM845 interconnect driver"
-	depends on INTERCONNECT_QCOM
-	depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+	depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
 	select INTERCONNECT_QCOM_RPMH
 	select INTERCONNECT_QCOM_BCM_VOTER
 	help
@@ -125,8 +134,7 @@ config INTERCONNECT_QCOM_WAIPIO
 
 config INTERCONNECT_QCOM_SM8150
 	tristate "Qualcomm SM8150 interconnect driver"
-	depends on INTERCONNECT_QCOM
-	depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+	depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
 	select INTERCONNECT_QCOM_RPMH
 	select INTERCONNECT_QCOM_BCM_VOTER
 	help
@@ -135,8 +143,7 @@ config INTERCONNECT_QCOM_SM8150
 
 config INTERCONNECT_QCOM_SM8250
 	tristate "Qualcomm SM8250 interconnect driver"
-	depends on INTERCONNECT_QCOM
-	depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+	depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
 	select INTERCONNECT_QCOM_RPMH
 	select INTERCONNECT_QCOM_BCM_VOTER
 	help


@@ -229,6 +229,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
 	set_bit(qsmmu->bypass_cbndx, smmu->context_map);
 
+	arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
 	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
 	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
 }


@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 		int mask = ilog2(__roundup_pow_of_two(npages));
 		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
 
-		if (WARN_ON_ONCE(!ALIGN(addr, align)))
-			addr &= ~(align - 1);
+		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+			addr = ALIGN_DOWN(addr, align);
 
 		desc.qw0 = QI_EIOTLB_PASID(pasid) |
 			QI_EIOTLB_DID(did) |


@@ -1390,6 +1390,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		irq_cfg = irqd_cfg(irq_data);
 		if (!irq_data || !irq_cfg) {
+			if (!i)
+				kfree(data);
 			ret = -EINVAL;
 			goto out_free_data;
 		}


@@ -19,6 +19,7 @@ if NVM
 
 config NVM_PBLK
 	tristate "Physical Block Device Open-Channel SSD target"
+	select CRC32
 	help
 	  Allows an open-channel SSD to be exposed as a block device to the
 	  host. The target assumes the device exposes raw flash and must be


@@ -1341,6 +1341,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	bcache_device_link(&dc->disk, c, "bdev");
 	atomic_inc(&c->attached_dev_nr);
 
+	if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+		pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+		pr_err("Please update to the latest bcache-tools to create the cache device\n");
+		set_disk_ro(dc->disk.disk, 1);
+	}
+
 	/* Allow the writeback thread to proceed */
 	up_write(&dc->writeback_lock);
 
@@ -1564,6 +1570,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 
 	bcache_device_link(d, c, "volume");
 
+	if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+		pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+		pr_err("Please update to the latest bcache-tools to create the cache device\n");
+		set_disk_ro(d->disk, 1);
+	}
+
 	return 0;
 err:
 	kobject_put(&d->kobj);
@@ -2123,6 +2135,9 @@ static int run_cache_set(struct cache_set *c)
 	c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
 	bcache_write_super(c);
 
+	if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+		pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
 		bch_cached_dev_attach(dc, c, NULL);


@@ -314,7 +314,7 @@ void lkdtm_USERCOPY_KERNEL(void)
 
 	pr_info("attempting bad copy_to_user from kernel text: %px\n",
 		vm_mmap);
-	if (copy_to_user((void __user *)user_addr, vm_mmap,
+	if (copy_to_user((void __user *)user_addr, __va_function(vm_mmap),
 			 unconst + PAGE_SIZE)) {
 		pr_warn("copy_to_user failed, but lacked Oops\n");
 		goto free_user;


@@ -645,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
 	return 0;
 }
 
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct bareudp_dev *bareudp = netdev_priv(dev);
+
+	list_del(&bareudp->next);
+	unregister_netdevice_queue(dev, head);
+}
+
 static int bareudp_newlink(struct net *net, struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[],
 			   struct netlink_ext_ack *extack)
 {
 	struct bareudp_conf conf;
+	LIST_HEAD(list_kill);
 	int err;
 
 	err = bareudp2info(data, &conf, extack);
@@ -662,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
 
 	err = bareudp_link_config(dev, tb);
 	if (err)
-		return err;
+		goto err_unconfig;
 
 	return 0;
-}
 
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
-	struct bareudp_dev *bareudp = netdev_priv(dev);
-
-	list_del(&bareudp->next);
-	unregister_netdevice_queue(dev, head);
+err_unconfig:
+	bareudp_dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
+	return err;
 }
 
 static size_t bareudp_get_size(const struct net_device *dev)


@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
 config CAN_KVASER_PCIEFD
 	depends on PCI
 	tristate "Kvaser PCIe FD cards"
+	select CRC32
 	help
 	  This is a driver for the Kvaser PCI Express CAN FD family.


@@ -1914,8 +1914,6 @@ EXPORT_SYMBOL_GPL(m_can_class_resume);
 void m_can_class_unregister(struct m_can_classdev *m_can_dev)
 {
 	unregister_candev(m_can_dev->net);
-
-	m_can_clk_stop(m_can_dev);
 }
 EXPORT_SYMBOL_GPL(m_can_class_unregister);


@@ -129,30 +129,6 @@ struct tcan4x5x_priv {
 	int reg_offset;
 };
 
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
-	.name = DEVICE_NAME,
-	.tseg1_min = 2,
-	.tseg1_max = 31,
-	.tseg2_min = 2,
-	.tseg2_max = 16,
-	.sjw_max = 16,
-	.brp_min = 1,
-	.brp_max = 32,
-	.brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
-	.name = DEVICE_NAME,
-	.tseg1_min = 1,
-	.tseg1_max = 32,
-	.tseg2_min = 1,
-	.tseg2_max = 16,
-	.sjw_max = 16,
-	.brp_min = 1,
-	.brp_max = 32,
-	.brp_inc = 1,
-};
-
 static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
 {
 	int wake_state = 0;
@@ -479,8 +455,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	mcan_class->dev = &spi->dev;
 	mcan_class->ops = &tcan4x5x_ops;
 	mcan_class->is_peripheral = true;
-	mcan_class->bit_timing = &tcan4x5x_bittiming_const;
-	mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
 	mcan_class->net->irq = spi->irq;
 
 	spi_set_drvdata(spi, priv);


@@ -1436,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
 	phylink_set(mask, Pause);
 	phylink_set(mask, Asym_Pause);
 
-	/* With the exclusion of MII and Reverse MII, we support Gigabit,
-	 * including Half duplex
+	/* With the exclusion of MII, Reverse MII and Reduced MII, we
+	 * support Gigabit, including Half duplex
 	 */
 	if (state->interface != PHY_INTERFACE_MODE_MII &&
-	    state->interface != PHY_INTERFACE_MODE_REVMII) {
+	    state->interface != PHY_INTERFACE_MODE_REVMII &&
+	    state->interface != PHY_INTERFACE_MODE_RMII) {
 		phylink_set(mask, 1000baseT_Full);
 		phylink_set(mask, 1000baseT_Half);
 	}


@@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 	while (!skb_queue_empty(&listen_ctx->synq)) {
 		struct chtls_sock *csk =
-			container_of((struct synq *)__skb_dequeue
+			container_of((struct synq *)skb_peek
 				(&listen_ctx->synq), struct chtls_sock, synq);
 		struct sock *child = csk->sk;
@@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 				     const struct cpl_pass_accept_req *req,
 				     struct chtls_dev *cdev)
 {
+	struct adapter *adap = pci_get_drvdata(cdev->pdev);
 	struct neighbour *n = NULL;
 	struct inet_sock *newinet;
 	const struct iphdr *iph;
@@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	struct dst_entry *dst;
 	struct tcp_sock *tp;
 	struct sock *newsk;
+	bool found = false;
 	u16 port_id;
 	int rxq_idx;
-	int step;
+	int step, i;

 	iph = (const struct iphdr *)network_hdr;
 	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 		n = dst_neigh_lookup(dst, &ip6h->saddr);
 #endif
 	}
-	if (!n)
+	if (!n || !n->dev)
 		goto free_sk;

 	ndev = n->dev;
@@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	if (is_vlan_dev(ndev))
 		ndev = vlan_dev_real_dev(ndev);

+	for_each_port(adap, i)
+		if (cdev->ports[i] == ndev)
+			found = true;
+
+	if (!found)
+		goto free_dst;
+
 	port_id = cxgb4_port_idx(ndev);

 	csk = chtls_sock_create(cdev);
@@ -1237,6 +1246,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
 	chtls_sock_release(&csk->kref);
 free_dst:
+	neigh_release(n);
 	dst_release(dst);
 free_sk:
 	inet_csk_prepare_forced_close(newsk);
@@ -1386,7 +1396,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
 	if (!newsk)
-		goto free_oreq;
+		goto reject;

 	if (chtls_get_module(newsk))
 		goto reject;
@@ -1402,8 +1412,6 @@ static void chtls_pass_accept_request(struct sock *sk,
 	kfree_skb(skb);
 	return;

-free_oreq:
-	chtls_reqsk_free(oreq);
 reject:
 	mk_tid_release(reply_skb, 0, tid);
 	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1588,6 +1596,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
 		sk_wake_async(sk, 0, POLL_OUT);

 	data = lookup_stid(cdev->tids, stid);
+	if (!data) {
+		/* listening server close */
+		kfree_skb(skb);
+		goto unlock;
+	}
+
 	lsk = ((struct listen_ctx *)data)->lsk;

 	bh_lock_sock(lsk);
@@ -1996,39 +2009,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
 	spin_unlock_bh(&cdev->deferq.lock);
 }

-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
-			   struct chtls_dev *cdev, int status, int queue)
-{
-	struct cpl_abort_req_rss *req = cplhdr(skb);
-	struct sk_buff *reply_skb;
-	struct chtls_sock *csk;
-
-	csk = rcu_dereference_sk_user_data(sk);
-
-	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
-			      GFP_KERNEL);
-
-	if (!reply_skb) {
-		req->status = (queue << 1);
-		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
-		return;
-	}
-
-	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
-	kfree_skb(skb);
-
-	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
-	if (csk_conn_inline(csk)) {
-		struct l2t_entry *e = csk->l2t_entry;
-
-		if (e && sk->sk_state != TCP_SYN_RECV) {
-			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
-			return;
-		}
-	}
-	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
 				 struct chtls_dev *cdev,
 				 int status, int queue)
@@ -2077,9 +2057,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
 	queue = csk->txq_idx;

 	skb->sk = NULL;
+	chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+			     CPL_ABORT_NO_RST, queue);
 	do_abort_syn_rcv(child, lsk);
-	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
-		       CPL_ABORT_NO_RST, queue);
 }

 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2109,8 +2089,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
 	if (!sock_owned_by_user(psk)) {
 		int queue = csk->txq_idx;

+		chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
 		do_abort_syn_rcv(sk, psk);
-		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
 	} else {
 		skb->sk = sk;
 		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2128,9 +2108,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
 	int queue = csk->txq_idx;

 	if (is_neg_adv(req->status)) {
-		if (sk->sk_state == TCP_SYN_RECV)
-			chtls_set_tcb_tflag(sk, 0, 0);
-
 		kfree_skb(skb);
 		return;
 	}
@@ -2157,12 +2134,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
 			return;
-
-		chtls_release_resources(sk);
-		chtls_conn_done(sk);
 	}

 	chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
 			     rst_status, queue);
+
+	chtls_release_resources(sk);
+	chtls_conn_done(sk);
 }

 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)


@@ -168,7 +168,7 @@ struct hclgevf_mbx_arq_ring {
 #define hclge_mbx_ring_ptr_move_crq(crq) \
 	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
 #define hclge_mbx_tail_ptr_move_arq(arq) \
-	(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+	(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #define hclge_mbx_head_ptr_move_arq(arq) \
-	(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+	(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #endif


@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

-		if (hdev->hw.mac.phydev) {
+		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+		    hdev->hw.mac.phydev->drv->set_loopback) {
 			count += 1;
 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
 		}
@@ -4484,8 +4485,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
 		req->ipv4_sctp_en = tuple_sets;
 		break;
 	case SCTP_V6_FLOW:
-		if ((nfc->data & RXH_L4_B_0_1) ||
-		    (nfc->data & RXH_L4_B_2_3))
+		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
 			return -EINVAL;

 		req->ipv6_sctp_en = tuple_sets;
@@ -4665,6 +4666,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
 		vport[i].rss_tuple_sets.ipv6_udp_en =
 			HCLGE_RSS_INPUT_TUPLE_OTHER;
 		vport[i].rss_tuple_sets.ipv6_sctp_en =
+			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
 			HCLGE_RSS_INPUT_TUPLE_SCTP;
 		vport[i].rss_tuple_sets.ipv6_fragment_en =
 			HCLGE_RSS_INPUT_TUPLE_OTHER;
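Note that the rewritten check folds two separate bit tests into a single mask test; the two forms are equivalent. A minimal standalone C sketch confirming this (the RXH_* values are copied from ethtool UAPI for illustration only):

    #include <assert.h>
    #include <stdint.h>

    #define RXH_L4_B_0_1 (1 << 6)  /* src port bits, as in ethtool.h */
    #define RXH_L4_B_2_3 (1 << 7)  /* dst port bits */

    int main(void)
    {
        for (uint32_t data = 0; data < 256; data++) {
            int old_check = (data & RXH_L4_B_0_1) || (data & RXH_L4_B_2_3);
            int new_check = (data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) != 0;

            assert(old_check == new_check); /* never fires */
        }
        return 0;
    }

The behavioral change in the hunk is therefore only the added dev_version gate, which allows the port bits on V3 and later hardware.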


@@ -105,6 +105,8 @@
 #define HCLGE_D_IP_BIT BIT(2)
 #define HCLGE_S_IP_BIT BIT(3)
 #define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+	(HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)

 #define HCLGE_RSS_TC_SIZE_0 1
 #define HCLGE_RSS_TC_SIZE_1 2


@@ -901,8 +901,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
 		req->ipv4_sctp_en = tuple_sets;
 		break;
 	case SCTP_V6_FLOW:
-		if ((nfc->data & RXH_L4_B_0_1) ||
-		    (nfc->data & RXH_L4_B_2_3))
+		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
 			return -EINVAL;

 		req->ipv6_sctp_en = tuple_sets;
@@ -2481,7 +2481,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
 	tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
 	tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
 	tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-	tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+	tuple_sets->ipv6_sctp_en =
+		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+		HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+		HCLGEVF_RSS_INPUT_TUPLE_SCTP;
 	tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
 }


@@ -122,6 +122,8 @@
 #define HCLGEVF_D_IP_BIT BIT(2)
 #define HCLGEVF_S_IP_BIT BIT(3)
 #define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+	(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)

 #define HCLGEVF_STATS_TIMER_INTERVAL 36U


@@ -4409,7 +4409,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
 	struct bpf_prog *old_prog;

 	if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
-		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
 		return -EOPNOTSUPP;
 	}


@@ -5480,7 +5480,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 	struct mvpp2 *priv = port->priv;
 	struct mvpp2_txq_pcpu *txq_pcpu;
 	unsigned int thread;
-	int queue, err;
+	int queue, err, val;

 	/* Checks for hardware constraints */
 	if (port->first_rxq + port->nrxqs >
@@ -5494,6 +5494,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
 	mvpp2_egress_disable(port);
 	mvpp2_port_disable(port);

+	if (mvpp2_is_xlg(port->phy_interface)) {
+		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+		val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+		val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+	} else {
+		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	}
+
 	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

 	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
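The added block is a plain read-modify-write of a MAC control register: clear the force-link-up bit, set the force-link-down bit, write the word back. A minimal userspace model of the same pattern (register and bit names here are simulated, not the real MVPP2 definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define FORCE_LINK_PASS (1u << 1)  /* illustrative bit positions */
    #define FORCE_LINK_DOWN (1u << 2)

    static uint32_t fake_reg = FORCE_LINK_PASS; /* firmware left link forced up */

    int main(void)
    {
        uint32_t val = fake_reg;        /* readl() stand-in */

        val &= ~FORCE_LINK_PASS;        /* stop forcing link up */
        val |= FORCE_LINK_DOWN;         /* force link down until phylink decides */
        fake_reg = val;                 /* writel() stand-in */

        printf("reg = %#x\n", fake_reg);
        return 0;
    }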


@@ -862,8 +862,10 @@ static int cgx_lmac_init(struct cgx *cgx)
 		if (!lmac)
 			return -ENOMEM;
 		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
-		if (!lmac->name)
-			return -ENOMEM;
+		if (!lmac->name) {
+			err = -ENOMEM;
+			goto err_lmac_free;
+		}
 		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
 		lmac->lmac_id = i;
 		lmac->cgx = cgx;
@@ -874,7 +876,7 @@ static int cgx_lmac_init(struct cgx *cgx)
 					 CGX_LMAC_FWI + i * 9),
 				   cgx_fwi_event_handler, 0, lmac->name, lmac);
 		if (err)
-			return err;
+			goto err_irq;

 		/* Enable interrupt */
 		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -886,6 +888,12 @@ static int cgx_lmac_init(struct cgx *cgx)
 	}

 	return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+	kfree(lmac->name);
+err_lmac_free:
+	kfree(lmac);
+	return err;
 }

 static int cgx_lmac_exit(struct cgx *cgx)
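This fix converts early returns into the kernel's usual goto-unwind style, so each failure point releases exactly what was allocated before it. A compact standalone sketch of that pattern, with invented names (malloc/free stand in for the kernel allocators):

    #include <stdlib.h>

    static int init_obj(void)
    {
        char *obj, *name;
        int err;

        obj = malloc(64);
        if (!obj)
            return -1;

        name = malloc(16);
        if (!name) {
            err = -1;
            goto err_obj_free;   /* only obj exists yet */
        }

        if (0 /* pretend a later step, e.g. request_irq(), fails */) {
            err = -1;
            goto err_name_free;  /* both allocations exist */
        }

        free(name);
        free(obj);               /* success path of this demo */
        return 0;

    err_name_free:
        free(name);
    err_obj_free:
        free(obj);
        return err;
    }

    int main(void) { return init_obj(); }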


@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	if (!reg_c0)
 		return true;

+	/* If reg_c0 is not equal to the default flow tag then skb->mark
+	 * is not supported and must be reset back to 0.
+	 */
+	skb->mark = 0;
+
 	priv = netdev_priv(skb->dev);
 	esw = priv->mdev->priv.eswitch;


@@ -1007,6 +1007,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
 }

+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+				const unsigned long link_modes, u8 autoneg)
+{
+	/* Extended link-mode has no speed limitations. */
+	if (ext)
+		return 0;
+
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
 {
 	u32 i, ptys_modes = 0;
@@ -1100,13 +1116,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);

-	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
-	    autoneg != AUTONEG_ENABLE) {
-		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
-			   __func__);
-		err = -EINVAL;
-		goto out;
-	}
+	err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+	if (err)
+		goto out;

 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {


@@ -936,6 +936,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
@@ -1081,6 +1082,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
@@ -1384,6 +1386,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
 	ft->g[ft->num_groups] = NULL;
 	mlx5e_destroy_groups(ft);
 	kvfree(in);
+	kfree(ft->g);

 	return err;
 }


@@ -556,7 +556,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	struct mlx5_core_dev *tmp_dev;
 	int i, err;

-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
 		return;

 	tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -574,12 +576,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
 		return;

-	for (i = 0; i < MLX5_MAX_PORTS; i++) {
-		tmp_dev = ldev->pf[i].dev;
-		if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
-		    MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (!ldev->pf[i].dev)
 			break;
-	}

 	if (i >= MLX5_MAX_PORTS)
 		ldev->flags |= MLX5_LAG_FLAG_READY;


@@ -116,7 +116,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
 {
 	mlx5_core_roce_gid_set(dev, 0, 0, 0,
-			       NULL, NULL, false, 0, 0);
+			       NULL, NULL, false, 0, 1);
 }

 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)


@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
 	err = register_netdev(dev);
 	if (err)
-		goto out;
+		goto undo_probe;

 	return 0;

+undo_probe:
+	dma_free_coherent(lp->device,
+			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			  lp->descriptors, lp->descriptors_laddr);
 out:
 	free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
 	err = register_netdev(ndev);
 	if (err)
-		goto out;
+		goto undo_probe;

 	nubus_set_drvdata(board, ndev);

 	return 0;

+undo_probe:
+	dma_free_coherent(lp->device,
+			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			  lp->descriptors, lp->descriptors_laddr);
 out:
 	free_netdev(ndev);
 	return err;


@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
 	sonic_msg_init(dev);

 	if ((err = register_netdev(dev)))
-		goto out1;
+		goto undo_probe1;

 	return 0;

-out1:
+undo_probe1:
+	dma_free_coherent(lp->device,
+			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			  lp->descriptors, lp->descriptors_laddr);
 	release_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
 	free_netdev(dev);


@@ -123,6 +123,12 @@ static void ionic_link_status_check(struct ionic_lif *lif)
 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

 	if (link_up) {
+		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+			mutex_lock(&lif->queue_lock);
+			ionic_start_queues(lif);
+			mutex_unlock(&lif->queue_lock);
+		}
+
 		if (!netif_carrier_ok(netdev)) {
 			u32 link_speed;

@@ -132,12 +138,6 @@ static void ionic_link_status_check(struct ionic_lif *lif)
 				    link_speed / 1000);
 			netif_carrier_on(netdev);
 		}
-
-		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
-			mutex_lock(&lif->queue_lock);
-			ionic_start_queues(lif);
-			mutex_unlock(&lif->queue_lock);
-		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
 			netdev_info(netdev, "Link down\n");


@@ -78,6 +78,7 @@ config QED
 	depends on PCI
 	select ZLIB_INFLATE
 	select CRC8
+	select CRC32
 	select NET_DEVLINK
 	help
 	  This enables the support for Marvell FastLinQ adapters family.


@@ -64,6 +64,7 @@ struct emac_variant {
  * @variant:	reference to the current board variant
  * @regmap:	regmap for using the syscon
  * @internal_phy_powered: Does the internal PHY is enabled
+ * @use_internal_phy: Is the internal PHY selected for use
  * @mux_handle:	Internal pointer used by mdio-mux lib
  */
 struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
 	const struct emac_variant *variant;
 	struct regmap_field *regmap_field;
 	bool internal_phy_powered;
+	bool use_internal_phy;
 	void *mux_handle;
 };
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
 	.dma_interrupt = sun8i_dwmac_dma_interrupt,
 };

+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
 static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct sunxi_priv_data *gmac = priv;
 	int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 	ret = clk_prepare_enable(gmac->tx_clk);
 	if (ret) {
-		if (gmac->regulator)
-			regulator_disable(gmac->regulator);
 		dev_err(&pdev->dev, "Could not enable AHB clock\n");
-		return ret;
+		goto err_disable_regulator;
+	}
+
+	if (gmac->use_internal_phy) {
+		ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+		if (ret)
+			goto err_disable_clk;
 	}

 	return 0;
+
+err_disable_clk:
+	clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+	if (gmac->regulator)
+		regulator_disable(gmac->regulator);
+
+	return ret;
 }

 static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
 	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
 	u32 reg, val;
 	int ret = 0;
-	bool need_power_ephy = false;

 	if (current_child ^ desired_child) {
 		regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
 		case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
 			dev_info(priv->device, "Switch mux to internal PHY");
 			val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-			need_power_ephy = true;
+			gmac->use_internal_phy = true;
 			break;
 		case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
 			dev_info(priv->device, "Switch mux to external PHY");
 			val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
-			need_power_ephy = false;
+			gmac->use_internal_phy = false;
 			break;
 		default:
 			dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
 			return -EINVAL;
 		}
 		regmap_field_write(gmac->regmap_field, val);
-		if (need_power_ephy) {
+		if (gmac->use_internal_phy) {
 			ret = sun8i_dwmac_power_internal_phy(priv);
 			if (ret)
 				return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
 	return ret;
 }

-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+				  struct plat_stmmacenet_data *plat)
 {
-	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
-	struct device_node *node = priv->device->of_node;
+	struct sunxi_priv_data *gmac = plat->bsp_priv;
+	struct device_node *node = dev->of_node;
 	int ret;
 	u32 reg, val;

 	ret = regmap_field_read(gmac->regmap_field, &val);
 	if (ret) {
-		dev_err(priv->device, "Fail to read from regmap field.\n");
+		dev_err(dev, "Fail to read from regmap field.\n");
 		return ret;
 	}

 	reg = gmac->variant->default_syscon_value;
 	if (reg != val)
-		dev_warn(priv->device,
+		dev_warn(dev,
 			 "Current syscon value is not the default %x (expect %x)\n",
 			 val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 		/* Force EPHY xtal frequency to 24MHz. */
 		reg |= H3_EPHY_CLK_SEL;

-		ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+		ret = of_mdio_parse_addr(dev, plat->phy_node);
 		if (ret < 0) {
-			dev_err(priv->device, "Could not parse MDIO addr\n");
+			dev_err(dev, "Could not parse MDIO addr\n");
 			return ret;
 		}
 		/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
 		if (val % 100) {
-			dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+			dev_err(dev, "tx-delay must be a multiple of 100\n");
 			return -EINVAL;
 		}
 		val /= 100;
-		dev_dbg(priv->device, "set tx-delay to %x\n", val);
+		dev_dbg(dev, "set tx-delay to %x\n", val);
 		if (val <= gmac->variant->tx_delay_max) {
 			reg &= ~(gmac->variant->tx_delay_max <<
 				 SYSCON_ETXDC_SHIFT);
 			reg |= (val << SYSCON_ETXDC_SHIFT);
 		} else {
-			dev_err(priv->device, "Invalid TX clock delay: %d\n",
+			dev_err(dev, "Invalid TX clock delay: %d\n",
 				val);
 			return -EINVAL;
 		}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
 		if (val % 100) {
-			dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+			dev_err(dev, "rx-delay must be a multiple of 100\n");
 			return -EINVAL;
 		}
 		val /= 100;
-		dev_dbg(priv->device, "set rx-delay to %x\n", val);
+		dev_dbg(dev, "set rx-delay to %x\n", val);
 		if (val <= gmac->variant->rx_delay_max) {
 			reg &= ~(gmac->variant->rx_delay_max <<
 				 SYSCON_ERXDC_SHIFT);
 			reg |= (val << SYSCON_ERXDC_SHIFT);
 		} else {
-			dev_err(priv->device, "Invalid RX clock delay: %d\n",
+			dev_err(dev, "Invalid RX clock delay: %d\n",
 				val);
 			return -EINVAL;
 		}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	if (gmac->variant->support_rmii)
 		reg &= ~SYSCON_RMII_EN;

-	switch (priv->plat->interface) {
+	switch (plat->interface) {
 	case PHY_INTERFACE_MODE_MII:
 		/* default */
 		break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 		reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
 		break;
 	default:
-		dev_err(priv->device, "Unsupported interface mode: %s",
-			phy_modes(priv->plat->interface));
+		dev_err(dev, "Unsupported interface mode: %s",
+			phy_modes(plat->interface));
 		return -EINVAL;
 	}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
 	struct sunxi_priv_data *gmac = priv;

 	if (gmac->variant->soc_has_internal_phy) {
-		/* sun8i_dwmac_exit could be called with mdiomux uninit */
-		if (gmac->mux_handle)
-			mdio_mux_uninit(gmac->mux_handle);
 		if (gmac->internal_phy_powered)
 			sun8i_dwmac_unpower_internal_phy(gmac);
 	}

-	sun8i_dwmac_unset_syscon(gmac);
-
-	reset_control_put(gmac->rst_ephy);
-
 	clk_disable_unprepare(gmac->tx_clk);

 	if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 {
 	struct mac_device_info *mac;
 	struct stmmac_priv *priv = ppriv;
-	int ret;

 	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
 	if (!mac)
 		return NULL;

-	ret = sun8i_dwmac_set_syscon(priv);
-	if (ret)
-		return NULL;
-
 	mac->pcsr = priv->ioaddr;
 	mac->mac = &sun8i_dwmac_ops;
 	mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;

-	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
-	if (IS_ERR(plat_dat))
-		return PTR_ERR(plat_dat);
-
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
 	if (!gmac)
 		return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 	ret = of_get_phy_mode(dev->of_node, &interface);
 	if (ret)
 		return -EINVAL;
-	plat_dat->interface = interface;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);

 	/* platform data specifying hardware features and callbacks.
 	 * hardware features were copied from Allwinner drivers.
 	 */
+	plat_dat->interface = interface;
 	plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
 	plat_dat->tx_coe = 1;
 	plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 	plat_dat->exit = sun8i_dwmac_exit;
 	plat_dat->setup = sun8i_dwmac_setup;

+	ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+	if (ret)
+		goto dwmac_deconfig;
+
 	ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
 	if (ret)
-		return ret;
+		goto dwmac_syscon;

 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 	if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 	if (gmac->variant->soc_has_internal_phy) {
 		ret = get_ephy_nodes(priv);
 		if (ret)
-			goto dwmac_exit;
+			goto dwmac_remove;
 		ret = sun8i_dwmac_register_mdio_mux(priv);
 		if (ret) {
 			dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 	} else {
 		ret = sun8i_dwmac_reset(priv);
 		if (ret)
-			goto dwmac_exit;
+			goto dwmac_remove;
 	}

 	return ret;

 dwmac_mux:
-	sun8i_dwmac_unset_syscon(gmac);
+	reset_control_put(gmac->rst_ephy);
+	clk_put(gmac->ephy_clk);
+dwmac_remove:
+	stmmac_dvr_remove(&pdev->dev);
 dwmac_exit:
+	sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+	sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+	if (gmac->variant->soc_has_internal_phy) {
+		mdio_mux_uninit(gmac->mux_handle);
+		sun8i_dwmac_unpower_internal_phy(gmac);
+		reset_control_put(gmac->rst_ephy);
+		clk_put(gmac->ephy_clk);
+	}
+
 	stmmac_pltfr_remove(pdev);
-	return ret;
+	sun8i_dwmac_unset_syscon(gmac);
+
+	return 0;
 }

 static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
 static struct platform_driver sun8i_dwmac_driver = {
 	.probe = sun8i_dwmac_probe,
-	.remove = stmmac_pltfr_remove,
+	.remove = sun8i_dwmac_remove,
 	.driver = {
 		.name = "dwmac-sun8i",
 		.pm = &stmmac_pltfr_pm_ops,


@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	 * accordingly. Otherwise, we should check here.
 	 */
 	if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
-		delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+		delayed_ndp_size = ctx->max_ndp_size +
+			max_t(u32,
+			      ctx->tx_ndp_modulus,
+			      ctx->tx_modulus + ctx->tx_remainder) - 1;
 	else
 		delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
 	    skb_out->len > ctx->min_tx_pkt) {
 		padding_count = ctx->tx_curr_size - skb_out->len;
-		skb_put_zero(skb_out, padding_count);
+		if (!WARN_ON(padding_count > ctx->tx_curr_size))
+			skb_put_zero(skb_out, padding_count);
 	} else if (skb_out->len < ctx->tx_curr_size &&
 		   (skb_out->len % dev->maxpacket) == 0) {
 		skb_put_u8(skb_out, 0);	/* force short packet */
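The first hunk enlarges the space reserved for a delayed NDP: besides max_ndp_size itself, the worst-case alignment padding before the NDP is max(tx_ndp_modulus, tx_modulus + tx_remainder) - 1 bytes. A small arithmetic check with sample values (the real parameters come from the device's NCM parameter exchange, these numbers are only illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint32_t max_ndp_size = 32, tx_ndp_modulus = 4;
        uint32_t tx_modulus = 64, tx_remainder = 0;

        /* Old code reserved ALIGN(32, 4) = 32 bytes, which can be too
         * little once the NDP start is padded for the datagram alignment
         * rule as well.
         */
        uint32_t delayed = max_ndp_size +
            max_u32(tx_ndp_modulus, tx_modulus + tx_remainder) - 1;

        printf("reserve %u bytes for the delayed NDP\n", delayed); /* 95 */
        return 0;
    }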


@@ -282,6 +282,7 @@ config SLIC_DS26522
 	tristate "Slic Maxim ds26522 card support"
 	depends on SPI
 	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+	select BITREVERSE
 	help
 	  This module initializes and configures the slic maxim card
 	  in T1 or E1 mode.


@@ -2,6 +2,7 @@
 config WIL6210
 	tristate "Wilocity 60g WiFi card wil6210 support"
 	select WANT_DEV_COREDUMP
+	select CRC32
 	depends on CFG80211
 	depends on PCI
 	default n


@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
 	}
 }

+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+	int ret;
+
+	/* drain the send queue as much as we can... */
+	do {
+		ret = nvme_tcp_try_send(queue);
+	} while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	if (queue->io_cpu == smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
 		queue->more_requests = !last;
-		nvme_tcp_try_send(queue);
+		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	} else if (last) {
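nvme_tcp_send_all() keeps calling the send routine while it reports forward progress, instead of sending at most one PDU per queueing event. A trivial standalone model of that drain loop (try_send() is a stand-in for the real nvme_tcp_try_send(), which returns >0 on progress, 0 when done, <0 on error):

    #include <stdio.h>

    static int budget = 3;

    static int try_send(void)
    {
        return budget-- > 0 ? 1 : 0; /* pretend three sends succeed */
    }

    int main(void)
    {
        int ret;

        /* Drain until no forward progress, mirroring nvme_tcp_send_all(). */
        do {
            ret = try_send();
        } while (ret > 0);

        printf("queue drained\n");
        return 0;
    }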


@@ -64,6 +64,7 @@ config DP83640_PHY
 	depends on NETWORK_PHY_TIMESTAMPING
 	depends on PHYLIB
 	depends on PTP_1588_CLOCK
+	select CRC32
 	help
 	  Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_INES
 	tristate "ZHAW InES PTP time stamping IP core"
 	depends on NETWORK_PHY_TIMESTAMPING
+	depends on HAS_IOMEM
 	depends on PHYLIB
 	depends on PTP_1588_CLOCK
 	help


@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
 	.regulator_type = VRM,
 	.ops = &rpmh_regulator_vrm_ops,
-	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
 	.n_voltages = 5,
 	.pmic_mode_map = pmic_mode_map_pmic5_smps,
 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,


@@ -1075,7 +1075,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
 			      int clear_start_mask);
 int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+		     bool resetting);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
 		      int (*reply_cb)


@@ -5300,12 +5300,12 @@ static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 	return rc;
 }

-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+			   const struct qeth_discipline *disc)
 {
 	bool carrier_ok;
 	int rc;

-	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5322,7 +5322,7 @@ static int qeth_set_online(struct qeth_card *card)
 	/* no need for locking / error handling at this early stage: */
 	qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

-	rc = card->discipline->set_online(card, carrier_ok);
+	rc = disc->set_online(card, carrier_ok);
 	if (rc)
 		goto err_online;
@@ -5330,7 +5330,6 @@ static int qeth_set_online(struct qeth_card *card)
 	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

 	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
 	return 0;

 err_online:
@@ -5345,15 +5344,14 @@ static int qeth_set_online(struct qeth_card *card)
 	qdio_free(CARD_DDEV(card));

 	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
 	return rc;
 }

-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+		     bool resetting)
 {
 	int rc, rc2, rc3;

-	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5374,7 +5372,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
 	cancel_work_sync(&card->rx_mode_work);

-	card->discipline->set_offline(card);
+	disc->set_offline(card);

 	qeth_qdio_clear_card(card, 0);
 	qeth_drain_output_queues(card);
@@ -5395,16 +5393,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
 	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

 	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_set_offline);

 static int qeth_do_reset(void *data)
 {
+	const struct qeth_discipline *disc;
 	struct qeth_card *card = data;
 	int rc;

+	/* Lock-free, other users will block until we are done. */
+	disc = card->discipline;
+
 	QETH_CARD_TEXT(card, 2, "recover1");
 	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
 		return 0;
@@ -5412,8 +5413,8 @@ static int qeth_do_reset(void *data)
 	dev_warn(&card->gdev->dev,
 		 "A recovery process has been started for the device\n");

-	qeth_set_offline(card, true);
-	rc = qeth_set_online(card);
+	qeth_set_offline(card, disc, true);
+	rc = qeth_set_online(card, disc);
 	if (!rc) {
 		dev_info(&card->gdev->dev,
 			 "Device successfully recovered!\n");
@@ -6360,6 +6361,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		break;
 	default:
 		card->info.layer_enforced = true;
+		/* It's so early that we don't need the discipline_mutex yet. */
 		rc = qeth_core_load_discipline(card, enforced_disc);
 		if (rc)
 			goto err_load;
@@ -6392,10 +6394,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 	QETH_CARD_TEXT(card, 2, "removedv");

+	mutex_lock(&card->discipline_mutex);
 	if (card->discipline) {
 		card->discipline->remove(gdev);
 		qeth_core_free_discipline(card);
 	}
+	mutex_unlock(&card->discipline_mutex);

 	qeth_free_qdio_queues(card);
@@ -6410,6 +6414,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 	int rc = 0;
 	enum qeth_discipline_id def_discipline;

+	mutex_lock(&card->discipline_mutex);
 	if (!card->discipline) {
 		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
 						QETH_DISCIPLINE_LAYER2;
@@ -6423,16 +6428,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 		}
 	}

-	rc = qeth_set_online(card);
+	rc = qeth_set_online(card, card->discipline);
 err:
+	mutex_unlock(&card->discipline_mutex);
 	return rc;
 }

 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;

-	return qeth_set_offline(card, false);
+	mutex_lock(&card->discipline_mutex);
+	rc = qeth_set_offline(card, card->discipline, false);
+	mutex_unlock(&card->discipline_mutex);
+
+	return rc;
 }

 static void qeth_core_shutdown(struct ccwgroup_device *gdev)


@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

 	if (gdev->state == CCWGROUP_ONLINE)
-		qeth_set_offline(card, false);
+		qeth_set_offline(card, card->discipline, false);

 	cancel_work_sync(&card->close_dev_work);
 	if (card->dev->reg_state == NETREG_REGISTERED)


@@ -1816,7 +1816,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
 						    struct net_device *dev,
 						    netdev_features_t features)
 {
-	if (qeth_get_ip_version(skb) != 4)
+	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 	return qeth_features_check(skb, dev, features);
 }
@@ -1974,7 +1974,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

 	if (cgdev->state == CCWGROUP_ONLINE)
-		qeth_set_offline(card, false);
+		qeth_set_offline(card, card->discipline, false);

 	cancel_work_sync(&card->close_dev_work);
 	if (card->dev->reg_state == NETREG_REGISTERED)


@@ -10459,7 +10459,6 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 			   struct lpfc_nodelist *ndlp,
 			   struct sli4_wcqe_xri_aborted *axri)
 {
-	struct lpfc_vport *vport;
 	uint32_t ext_status = 0;

 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
@@ -10469,7 +10468,6 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 		return;
 	}

-	vport = ndlp->vport;
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"3116 Port generated FCP XRI ABORT event on "
 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",


@@ -1116,6 +1116,17 @@ static int remove_group_qos(struct qos_cpu_group *qcg)
 	return 0;
 }

+static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/* reset gpio is optional */
+	if (!host->device_reset)
+		return;
+
+	gpiod_set_value_cansleep(host->device_reset, asserted);
+}
+
 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1137,6 +1148,9 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		err = ufs_qcom_unvote_qos_all(hba);
 	}

+	if (!err && ufs_qcom_is_link_off(hba) && host->device_reset)
+		ufs_qcom_device_reset_ctrl(hba, true);
+
 	return err;
 }
@@ -3408,10 +3422,10 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
 	 * The UFS device shall detect reset pulses of 1us, sleep for 10us to
 	 * be on the safe side.
 	 */
-	gpiod_set_value_cansleep(host->device_reset, 1);
+	ufs_qcom_device_reset_ctrl(hba, true);
 	usleep_range(10, 15);

-	gpiod_set_value_cansleep(host->device_reset, 0);
+	ufs_qcom_device_reset_ctrl(hba, false);
 	usleep_range(10, 15);

 	return 0;


@@ -7737,7 +7737,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
 	else if (wlun == UFS_UPIU_RPMB_WLUN)
 		sdp = hba->sdev_rpmb;
 	else
-		BUG_ON(1);
+		BUG();
 	if (sdp) {
 		ret = scsi_device_get(sdp);
 		if (!ret && !scsi_device_online(sdp)) {
@@ -7900,8 +7900,6 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 		pm_runtime_put_sync(hba->dev);
 		ufshcd_exit_clk_scaling(hba);
 		ufshcd_hba_exit(hba);
-	} else {
-		ufshcd_clear_ua_wluns(hba);
 	}
 }
@@ -8699,8 +8697,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		if (ret)
 			goto set_dev_active;

-	ufshcd_vreg_set_lpm(hba);
-
 disable_clks:
 	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
@@ -8728,6 +8724,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			hba->clk_gating.state);
 	}

+	ufshcd_vreg_set_lpm(hba);
+
 	/* Put the host controller in low power mode if possible */
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
@@ -8782,18 +8780,18 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	old_link_state = hba->uic_link_state;

 	ufshcd_hba_vreg_set_hpm(hba);
-	/* Make sure clocks are enabled before accessing controller */
-	ret = ufshcd_setup_clocks(hba, true);
+	ret = ufshcd_vreg_set_hpm(hba);
 	if (ret)
 		goto out;

+	/* Make sure clocks are enabled before accessing controller */
+	ret = ufshcd_setup_clocks(hba, true);
+	if (ret)
+		goto disable_vreg;
+
 	/* enable the host irq as host controller would be active soon */
 	ufshcd_enable_irq(hba);

-	ret = ufshcd_vreg_set_hpm(hba);
-	if (ret)
-		goto disable_irq_and_vops_clks;
-
 	/*
 	 * Call vendor specific resume callback. As these callbacks may access
 	 * vendor specific host controller register space call them when the
@@ -8801,7 +8799,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	 */
 	ret = ufshcd_vops_resume(hba, pm_op);
 	if (ret)
-		goto disable_vreg;
+		goto disable_irq_and_vops_clks;

 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
@@ -8865,8 +8863,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_link_state_transition(hba, old_link_state, 0);
 vendor_suspend:
 	ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
-	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
 	if (hba->clk_scaling.is_allowed)
@@ -8877,6 +8873,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
+disable_vreg:
+	ufshcd_vreg_set_lpm(hba);
 out:
 	hba->pm_op_in_progress = 0;
 	if (ret)


@@ -83,6 +83,7 @@ struct spi_geni_master {
 	spinlock_t lock;
 	int irq;
 	bool cs_flag;
+	bool abort_failed;
 };

 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
 	spin_unlock_irq(&mas->lock);

 	time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
-	if (!time_left)
+	if (!time_left) {
 		dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+		/*
+		 * No need for a lock since SPI core has a lock and we never
+		 * access this from an interrupt.
+		 */
+		mas->abort_failed = true;
+	}
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+	struct geni_se *se = &mas->se;
+	u32 m_irq, m_irq_en;
+
+	if (!mas->abort_failed)
+		return false;
+
+	/*
+	 * The only known case where a transfer times out and then a cancel
+	 * times out then an abort times out is if something is blocking our
+	 * interrupt handler from running.  Avoid starting any new transfers
+	 * until that sorts itself out.
+	 */
+	spin_lock_irq(&mas->lock);
+	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+	m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+	spin_unlock_irq(&mas->lock);
+
+	if (m_irq & m_irq_en) {
+		dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+			m_irq & m_irq_en);
+		return true;
+	}
+
+	/*
+	 * If we're here the problem resolved itself so no need to check more
+	 * on future transfers.
+	 */
+	mas->abort_failed = false;
+
+	return false;
 }

 static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,9 +200,15 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	if (set_flag == mas->cs_flag)
 		return;

-	mas->cs_flag = set_flag;
-
 	pm_runtime_get_sync(mas->dev);
+
+	if (spi_geni_is_abort_still_pending(mas)) {
+		dev_err(mas->dev, "Can't set chip select\n");
+		goto exit;
+	}
+
+	mas->cs_flag = set_flag;
+
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->cs_done);
 	if (set_flag)
@@ -173,6 +221,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	if (!time_left)
 		handle_fifo_timeout(spi, NULL);

+exit:
 	pm_runtime_put(mas->dev);
 }
@@ -280,6 +329,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
 	int ret;
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);

+	if (spi_geni_is_abort_still_pending(mas))
+		return -EBUSY;
+
 	ret = setup_fifo_params(spi_msg->spi, spi);
 	if (ret)
 		dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +406,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
 	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
 	unsigned int i = 0;

+	/* Stop the watermark IRQ if nothing to send */
+	if (!mas->cur_xfer) {
+		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+		return false;
+	}
+
 	max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
 	if (mas->tx_rem_bytes < max_bytes)
 		max_bytes = mas->tx_rem_bytes;
@@ -396,6 +454,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
 		if (rx_last_byte_valid && rx_last_byte_valid < 4)
 			rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
 	}
+
+	/* Clear out the FIFO and bail if nowhere to put it */
+	if (!mas->cur_xfer) {
+		for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+			readl(se->base + SE_GENI_RX_FIFOn);
+		return;
+	}
+
 	if (mas->rx_rem_bytes < rx_bytes)
 		rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +561,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);

+	if (spi_geni_is_abort_still_pending(mas))
+		return -EBUSY;
+
 	/* Terminate and return success for 0 byte length transfer */
 	if (!xfer->len)
 		return 0;
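spi_geni_is_abort_still_pending() treats the abort as genuinely stuck only when an interrupt status bit is set while that same interrupt is enabled, i.e. m_irq & m_irq_en is non-zero. A standalone illustration of that test (register values are made up; the real ones come from the GENI SE registers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t m_irq = 0x11;    /* sample pending status bits */
        uint32_t m_irq_en = 0x10; /* sample enable mask */

        if (m_irq & m_irq_en)
            printf("pending after abort: %#010x\n", m_irq & m_irq_en);
        else
            printf("nothing pending, clear the failed flag\n");
        return 0;
    }

Masking status with the enable bits avoids false positives from status bits the driver deliberately left unhandled.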


@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 	/* align packet size with data registers access */
 	if (spi->cur_bpw > 8)
-		fthlv -= (fthlv % 2); /* multiple of 2 */
+		fthlv += (fthlv % 2) ? 1 : 0;
 	else
-		fthlv -= (fthlv % 4); /* multiple of 4 */
+		fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;

 	if (!fthlv)
 		fthlv = 1;
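The corrected code rounds the FIFO threshold up to the required multiple (2 for words wider than 8 bits, 4 otherwise) where the old code rounded down. A minimal check of the round-up expression used above:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t round_up_mult(uint32_t v, uint32_t n)
    {
        /* same arithmetic as fthlv += (fthlv % n) ? (n - fthlv % n) : 0 */
        return (v % n) ? v + (n - v % n) : v;
    }

    int main(void)
    {
        assert(round_up_mult(5, 2) == 6);
        assert(round_up_mult(5, 4) == 8);
        assert(round_up_mult(8, 4) == 8); /* already aligned: unchanged */
        return 0;
    }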


@@ -80,6 +80,7 @@ struct eth_dev {
 	bool			zlp;
 	bool			no_skb_reserve;
+	bool			ifname_set;
 	u8			host_mac[ETH_ALEN];
 	u8			dev_mac[ETH_ALEN];
 };
@@ -1004,15 +1005,45 @@ EXPORT_SYMBOL_GPL(gether_get_qmult);
 int gether_get_ifname(struct net_device *net, char *name, int len)
 {
+	struct eth_dev *dev = netdev_priv(net);
 	int ret;

 	rtnl_lock();
-	ret = scnprintf(name, len, "%s\n", netdev_name(net));
+	ret = scnprintf(name, len, "%s\n",
+			dev->ifname_set ? net->name : netdev_name(net));
 	rtnl_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gether_get_ifname);

+int gether_set_ifname(struct net_device *net, const char *name, int len)
+{
+	struct eth_dev *dev = netdev_priv(net);
+	char tmp[IFNAMSIZ];
+	const char *p;
+
+	if (name[len - 1] == '\n')
+		len--;
+
+	if (len >= sizeof(tmp))
+		return -E2BIG;
+
+	strscpy(tmp, name, len + 1);
+	if (!dev_valid_name(tmp))
+		return -EINVAL;
+
+	/* Require exactly one %d, so binding will not fail with EEXIST. */
+	p = strchr(name, '%');
+	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
+		return -EINVAL;
+
+	strncpy(net->name, tmp, sizeof(net->name));
+	dev->ifname_set = true;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gether_set_ifname);
+
 /*
  * gether_cleanup - remove Ethernet-over-USB device
  * Context: may sleep
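gether_set_ifname() only accepts an interface pattern containing exactly one %d, so the later bind cannot collide with an existing name. The pattern test reduces to the strchr() checks in the hunk; a standalone userspace version of just that rule:

    #include <stdio.h>
    #include <string.h>

    /* One '%', immediately followed by 'd', and no further '%' after it. */
    static int valid_pattern(const char *name)
    {
        const char *p = strchr(name, '%');

        return p && p[1] == 'd' && !strchr(p + 2, '%');
    }

    int main(void)
    {
        printf("%d\n", valid_pattern("usb%d"));  /* 1: ok */
        printf("%d\n", valid_pattern("usb0"));   /* 0: no pattern */
        printf("%d\n", valid_pattern("u%d%d"));  /* 0: two patterns */
        return 0;
    }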

Some files were not shown because too many files have changed in this diff.