Linux 5.8
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl8nLmkeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGBEkH/RJAnEan4gcdkBDf
2xS0yk4XLMjKZwbz61VSeKMUBkGCsh1cWsaAtJJAIVYj/6o/7mld01sZCnOASLnJ
ET0nXL2NiT/f+prYkTE5qeYH225/Yfh5jgmrqZtx/uXFCwgE5Nzi3f72IXQDmCR+
kmpNhNox3YqQTKXhv7DXobDKcO0n8nZavnhxmA9SBZn2h9RHvmvJghD0UOfLjMpA
1SbknaE67n5JN/JjI6TkYWk4nuJmqfvmBL5IYVDEZYO4UlM5Bqzhw0XN7Ax70K3M
KRK/eiqRmNwun5MxWnbzQU7t7iTgVmzjHLTpWGcM3V4blgGXC3uhjc+p/R8KTQUE
bIydSzs=
=fDeo
-----END PGP SIGNATURE-----

Merge 5.8 into android-mainline

Linux 5.8

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic56a756d207b725fcfbc595562d1d719fac3eade

commit 5a0b0ef465
@@ -47,6 +47,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: Phandle to the device SRAM

iommus:
maxItems: 1

memory-region:
description:
CMA pool to use for buffers allocation instead of the default

@@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to
leverage the UDP based load balancing capability of different networks.
MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.

The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
a UDP tunnel.
The Bareudp tunnel module provides a generic L3 encapsulation support for
tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.

Special Handling
----------------
@@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table:
- Contains packet traps for packets that should be locally delivered after
routing, but do not match more specific packet traps (e.g.,
``ipv4_bgp``)
* - ``external_delivery``
- Contains packet traps for packets that should be routed through an
external interface (e.g., management interface) that does not belong to
the same device (e.g., switch ASIC) as the ingress interface
* - ``ipv6``
- Contains packet traps for various IPv6 control packets (e.g., Router
Advertisements)
@@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
F: include/linux/mfd/altera-a10sr.h

ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Thor Thayer <thor.thayer@linux.intel.com>
M: Joyce Ooi <joyce.ooi@intel.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/

@@ -1425,7 +1425,7 @@ F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: arch/arm/oprofile/common.c
F: drivers/perf/*
F: drivers/perf/
F: include/linux/perf/arm_pmu.h

ARM PORT

@@ -14201,7 +14201,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c

QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported

Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*

@@ -342,7 +342,8 @@ gateclk: clock-gating-control@18220 {

comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
reg = <0x18300 0x100>;
reg-names = "comphy", "conf";
reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;
@@ -397,7 +397,7 @@ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1

pinctrl_usbotg: usbotggrp {
fsl,pins = <
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};

@@ -409,6 +409,7 @@ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17070
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};

@@ -99,7 +99,7 @@ ethphy1: ethernet-phy@1 {
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";

@@ -213,7 +213,7 @@ ethphy2: ethernet-phy@2 {
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};

@@ -402,7 +402,7 @@ ethphy0: ethernet-phy@0 {

&gbe0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
phy-mode = "rgmii-rxid";
status = "okay";
};
@@ -198,7 +198,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};

@@ -117,7 +117,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};

@@ -181,7 +181,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};
@@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/thread_info.h>

/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7

@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}

static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
struct arch_hw_breakpoint *info)
{
return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}

static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}

pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);

/*
* If we triggered a user watchpoint from a uaccess routine,
* then handle the stepping ourselves since userspace really
* can't help us with this.
*/
if (watchpoint_fault_on_uaccess(regs, info))
goto step;

perf_bp_event(wp, regs);

/*
* If no overflow handler is present, insert a temporary
* mismatch breakpoint so we can single-step over the
* watchpoint trigger.
* Defer stepping to the overflow handler if one is installed.
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
if (!is_default_overflow_handler(wp))
goto unlock;

step:
enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}
@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}

@@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
pud_t *pud;

p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
if (!WARN_ON(!p4d))
if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))

@@ -161,6 +161,7 @@ video-codec@1c0e000 {
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
iommus = <&iommu 3>;
};

gpu: gpu@1800000 {

@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".org . - (662b-661b) + (664b-663b)\n\t" \
".previous\n" \
".endif\n"

#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
int n = ihl; /* we want it signed */

tmp = *(const __uint128_t *)iph;
iph += 16;
ihl -= 4;
n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
} while (--ihl);
} while (--n > 0);

sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
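The switch from the unsigned ihl counter to a signed n above matters for malformed packets: IPv4 guarantees ihl >= 5, but a corrupt header can report less, and with an unsigned counter "--ihl" wraps around and the loop walks far past the header. A minimal user-space sketch of the same loop shape, assuming plain C stand-ins for the kernel types (illustration only, not the kernel function):

	#include <stdio.h>

	/* Count how many loop iterations the fixed do/while performs.
	 * A signed counter terminates even when ihl < 5, i.e. when
	 * n - 4 is already <= 0 before the loop starts. */
	static int iterations(unsigned int ihl)
	{
		int n = ihl;	/* signed, as in the fix */
		int iters = 0;

		n -= 4;		/* first 16 bytes were summed up front */
		do {
			iters++;
		} while (--n > 0);
		return iters;
	}

	int main(void)
	{
		/* With the old "unsigned ihl" loop, ihl = 4 decremented
		 * past zero and iterated ~4 billion times. */
		printf("ihl=5 -> %d, ihl=4 -> %d\n",
		       iterations(5), iterations(4));
		return 0;
	}

The point of the signed comparison is bounding the loop, not validating the header; callers still reject bad packets elsewhere.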
@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
system_supports_generic_auth()) && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
(vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu) false
#endif

#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)

@@ -30,7 +30,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <asm/pointer_auth.h>

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

@@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}

static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;

@@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;

if (pudp)
return kvm_s2pud_exec(pudp);
return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
return kvm_s2pmd_exec(pmdp);
return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
return kvm_s2pte_exec(ptep);
return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,

@@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
(fault_status == FSC_PERM &&
stage2_is_exec(kvm, fault_ipa, vma_pagesize));

if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
@@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */

/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
* prevent re-entrancy problems in the hash code, namely perf
* interrupts hitting while something holds H_PAGE_BUSY, and taking a
* hash fault. See the comment in hash_preload().
*/
ld r11, PACA_THREAD_INFO(r13)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
lwz r0,TI_PREEMPT(r11)
andis. r0,r0,NMI_MASK@h
bne 77f

/*
* r3 contains the trap number

@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
unsigned long flags;

BUG_ON(get_region_id(ea) != USER_REGION_ID);

@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */

/*
* __hash_page_* must run with interrupts off, as it sets the
* H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
* time and may take a hash fault reading the user stack, see
* read_user_stack_slow() in the powerpc/perf code.
*
* If that takes a hash fault on the same page as we lock here, it
* will bail out when seeing H_PAGE_BUSY set, and retry the access
* leading to an infinite loop.
*
* Disabling interrupts here does not prevent perf interrupts, but it
* will prevent them taking hash faults (see the NMI test in
* do_hash_page), then read_user_stack's copy_from_user_nofault will
* fail and perf will fall back to read_user_stack_slow(), which
* walks the Linux page tables.
*
* Interrupts must also be off for the duration of the
* mm_is_thread_local test and update, to prevent preempt running the
* mm on another CPU (XXX: this may be racy vs kthread_use_mm).
*/
local_irq_save(flags);

/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;

@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));

local_irq_restore(flags);
}

/*
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)

perf_read_regs(regs);

/*
* If perf interrupts hit in a local_irq_disable (soft-masked) region,
* we consider them as NMIs. This is required to prevent hash faults on
* user addresses when reading callchains. See the NMI test in
* do_hash_page.
*/
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();

@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,

@@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)

#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmdp, addr) \
do { \
struct page *page = virt_to_page(pmdp); \
pgtable_pmd_page_dtor(page); \
tlb_remove_page((tlb), page); \
} while (0);
#endif

#endif /* __ASM_SH_PGALLOC_H */

@@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
mov.l 2f, r10 ! Number of syscalls
mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0

@@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!

@@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
2: .long NR_syscalls
6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
@@ -207,7 +207,7 @@ static void mask_and_ack_8259A(struct irq_data *data)
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk(KERN_DEBUG
printk_deferred(KERN_DEBUG
"spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}

@@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;

if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;

@@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;

if (pause_filter_count) {
if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;

@@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel = (svm_get_cpl(vcpu) == 0);

if (pause_filter_thresh)
if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu);

kvm_vcpu_on_spin(vcpu, in_kernel);

@@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (pause_filter_thresh)
if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
}

@@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)

static int svm_vm_init(struct kvm *kvm)
{
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;

if (avic) {
int ret = avic_vm_init(kvm);
if (ret)
@@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;

if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
return -EINVAL;

/*
* SMM temporarily disables VMX, so we cannot be in guest mode,
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags

@@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;

/* Empty 'VMXON' state is permitted */
if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
return 0;
/* Empty 'VMXON' state is permitted if no VMCS loaded */
if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
/* See vmx_has_valid_vmcs12. */
if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
(kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
(kvm_state->hdr.vmx.vmcs12_pa != -1ull))
return -EINVAL;
else
return 0;
}

if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||

@@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

/*
* Note: the same condition is checked against the state provided by userspace
* in vmx_set_nested_state; if it is satisfied, the nested state must include
* the VMCS12.
*/
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
if (!dev_data->persist) return 0;
if (!dev_data->persist) {
atm_dev_put(dev);
return 0;
}
dev_data->persist = 0;
if (PRIV(dev)->vcc) return 0;
if (PRIV(dev)->vcc) {
atm_dev_put(dev);
return 0;
}
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);

@@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)

fast_mix(fast_pool);
add_interrupt_bench(cycles);
this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);

if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&

@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_register;
if (err) {
kobject_put(&entry->kobj);
return err;
}

/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);

@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)

err_add_raw:
kobject_del(&entry->kobj);
err_register:
kfree(entry);
return err;
}
@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;

memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;

@@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));

while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;

@@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)

@@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
*
* Furthermore, since the DM atomic state only contains the DC
* context and can safely be annulled, we can free the state
* and clear the associated private object now to free
* some memory and avoid a possible use-after-free later.
*/
struct dm_atomic_state *new_dm_state, *old_dm_state;

new_dm_state = dm_atomic_get_new_state(state);
old_dm_state = dm_atomic_get_old_state(state);
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;

if (new_dm_state && old_dm_state) {
if (new_dm_state->context)
dc_release_state(new_dm_state->context);
if (obj->funcs == adev->dm.atomic_obj.funcs) {
int j = state->num_private_objs-1;

new_dm_state->context = old_dm_state->context;
dm_atomic_destroy_state(obj,
state->private_objs[i].state);

if (old_dm_state->context)
dc_retain_state(old_dm_state->context);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
* before the array can safely be truncated.
*/
if (i != j)
state->private_objs[i] =
state->private_objs[j];

state->private_objs[j].ptr = NULL;
state->private_objs[j].state = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;

state->num_private_objs = j;
break;
}
}
}
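The private-object removal above is a swap-remove: instead of shifting every later element down, the last element is copied into the vacated slot and the array is truncated by one, then the stale tail slot is NULLed so nothing dereferences it later. A generic C sketch of the pattern (the struct and names here are illustrative, not part of the driver):

	#include <stddef.h>

	struct obj { int id; };

	/* Remove objs[i] from an unordered array of *count elements by
	 * moving the last element into its place, then shrinking the
	 * array. O(1), but element order is not preserved. */
	static void swap_remove(struct obj *objs, size_t *count, size_t i)
	{
		size_t j = *count - 1;

		if (i != j)
			objs[i] = objs[j];
		objs[j] = (struct obj){ 0 };	/* scrub the stale tail */
		*count = j;
	}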
@@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;

bochs->dev->mode_config.funcs = &bochs_mode_funcs;

@@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)

adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;

drm_bridge_add(&adv7511->bridge);

@@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;

if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}

ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)

@@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;

for (y = clip->y1; y < clip->y2; y++) {
memcpy(dst, src, len);
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);

src += fb->pitches[0];
dst += fb->pitches[0];
}

@@ -871,9 +871,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,

@@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,

/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_put_unlocked(obj);
if (ret)
return ret;
goto err;

args->handle = handle;
args->size = obj->size;

return 0;
err:
drm_gem_object_put_unlocked(obj);
return ret;
}

/**
@@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}

tr.len = chunk;
tr.len = chunk * 2;
len -= chunk;

ret = spi_sync(spi, &m);

@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
if (!current_pt || pixels_type != current_pt) {
of_node_put(remote_port);
if (!current_pt || pixels_type != current_pt)
return -EINVAL;
}
}

return pixels_type;

@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
if (!mcde->video_mode)
/* Send a single frame using software sync */
mcde_display_send_one_frame(mcde);
if (!mcde->video_mode) {
/*
* Send a single frame using software sync if the flow
* is not active yet.
*/
if (mcde->flow_active == 0)
mcde_display_send_one_frame(mcde);
}
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*

@@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
core->func->update(core, interlock, false);
nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}

@@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (disp->disp->object.oclass >= GF110_DISP)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
@@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);

if (modifier == DRM_FORMAT_MOD_LINEAR) {

@@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/

if ((modifier & (0xffull << 12)) == 0ull) {
/* Legacy modifier. Translate to this dev's 'kind.' */
modifier |= disp->format_modifiers[0] & (0xffull << 12);
}

*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);

@@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}

static const u64 legacy_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};

static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,

@@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);

if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
for (mod = 0;
(legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(legacy_modifiers[mod] != modifier);
mod++);
if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
}

nouveau_decode_mod(drm, modifier, tile_mode, kind);

@@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;

mode_cmd.width = sizes->surface_width;

@@ -590,6 +590,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
drm->fbcon = NULL;
return ret;
}
@@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;

/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
!ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}

/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&

@@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}

/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
/*XXX: For various complicated reasons, we can't outright switch
* the boot-time OR on the first modeset without some fairly
* invasive changes.
*
* The systems that were fixed by modifying the OR selection
* code to account for HDA support shouldn't regress here as
* the HDA-enabled ORs match the relevant output's pad macro
* index, and the firmware seems to select an OR this way.
*
* This warning is to make it obvious if that proves wrong.
*/
WARN_ON(hda && !ior->func->hda.hpd);
return nvkm_outp_acquire_ior(outp, user, ior);
}
}

/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/
@@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 36,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 24,
.htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,

@@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
.hpd_absent_delay = 200,
/*
* When power is first given to the panel there's a short
* spike on the HPD line. It was explained that this spike
* was until the TCON data download was complete. On
* one system this was measured at 8 ms. We'll put 15 ms
* in the prepare delay just to be safe and take it away
* from the hpd_absent_delay (which would otherwise be 200 ms)
* to handle this. That means:
* - If HPD isn't hooked up you still have 200 ms delay.
* - If HPD is hooked up we won't try to look at it for the
* first 15 ms.
*/
.prepare = 15,
.hpd_absent_delay = 185,

.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
@@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;

if (!client || !slave_cb) {
WARN(1, "insufficient data\n");
if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
}

if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",

@@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;

if (IS_ERR_OR_NULL(client))
return -EINVAL;

if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;

@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static void rdma_dim_destroy(struct ib_cq *cq)
{
if (!cq->dim)
return;

cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
}

static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;

@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;

out_destroy_cq:
rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:

@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}

rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
if (cq->dim)
cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;

if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;

@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;

if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;

@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
work->num_sge = i - 1;
if (i)
destroy_prefetch_work(work);
work->num_sge = i;
return false;
}

@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
@@ -1766,15 +1766,14 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}

static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;

if (ucmd)
allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;

if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;

@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;

mutex_init(&qp->mutex);

if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;

mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);

@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
configure_requester_scat_cqe(dev, qp, init_attr, qpc);

if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);

@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;

mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);

@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}

if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
switch (flag) {
case MLX5_QP_FLAG_SCATTER_CQE:
case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
* We don't return error if this flag was provided,
* and mlx5 doesn't have right capability.
*/
*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
* We don't return error if these flags were provided,
* and mlx5 doesn't have right capability.
*/
*flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
default:
break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}

@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);

if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||

@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}

mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_tail_ack_queue = 0;
qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
if (qp->r_rq.kwq)
qp->r_rq.kwq->count = qp->r_rq.size;
qp->r_sge.num_sge = 0;
atomic_set(&qp->s_reserved_used, 0);
}

@@ -2366,31 +2364,6 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
return 0;
}

/**
* get_count - count numbers of request work queue entries
* in circular buffer
* @rq: data structure for request queue entry
* @tail: tail indices of the circular buffer
* @head: head indices of the circular buffer
*
* Return - total number of entries in the circular buffer
*/
static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
{
u32 count;

count = head;

if (count >= rq->size)
count = 0;
if (count < tail)
count += rq->size - tail;
else
count -= tail;

return count;
}

/**
* get_rvt_head - get head indices of the circular buffer
* @rq: data structure for request queue entry

@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)

if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
head = get_rvt_head(rq, ip);
kwq->count = get_count(rq, tail, head);
kwq->count = rvt_get_rq_count(rq, head, tail);
}
if (unlikely(kwq->count == 0)) {
ret = 0;

@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
* the number of remaining WQEs.
*/
if (kwq->count < srq->limit) {
kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
kwq->count =
rvt_get_rq_count(rq,
get_rvt_head(rq, ip), tail);
if (kwq->count < srq->limit) {
struct ib_event ev;

@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
credits = rvt_get_rq_count(&qp->r_rq, head, tail);
}
/*
* Binary search the credit table to find the code to
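Both call sites now use rvt_get_rq_count() in place of the removed get_count() and the open-coded credits arithmetic; the underlying math is the usual circular-buffer occupancy, (head - tail) mod size. A stand-alone C sketch of that math (rvt_get_rq_count() itself is defined in a header not shown in this diff, so this is an illustration of the computation, not the exact helper):

	#include <assert.h>

	typedef unsigned int u32;

	/* Occupancy of a ring of 'size' slots given head/tail indices,
	 * both already clamped to the range [0, size). */
	static u32 ring_count(u32 size, u32 head, u32 tail)
	{
		return (head >= tail) ? head - tail : size - tail + head;
	}

	int main(void)
	{
		assert(ring_count(8, 5, 2) == 3);	/* head ahead of tail */
		assert(ring_count(8, 1, 6) == 3);	/* head wrapped */
		assert(ring_count(8, 4, 4) == 0);	/* empty ring */
		return 0;
	}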
@@ -407,19 +407,34 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return err;
}

static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
if (bareudp->ethertype == proto)
return true;

if (!bareudp->multi_proto_mode)
return false;

if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
proto == htons(ETH_P_MPLS_MC))
return true;

if (bareudp->ethertype == htons(ETH_P_IP) &&
proto == htons(ETH_P_IPV6))
return true;

return false;
}

static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
int err;

if (skb->protocol != bareudp->ethertype) {
if (!bareudp->multi_proto_mode ||
(skb->protocol != htons(ETH_P_MPLS_MC) &&
skb->protocol != htons(ETH_P_IPV6))) {
err = -EINVAL;
goto tx_error;
}
if (!bareudp_proto_valid(bareudp, skb->protocol)) {
err = -EINVAL;
goto tx_error;
}

info = skb_tunnel_info(skb);
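bareudp_proto_valid() centralises the decision that bareudp_xmit() previously open-coded, and tightens it: in multi-proto mode the extra ethertype is only accepted for the matching base type (MPLS multicast over an MPLS unicast tunnel, IPv6 over an IPv4 tunnel), rather than for any tunnel. A small user-space rendition of the same decision table, using the real ETH_P_* constants but a stand-in struct for the driver's bareudp_dev:

	#include <arpa/inet.h>		/* htons() */
	#include <linux/if_ether.h>	/* ETH_P_* ethertypes */
	#include <stdbool.h>
	#include <stdio.h>

	struct cfg {
		unsigned short ethertype;	/* network byte order */
		bool multi_proto_mode;
	};

	static bool proto_valid(const struct cfg *c, unsigned short proto)
	{
		if (c->ethertype == proto)
			return true;
		if (!c->multi_proto_mode)
			return false;
		/* MPLS unicast tunnels may also carry MPLS multicast... */
		if (c->ethertype == htons(ETH_P_MPLS_UC) &&
		    proto == htons(ETH_P_MPLS_MC))
			return true;
		/* ...and IPv4 tunnels may also carry IPv6. */
		return c->ethertype == htons(ETH_P_IP) &&
		       proto == htons(ETH_P_IPV6);
	}

	int main(void)
	{
		struct cfg c = { htons(ETH_P_IP), true };

		printf("IPv6 over an IPv4 bareudp tunnel: %s\n",
		       proto_valid(&c, htons(ETH_P_IPV6)) ? "ok" : "rejected");
		return 0;
	}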
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);

@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
if (ret)
if (ret) {
clk_disable_unprepare(port->pclk);
return ret;
}

ret = register_netdev(netdev);
if (!ret) {

@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;

if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
int ret;

ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret < 0))
return ret;

dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else if (type == DESC_TYPE_FRAGLIST_SKB) {
if (type == DESC_TYPE_FRAGLIST_SKB ||
type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;

dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);

@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

next_to_use_head = ring->next_to_use;

ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
if (unlikely(ret < 0))
goto fill_err;

ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;

@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;

if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflict, when user configure rule by ethtool,
* we need to clear all arfs rules
*/
spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);

spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);

spin_unlock_bh(&hdev->fd_rule_lock);

@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}

/* make sure being called after lock up with fd_rule_lock */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{

@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;

spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,

@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}

spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)

@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_fd_rule_tuples new_tuples;
struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;

@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;

memset(&new_tuples, 0, sizeof(new_tuples));
hclge_fd_get_flow_tuples(fkeys, &new_tuples);

spin_lock_bh(&hdev->fd_rule_lock);

/* when there is already fd rule existed add by user,
* arfs should not work
*/
spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}

hclge_fd_get_flow_tuples(fkeys, &new_tuples);

/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;

@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}

/* make sure being called after lock up with fd_rule_lock */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL

@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)

hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
if (!enable)

if (!enable) {
spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
else
spin_unlock_bh(&hdev->fd_rule_lock);
} else {
hclge_restore_fd_entries(handle);
}
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)

@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;

set_bit(HCLGE_STATE_DOWN, &hdev->state);

spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
spin_unlock_bh(&hdev->fd_rule_lock);

/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.

@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;

/* When device is resetting, firmware is unable to handle
* mailbox. Just record the vlan id, and remove it after
/* When device is resetting or reset failed, firmware is unable to
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;

/* When device is resetting, firmware is unable to handle
* mailbox. Just record the vlan id, and remove it after
/* When device is resetting or reset failed, firmware is unable to
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}

@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
int ret;

rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
rtnl_unlock();

if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
dev_warn(&hdev->pdev->dev,
"is resetting when updating port based vlan info\n");
rtnl_unlock();
return;
}

ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret) {
rtnl_unlock();
return;
}

/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (!ret) {
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = state;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
}

if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
@@ -3206,7 +3206,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;

@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
if (ret_val) {
if (ret_val)
e_warn("Failed to disable ULP\n");
goto out;
}

ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {

@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);

rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IGB_DOWN, &adapter->state) ||
test_bit(__IGB_RESETTING, &adapter->state)) {
rtnl_unlock();
return;
}

igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
rtnl_unlock();
}

/**
@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
|
||||
if (!netif_running(pf->netdev))
|
||||
return;
|
||||
|
||||
rtnl_lock();
|
||||
otx2_stop(pf->netdev);
|
||||
pf->reset_count++;
|
||||
otx2_open(pf->netdev);
|
||||
netif_trans_update(pf->netdev);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
static const struct net_device_ops otx2_netdev_ops = {
|
||||
@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
|
||||
|
||||
pf = netdev_priv(netdev);
|
||||
|
||||
cancel_work_sync(&pf->reset_task);
|
||||
/* Disable link notifications */
|
||||
otx2_cgx_config_linkevents(pf, false);
|
||||
|
||||
|
@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
|
||||
|
||||
vf = netdev_priv(netdev);
|
||||
|
||||
cancel_work_sync(&vf->reset_task);
|
||||
unregister_netdev(netdev);
|
||||
otx2vf_disable_mbox_intr(vf);
|
||||
|
||||
otx2_detach_resources(&vf->mbox);
|
||||
|
@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
|
||||
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
|
||||
phy_interface_t interface, int speed)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (interface == PHY_INTERFACE_MODE_TRGMII) {
|
||||
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
|
||||
val = 500000000;
|
||||
ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
|
||||
if (ret)
|
||||
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
val = (speed == SPEED_1000) ?
|
||||
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
|
||||
mtk_w32(eth, val, INTF_MODE);
|
||||
@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
|
||||
state->interface))
|
||||
goto err_phy;
|
||||
} else {
|
||||
if (state->interface !=
|
||||
PHY_INTERFACE_MODE_TRGMII)
|
||||
mtk_gmac0_rgmii_adjust(mac->hw,
|
||||
state->speed);
|
||||
mtk_gmac0_rgmii_adjust(mac->hw,
|
||||
state->interface,
|
||||
state->speed);
|
||||
|
||||
/* mt7623_pad_clk_setup */
|
||||
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
|
||||
@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
|
||||
eth->netdev[id]->irq = eth->irq[0];
|
||||
eth->netdev[id]->dev.of_node = np;
|
||||
|
||||
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
|
||||
|
||||
return 0;
|
||||
|
||||
free_netdev:
|
||||
|
@ -4356,12 +4356,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
|
||||
static void mlx4_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
|
||||
struct mlx4_dev *dev = persist->dev;
|
||||
|
||||
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
|
||||
mutex_lock(&persist->interface_state_mutex);
|
||||
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
|
||||
mlx4_unload_one(pdev);
|
||||
mutex_unlock(&persist->interface_state_mutex);
|
||||
mlx4_pci_disable_device(dev);
|
||||
}
|
||||
|
||||
static const struct pci_error_handlers mlx4_err_handler = {
|
||||
|
@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
|
||||
|
||||
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
struct mlx5e_rep_priv *rpriv = priv->ppriv;
|
||||
struct mlx5e_rep_priv *rpriv;
|
||||
struct mlx5e_priv *priv;
|
||||
|
||||
/* A given netdev is not a representor or not a slave of LAG configuration */
|
||||
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
|
||||
return false;
|
||||
|
||||
priv = netdev_priv(netdev);
|
||||
rpriv = priv->ppriv;
|
||||
|
||||
/* Egress acl forward to vport is supported only non-uplink representor */
|
||||
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
|
||||
}
|
||||
|
@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
|
||||
}
|
||||
}
|
||||
|
||||
tun_dst = tun_rx_dst(enc_opts.key.len);
|
||||
if (!tun_dst) {
|
||||
WARN_ON_ONCE(true);
|
||||
if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
|
||||
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
|
||||
key.enc_ip.tos, key.enc_ip.ttl,
|
||||
key.enc_tp.dst, TUNNEL_KEY,
|
||||
key32_to_tunnel_id(key.enc_key_id.keyid),
|
||||
enc_opts.key.len);
|
||||
} else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
|
||||
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
|
||||
key.enc_ip.tos, key.enc_ip.ttl,
|
||||
key.enc_tp.dst, 0, TUNNEL_KEY,
|
||||
key32_to_tunnel_id(key.enc_key_id.keyid),
|
||||
enc_opts.key.len);
|
||||
} else {
|
||||
netdev_dbg(priv->netdev,
|
||||
"Couldn't restore tunnel, unsupported addr_type: %d\n",
|
||||
key.enc_control.addr_type);
|
||||
return false;
|
||||
}
|
||||
|
||||
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
|
||||
key.enc_ipv4.src, key.enc_ipv4.dst,
|
||||
key.enc_ip.tos, key.enc_ip.ttl,
|
||||
0, /* label */
|
||||
key.enc_tp.src, key.enc_tp.dst,
|
||||
key32_to_tunnel_id(key.enc_key_id.keyid),
|
||||
TUNNEL_KEY);
|
||||
if (!tun_dst) {
|
||||
netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
|
||||
|
||||
if (enc_opts.key.len)
|
||||
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
|
||||
|
@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
|
||||
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
|
||||
}
|
||||
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
|
||||
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
|
||||
}
|
||||
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
|
||||
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
|
||||
be32_to_cpu(enc_keyid.key->keyid));
|
||||
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
||||
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
|
||||
&rq->wq_ctrl);
|
||||
if (err)
|
||||
return err;
|
||||
goto err_rq_wq_destroy;
|
||||
|
||||
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
|
||||
|
||||
@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
||||
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
|
||||
&rq->wq_ctrl);
|
||||
if (err)
|
||||
return err;
|
||||
goto err_rq_wq_destroy;
|
||||
|
||||
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
|
||||
|
||||
@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
|
||||
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
|
||||
}
|
||||
|
||||
static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
|
||||
enum mlx5_port_status state)
|
||||
{
|
||||
struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
||||
int vport_admin_state;
|
||||
|
||||
mlx5_set_port_admin_status(mdev, state);
|
||||
|
||||
if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
|
||||
return;
|
||||
|
||||
if (state == MLX5_PORT_UP)
|
||||
vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
|
||||
else
|
||||
vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
|
||||
|
||||
mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
|
||||
}
|
||||
|
||||
int mlx5e_open_locked(struct net_device *netdev)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev)
|
||||
mutex_lock(&priv->state_lock);
|
||||
err = mlx5e_open_locked(netdev);
|
||||
if (!err)
|
||||
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
|
||||
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
return err;
|
||||
@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
|
||||
return -ENODEV;
|
||||
|
||||
mutex_lock(&priv->state_lock);
|
||||
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
|
||||
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
|
||||
err = mlx5e_close_locked(netdev);
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
|
||||
|
||||
/* Marking the link as currently not needed by the Driver */
|
||||
if (!netif_running(netdev))
|
||||
mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
|
||||
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
|
||||
|
||||
mlx5e_set_netdev_mtu_boundaries(priv);
|
||||
mlx5e_set_dev_port_mtu(priv);
|
||||
@ -5390,6 +5409,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
|
||||
profile->cleanup_tx(priv);
|
||||
|
||||
out:
|
||||
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
||||
cancel_work_sync(&priv->update_stats_work);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -936,6 +936,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
|
||||
|
||||
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
|
||||
{
|
||||
mlx5e_ethtool_cleanup_steering(priv);
|
||||
rep_vport_rx_rule_destroy(priv);
|
||||
mlx5e_destroy_rep_root_ft(priv);
|
||||
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
|
||||
@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
|
||||
|
||||
mlx5e_rep_tc_enable(priv);
|
||||
|
||||
mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
|
||||
0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
|
||||
mlx5_lag_add(mdev, netdev);
|
||||
priv->events_nb.notifier_call = uplink_rep_async_event;
|
||||
mlx5_notifier_register(mdev, &priv->events_nb);
|
||||
|
@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
|
||||
match.key->vlan_priority);
|
||||
|
||||
*match_level = MLX5_MATCH_L2;
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1608,7 +1608,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||
}
|
||||
|
||||
esw_destroy_tsar(esw);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
||||
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
|
||||
esw_offloads_disable(esw);
|
||||
|
||||
esw_destroy_tsar(esw);
|
||||
|
||||
old_mode = esw->mode;
|
||||
esw->mode = MLX5_ESWITCH_NONE;
|
||||
|
||||
@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||
}
|
||||
esw_destroy_tsar(esw);
|
||||
|
||||
if (clear_vf)
|
||||
mlx5_eswitch_clear_vf_vports_info(esw);
|
||||
}
|
||||
@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
||||
u16 vport, int link_state)
|
||||
{
|
||||
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
|
||||
int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
|
||||
int other_vport = 1;
|
||||
int err = 0;
|
||||
|
||||
if (!ESW_ALLOWED(esw))
|
||||
@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
||||
if (IS_ERR(evport))
|
||||
return PTR_ERR(evport);
|
||||
|
||||
if (vport == MLX5_VPORT_UPLINK) {
|
||||
opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
|
||||
other_vport = 0;
|
||||
vport = 0;
|
||||
}
|
||||
mutex_lock(&esw->state_lock);
|
||||
|
||||
err = mlx5_modify_vport_admin_state(esw->dev,
|
||||
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
|
||||
vport, 1, link_state);
|
||||
err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
|
||||
if (err) {
|
||||
mlx5_core_warn(esw->dev,
|
||||
"Failed to set vport %d link state, err = %d",
|
||||
vport, err);
|
||||
mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
|
||||
vport, opmod, err);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
||||
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
|
||||
int err = 0;
|
||||
|
||||
if (!ESW_ALLOWED(esw))
|
||||
return -EPERM;
|
||||
if (IS_ERR(evport))
|
||||
return PTR_ERR(evport);
|
||||
if (vlan > 4095 || qos > 7)
|
||||
@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
||||
u8 set_flags = 0;
|
||||
int err;
|
||||
|
||||
if (!ESW_ALLOWED(esw))
|
||||
return -EPERM;
|
||||
|
||||
if (vlan || qos)
|
||||
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
|
||||
|
||||
|
@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
|
||||
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
|
||||
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
|
||||
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
|
||||
static inline
|
||||
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
|
||||
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
|
||||
return &esw->offloads.vport_reps[idx];
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct mlx5_esw_flow_attr *attr)
|
||||
{
|
||||
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
|
||||
attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
|
||||
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
|
||||
@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
|
||||
mlx5_eswitch_get_vport_metadata_mask());
|
||||
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
|
||||
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
|
||||
if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
} else {
|
||||
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
|
||||
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
|
||||
@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
|
||||
|
||||
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
|
||||
attr->in_rep->vport == MLX5_VPORT_UPLINK)
|
||||
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
|
||||
}
|
||||
|
||||
struct mlx5_flow_handle *
|
||||
@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
|
||||
goto err_esw_get;
|
||||
}
|
||||
|
||||
mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
|
||||
|
||||
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
|
||||
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
|
||||
&flow_act, dest, i);
|
||||
@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
|
||||
i++;
|
||||
|
||||
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
|
||||
mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
|
||||
|
||||
if (attr->outer_match_level != MLX5_MATCH_NONE)
|
||||
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
|
||||
|
@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
|
||||
return ft;
|
||||
}
|
||||
|
||||
/* If reverse if false then return the first flow table in next priority of
|
||||
/* If reverse is false then return the first flow table in next priority of
|
||||
* prio in the tree, else return the last flow table in the previous priority
|
||||
* of prio in the tree.
|
||||
*/
|
||||
@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
|
||||
return find_closest_ft(prio, true);
|
||||
}
|
||||
|
||||
static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
|
||||
struct mlx5_flow_namespace *ns)
|
||||
{
|
||||
struct mlx5_flow_namespace *root_ns = &root->ns;
|
||||
struct fs_prio *iter_prio;
|
||||
struct fs_prio *prio;
|
||||
|
||||
fs_get_obj(prio, ns->node.parent);
|
||||
list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
|
||||
if (iter_prio == prio &&
|
||||
!list_is_last(&prio->node.children, &iter_prio->node.list))
|
||||
return list_next_entry(iter_prio, node.list);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_act *flow_act)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
|
||||
struct fs_prio *prio;
|
||||
bool next_ns;
|
||||
|
||||
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
|
||||
prio = find_fwd_ns_prio(root, ft->ns);
|
||||
else
|
||||
fs_get_obj(prio, ft->node.parent);
|
||||
next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
|
||||
fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
|
||||
|
||||
return (prio) ? find_next_chained_ft(prio) : NULL;
|
||||
return find_next_chained_ft(prio);
|
||||
}
|
||||
|
||||
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
|
||||
|
@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
|
||||
if (rq->extts.index >= clock->ptp_info.n_pins)
|
||||
return -EINVAL;
|
||||
|
||||
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
|
||||
if (pin < 0)
|
||||
return -EBUSY;
|
||||
|
||||
if (on) {
|
||||
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
|
||||
if (pin < 0)
|
||||
return -EBUSY;
|
||||
pin_mode = MLX5_PIN_MODE_IN;
|
||||
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
|
||||
field_select = MLX5_MTPPS_FS_PIN_MODE |
|
||||
MLX5_MTPPS_FS_PATTERN |
|
||||
MLX5_MTPPS_FS_ENABLE;
|
||||
} else {
|
||||
pin = rq->extts.index;
|
||||
field_select = MLX5_MTPPS_FS_ENABLE;
|
||||
}
|
||||
|
||||
@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
|
||||
if (rq->perout.index >= clock->ptp_info.n_pins)
|
||||
return -EINVAL;
|
||||
|
||||
if (on) {
|
||||
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
|
||||
rq->perout.index);
|
||||
if (pin < 0)
|
||||
return -EBUSY;
|
||||
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
|
||||
rq->perout.index);
|
||||
if (pin < 0)
|
||||
return -EBUSY;
|
||||
|
||||
if (on) {
|
||||
pin_mode = MLX5_PIN_MODE_OUT;
|
||||
pattern = MLX5_OUT_PATTERN_PERIODIC;
|
||||
ts.tv_sec = rq->perout.period.sec;
|
||||
@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
|
||||
MLX5_MTPPS_FS_ENABLE |
|
||||
MLX5_MTPPS_FS_TIME_STAMP;
|
||||
} else {
|
||||
pin = rq->perout.index;
|
||||
field_select = MLX5_MTPPS_FS_ENABLE;
|
||||
}
|
||||
|
||||
@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
|
||||
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
|
||||
};
|
||||
|
||||
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
|
||||
enum ptp_pin_function func, unsigned int chan)
|
||||
{
|
||||
return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
|
||||
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
|
||||
ptp_info);
|
||||
|
||||
switch (func) {
|
||||
case PTP_PF_NONE:
|
||||
return 0;
|
||||
case PTP_PF_EXTTS:
|
||||
return !(clock->pps_info.pin_caps[pin] &
|
||||
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
|
||||
case PTP_PF_PEROUT:
|
||||
return !(clock->pps_info.pin_caps[pin] &
|
||||
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static const struct ptp_clock_info mlx5_ptp_clock_info = {
|
||||
@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
|
||||
.verify = NULL,
|
||||
};
|
||||
|
||||
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
|
||||
u32 *mtpps, u32 mtpps_size)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
|
||||
|
||||
MLX5_SET(mtpps_reg, in, pin, pin);
|
||||
|
||||
return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
|
||||
mtpps_size, MLX5_REG_MTPPS, 0, 0);
|
||||
}
|
||||
|
||||
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = clock->mdev;
|
||||
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
|
||||
u8 mode;
|
||||
int err;
|
||||
|
||||
err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
|
||||
if (err || !MLX5_GET(mtpps_reg, out, enable))
|
||||
return PTP_PF_NONE;
|
||||
|
||||
mode = MLX5_GET(mtpps_reg, out, pin_mode);
|
||||
|
||||
if (mode == MLX5_PIN_MODE_IN)
|
||||
return PTP_PF_EXTTS;
|
||||
else if (mode == MLX5_PIN_MODE_OUT)
|
||||
return PTP_PF_PEROUT;
|
||||
|
||||
return PTP_PF_NONE;
|
||||
}
|
||||
|
||||
static int mlx5_init_pin_config(struct mlx5_clock *clock)
|
||||
{
|
||||
int i;
|
||||
@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
|
||||
sizeof(clock->ptp_info.pin_config[i].name),
|
||||
"mlx5_pps%d", i);
|
||||
clock->ptp_info.pin_config[i].index = i;
|
||||
clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
|
||||
clock->ptp_info.pin_config[i].chan = i;
|
||||
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
|
||||
clock->ptp_info.pin_config[i].chan = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
|
||||
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
|
||||
bulk_list, cb, cb_priv, tid);
|
||||
if (err) {
|
||||
kfree(trans);
|
||||
kfree_rcu(trans, rcu);
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
if (!found)
|
||||
if (!found) {
|
||||
rcu_read_unlock();
|
||||
goto drop;
|
||||
}
|
||||
|
||||
rxl->func(skb, local_port, rxl_item->priv);
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
|
||||
drop:
|
||||
|
@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
|
||||
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
|
||||
|
@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
|
||||
{
|
||||
/* Packets with link-local destination IP arriving to the router
|
||||
* are trapped to the CPU, so no need to program specific routes
|
||||
* for them. Only allow prefix routes (usually one fe80::/64) so
|
||||
* that packets are trapped for the right reason.
|
||||
*/
|
||||
if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
|
||||
(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
|
||||
return true;
|
||||
|
||||
/* Multicast routes aren't supported, so ignore them. Neighbour
|
||||
* Discovery packets are specifically trapped.
|
||||
*/
|
||||
@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
|
||||
mlxsw_sp->router = router;
|
||||
router->mlxsw_sp = mlxsw_sp;
|
||||
|
||||
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
|
||||
err = register_inetaddr_notifier(&router->inetaddr_nb);
|
||||
if (err)
|
||||
goto err_register_inetaddr_notifier;
|
||||
|
||||
router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
|
||||
err = register_inet6addr_notifier(&router->inet6addr_nb);
|
||||
if (err)
|
||||
goto err_register_inet6addr_notifier;
|
||||
|
||||
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
|
||||
err = __mlxsw_sp_router_init(mlxsw_sp);
|
||||
if (err)
|
||||
@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
|
||||
if (err)
|
||||
goto err_neigh_init;
|
||||
|
||||
mlxsw_sp->router->netevent_nb.notifier_call =
|
||||
mlxsw_sp_router_netevent_event;
|
||||
err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
|
||||
if (err)
|
||||
goto err_register_netevent_notifier;
|
||||
|
||||
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
|
||||
if (err)
|
||||
goto err_mp_hash_init;
|
||||
@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
|
||||
if (err)
|
||||
goto err_dscp_init;
|
||||
|
||||
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
|
||||
err = register_inetaddr_notifier(&router->inetaddr_nb);
|
||||
if (err)
|
||||
goto err_register_inetaddr_notifier;
|
||||
|
||||
router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
|
||||
err = register_inet6addr_notifier(&router->inet6addr_nb);
|
||||
if (err)
|
||||
goto err_register_inet6addr_notifier;
|
||||
|
||||
mlxsw_sp->router->netevent_nb.notifier_call =
|
||||
mlxsw_sp_router_netevent_event;
|
||||
err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
|
||||
if (err)
|
||||
goto err_register_netevent_notifier;
|
||||
|
||||
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
|
||||
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
|
||||
&mlxsw_sp->router->fib_nb,
|
||||
@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
|
||||
return 0;
|
||||
|
||||
err_register_fib_notifier:
|
||||
err_dscp_init:
|
||||
err_mp_hash_init:
|
||||
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
|
||||
err_register_netevent_notifier:
|
||||
unregister_inet6addr_notifier(&router->inet6addr_nb);
|
||||
err_register_inet6addr_notifier:
|
||||
unregister_inetaddr_notifier(&router->inetaddr_nb);
|
||||
err_register_inetaddr_notifier:
|
||||
mlxsw_core_flush_owq();
|
||||
err_dscp_init:
|
||||
err_mp_hash_init:
|
||||
mlxsw_sp_neigh_fini(mlxsw_sp);
|
||||
err_neigh_init:
|
||||
mlxsw_sp_vrs_fini(mlxsw_sp);
|
||||
@ -8174,10 +8170,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
|
||||
err_rifs_init:
|
||||
__mlxsw_sp_router_fini(mlxsw_sp);
|
||||
err_router_init:
|
||||
unregister_inet6addr_notifier(&router->inet6addr_nb);
|
||||
err_register_inet6addr_notifier:
|
||||
unregister_inetaddr_notifier(&router->inetaddr_nb);
|
||||
err_register_inetaddr_notifier:
|
||||
mutex_destroy(&mlxsw_sp->router->lock);
|
||||
kfree(mlxsw_sp->router);
|
||||
return err;
|
||||
@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
|
||||
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
|
||||
&mlxsw_sp->router->fib_nb);
|
||||
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
|
||||
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
|
||||
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
|
||||
mlxsw_core_flush_owq();
|
||||
mlxsw_sp_neigh_fini(mlxsw_sp);
|
||||
mlxsw_sp_vrs_fini(mlxsw_sp);
|
||||
mlxsw_sp_mr_fini(mlxsw_sp);
|
||||
@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
|
||||
mlxsw_sp_ipips_fini(mlxsw_sp);
|
||||
mlxsw_sp_rifs_fini(mlxsw_sp);
|
||||
__mlxsw_sp_router_fini(mlxsw_sp);
|
||||
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
|
||||
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
|
||||
mutex_destroy(&mlxsw_sp->router->lock);
|
||||
kfree(mlxsw_sp->router);
|
||||
}
|
||||
|
@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
|
||||
{
|
||||
.policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
|
||||
},
|
||||
{
|
||||
.policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
|
||||
},
|
||||
};
|
||||
|
||||
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
|
||||
@ -421,6 +424,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
|
||||
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
|
||||
.priority = 2,
|
||||
},
|
||||
{
|
||||
.group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
|
||||
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
|
||||
.priority = 1,
|
||||
},
|
||||
{
|
||||
.group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
|
||||
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
|
||||
@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
|
||||
.trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
|
||||
TRAP),
|
||||
.listeners_arr = {
|
||||
MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
|
||||
false),
|
||||
MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
|
||||
TRAP_TO_CPU, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
|
||||
|
||||
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
|
||||
|
||||
/* Next ts */
|
||||
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
|
||||
/* Get the h/w timestamp */
|
||||
ocelot_get_hwtimestamp(ocelot, &ts);
|
||||
|
||||
if (unlikely(!skb_match))
|
||||
continue;
|
||||
|
||||
/* Get the h/w timestamp */
|
||||
ocelot_get_hwtimestamp(ocelot, &ts);
|
||||
|
||||
/* Set the timestamp into the skb */
|
||||
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
|
||||
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
|
||||
skb_tstamp_tx(skb_match, &shhwtstamps);
|
||||
|
||||
dev_kfree_skb_any(skb_match);
|
||||
|
||||
/* Next ts */
|
||||
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ocelot_get_txtstamp);
|
||||
|
@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
|
||||
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
|
||||
err = nixge_of_get_resources(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
goto free_netdev;
|
||||
__nixge_hw_set_mac_address(ndev);
|
||||
|
||||
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
|
||||
if (priv->tx_irq < 0) {
|
||||
netdev_err(ndev, "could not find 'tx' irq");
|
||||
return priv->tx_irq;
|
||||
err = priv->tx_irq;
|
||||
goto free_netdev;
|
||||
}
|
||||
|
||||
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
|
||||
if (priv->rx_irq < 0) {
|
||||
netdev_err(ndev, "could not find 'rx' irq");
|
||||
return priv->rx_irq;
|
||||
err = priv->rx_irq;
|
||||
goto free_netdev;
|
||||
}
|
||||
|
||||
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
|
||||
|
@ -2001,7 +2001,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
|
||||
netif_device_detach(lif->netdev);
|
||||
err = ionic_stop(lif->netdev);
|
||||
if (err)
|
||||
return err;
|
||||
goto reset_out;
|
||||
}
|
||||
|
||||
if (cb)
|
||||
@ -2011,6 +2011,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
|
||||
err = ionic_open(lif->netdev);
|
||||
netif_device_attach(lif->netdev);
|
||||
}
|
||||
|
||||
reset_out:
|
||||
mutex_unlock(&lif->queue_lock);
|
||||
|
||||
return err;
|
||||
|
@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
|
||||
|
||||
minor = get_free_serial_index();
|
||||
if (minor < 0)
|
||||
goto exit;
|
||||
goto exit2;
|
||||
|
||||
/* register our minor number */
|
||||
serial->parent->dev = tty_port_register_device_attr(&serial->port,
|
||||
tty_drv, minor, &serial->parent->interface->dev,
|
||||
serial->parent, hso_serial_dev_groups);
|
||||
if (IS_ERR(serial->parent->dev))
|
||||
goto exit2;
|
||||
|
||||
/* fill in specific data for later use */
|
||||
serial->minor = minor;
|
||||
@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
|
||||
return 0;
|
||||
exit:
|
||||
hso_serial_tty_unregister(serial);
|
||||
exit2:
|
||||
hso_serial_common_free(serial);
|
||||
return -1;
|
||||
}
|
||||
|
@ -377,10 +377,6 @@ struct lan78xx_net {
|
||||
struct tasklet_struct bh;
|
||||
struct delayed_work wq;
|
||||
|
||||
struct usb_host_endpoint *ep_blkin;
|
||||
struct usb_host_endpoint *ep_blkout;
|
||||
struct usb_host_endpoint *ep_intr;
|
||||
|
||||
int msg_enable;
|
||||
|
||||
struct urb *urb_intr;
|
||||
@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static int
|
||||
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
|
||||
{
|
||||
int tmp;
|
||||
struct usb_host_interface *alt = NULL;
|
||||
struct usb_host_endpoint *in = NULL, *out = NULL;
|
||||
struct usb_host_endpoint *status = NULL;
|
||||
|
||||
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
|
||||
unsigned ep;
|
||||
|
||||
in = NULL;
|
||||
out = NULL;
|
||||
status = NULL;
|
||||
alt = intf->altsetting + tmp;
|
||||
|
||||
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
|
||||
struct usb_host_endpoint *e;
|
||||
int intr = 0;
|
||||
|
||||
e = alt->endpoint + ep;
|
||||
switch (e->desc.bmAttributes) {
|
||||
case USB_ENDPOINT_XFER_INT:
|
||||
if (!usb_endpoint_dir_in(&e->desc))
|
||||
continue;
|
||||
intr = 1;
|
||||
/* FALLTHROUGH */
|
||||
case USB_ENDPOINT_XFER_BULK:
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
if (usb_endpoint_dir_in(&e->desc)) {
|
||||
if (!intr && !in)
|
||||
in = e;
|
||||
else if (intr && !status)
|
||||
status = e;
|
||||
} else {
|
||||
if (!out)
|
||||
out = e;
|
||||
}
|
||||
}
|
||||
if (in && out)
|
||||
break;
|
||||
}
|
||||
if (!alt || !in || !out)
|
||||
return -EINVAL;
|
||||
|
||||
dev->pipe_in = usb_rcvbulkpipe(dev->udev,
|
||||
in->desc.bEndpointAddress &
|
||||
USB_ENDPOINT_NUMBER_MASK);
|
||||
dev->pipe_out = usb_sndbulkpipe(dev->udev,
|
||||
out->desc.bEndpointAddress &
|
||||
USB_ENDPOINT_NUMBER_MASK);
|
||||
dev->ep_intr = status;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
|
||||
{
|
||||
struct lan78xx_priv *pdata = NULL;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ret = lan78xx_get_endpoints(dev, intf);
|
||||
if (ret) {
|
||||
netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
|
||||
|
||||
pdata = (struct lan78xx_priv *)(dev->data[0]);
|
||||
@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
|
||||
static int lan78xx_probe(struct usb_interface *intf,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
|
||||
struct lan78xx_net *dev;
|
||||
struct net_device *netdev;
|
||||
struct usb_device *udev;
|
||||
@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
|
||||
mutex_init(&dev->stats.access_lock);
|
||||
|
||||
if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
|
||||
ret = -ENODEV;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
|
||||
ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
|
||||
if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
|
||||
ret = -ENODEV;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
|
||||
ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
|
||||
if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
|
||||
ret = -ENODEV;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
ep_intr = &intf->cur_altsetting->endpoint[2];
|
||||
if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
|
||||
ret = -ENODEV;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
dev->pipe_intr = usb_rcvintpipe(dev->udev,
|
||||
usb_endpoint_num(&ep_intr->desc));
|
||||
|
||||
ret = lan78xx_bind(dev, intf);
|
||||
if (ret < 0)
|
||||
goto out2;
|
||||
@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
|
||||
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
|
||||
|
||||
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
|
||||
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
|
||||
dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
|
||||
|
||||
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
|
||||
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
|
||||
|
||||
dev->pipe_intr = usb_rcvintpipe(dev->udev,
|
||||
dev->ep_intr->desc.bEndpointAddress &
|
||||
USB_ENDPOINT_NUMBER_MASK);
|
||||
period = dev->ep_intr->desc.bInterval;
|
||||
|
||||
period = ep_intr->desc.bInterval;
|
||||
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
|
||||
buf = kmalloc(maxp, GFP_KERNEL);
|
||||
if (buf) {
|
||||
@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
|
||||
usb_fill_int_urb(dev->urb_intr, dev->udev,
|
||||
dev->pipe_intr, buf, maxp,
|
||||
intr_complete, dev, period);
|
||||
dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
for (h = 0; h < FDB_HASH_SIZE; ++h) {
|
||||
struct vxlan_fdb *f;
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
|
||||
struct vxlan_rdst *rd;
|
||||
|
||||
@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
cb->nlh->nlmsg_seq,
|
||||
RTM_NEWNEIGH,
|
||||
NLM_F_MULTI, NULL);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
rcu_read_unlock();
|
||||
goto out;
|
||||
}
|
||||
skip_nh:
|
||||
*idx += 1;
|
||||
continue;
|
||||
@ -1403,12 +1406,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
cb->nlh->nlmsg_seq,
|
||||
RTM_NEWNEIGH,
|
||||
NLM_F_MULTI, rd);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
rcu_read_unlock();
|
||||
goto out;
|
||||
}
|
||||
skip:
|
||||
*idx += 1;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
out:
|
||||
return err;
|
||||
@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
|
||||
if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
|
||||
continue;
|
||||
/* the all_zeros_mac entry is deleted at vxlan_uninit */
|
||||
if (!is_zero_ether_addr(f->eth_addr))
|
||||
vxlan_fdb_destroy(vxlan, f, true, true);
|
||||
if (is_zero_ether_addr(f->eth_addr) &&
|
||||
f->vni == vxlan->cfg.vni)
|
||||
continue;
|
||||
vxlan_fdb_destroy(vxlan, f, true, true);
|
||||
}
|
||||
spin_unlock_bh(&vxlan->hash_lock[h]);
|
||||
}
|
||||
|
@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
||||
int pos;
|
||||
int len;
|
||||
|
||||
if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
|
||||
return 0;
|
||||
|
||||
c.identify.opcode = nvme_admin_identify;
|
||||
c.identify.nsid = cpu_to_le32(nsid);
|
||||
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
|
||||
@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
||||
if (status) {
|
||||
dev_warn(ctrl->device,
|
||||
"Identify Descriptors failed (%d)\n", status);
|
||||
/*
|
||||
* Don't treat non-retryable errors as fatal, as we potentially
|
||||
* already have a NGUID or EUI-64. If we failed with DNR set,
|
||||
* we want to silently ignore the error as we can still
|
||||
* identify the device, but if the status has DNR set, we want
|
||||
* to propagate the error back specifically for the disk
|
||||
* revalidation flow to make sure we don't abandon the
|
||||
* device just because of a temporal retry-able error (such
|
||||
* as path of transport errors).
|
||||
*/
|
||||
if (status > 0 && (status & NVME_SC_DNR))
|
||||
status = 0;
|
||||
goto free_data;
|
||||
}
|
||||
|
||||
|
@ -129,6 +129,13 @@ enum nvme_quirks {
|
||||
* Don't change the value of the temperature threshold feature
|
||||
*/
|
||||
NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
|
||||
|
||||
/*
|
||||
* The controller doesn't handle the Identify Namespace
|
||||
* Identification Descriptor list subcommand despite claiming
|
||||
* NVMe 1.3 compliance.
|
||||
*/
|
||||
NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = {
|
||||
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
|
||||
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
|
||||
NVME_QUIRK_DISABLE_WRITE_ZEROES, },
|
||||
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
|
||||
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
|
||||
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
|
||||
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
|
||||
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
|
||||
@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = {
|
||||
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
|
||||
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
|
||||
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
|
||||
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
|
||||
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
|
||||
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
|
||||
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
|
||||
|
@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
|
||||
if (nctrl->opts->tos >= 0)
|
||||
ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
|
||||
|
||||
/* Set 10 seconds timeout for icresp recvmsg */
|
||||
queue->sock->sk->sk_rcvtimeo = 10 * HZ;
|
||||
|
||||
queue->sock->sk->sk_allocation = GFP_ATOMIC;
|
||||
nvme_tcp_set_queue_io_cpu(queue);
|
||||
queue->request = NULL;
|
||||
|
@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
|
||||
|
||||
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
|
||||
{
|
||||
pci_info(dev, "Disabling ASPM L0s/L1\n");
|
||||
pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
|
||||
}
|
||||
|
||||
/*
|
||||
* ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
|
||||
* upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
|
||||
* disable both L0s and L1 for now to be safe.
|
||||
*/
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
|
||||
|
||||
/*
|
||||
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
|
||||
* Link bit cleared after starting the link retrain process to allow this
|
||||
|