Merge android-5.4.8 (de197c5) into msm-5.4
* refs/heads/tmp-de197c5:
  Linux 5.4.8
  mm/hugetlbfs: fix for_each_hstate() loop in init_hugetlbfs_fs()
  mmc: sdhci-of-esdhc: re-implement erratum A-009204 workaround
  mmc: sdhci-of-esdhc: fix up erratum A-008171 workaround
  vhost/vsock: accept only packets with the right dst_cid
  net: ena: fix napi handler misbehavior when the napi budget is zero
  net: phylink: fix interface passed to mac_link_up
  ipv6/addrconf: only check invalid header values when NETLINK_F_STRICT_CHK is set
  bnxt: apply computed clamp value for coalece parameter
  gtp: do not allow adding duplicate tid and ms_addr pdp context
  gtp: fix an use-after-free in ipv4_pdp_find()
  hv_netvsc: Fix tx_table init in rndis_set_subchannel()
  tcp/dccp: fix possible race __inet_lookup_established()
  tcp: do not send empty skb from tcp_write_xmit()
  bonding: fix active-backup transition after link failure
  gtp: avoid zero size hashtable
  gtp: fix wrong condition in gtp_genl_dump_pdp()
  net: marvell: mvpp2: phylink requires the link interrupt
  net: dsa: sja1105: Reconcile the meaning of TPID and TPID2 for E/T and P/Q/R/S
  net/dst: do not confirm neighbor for vxlan and geneve pmtu update
  sit: do not confirm neighbor when do pmtu update
  vti: do not confirm neighbor when do pmtu update
  tunnel: do not confirm neighbor when do pmtu update
  net/dst: add new function skb_dst_update_pmtu_no_confirm
  gtp: do not confirm neighbor when do pmtu update
  ip6_gre: do not confirm neighbor when do pmtu update
  net: add bool confirm_neigh parameter for dst_ops.update_pmtu
  mlxsw: spectrum: Use dedicated policer for VRRP packets
  mlxsw: spectrum_router: Skip loopback RIFs during MAC validation
  bnxt_en: Add missing devlink health reporters for VFs.
  bnxt_en: Fix the logic that creates the health reporters.
  bnxt_en: Remove unnecessary NULL checks for fw_health
  bnxt_en: Fix bp->fw_health allocation and free logic.
  bnxt_en: Return error if FW returns more data than dump length
  bnxt_en: Free context memory in the open path if firmware has been reset.
  bnxt_en: Fix MSIX request logic for RDMA driver.
  udp: fix integer overflow while computing available space in sk_rcvbuf
  tcp: Fix highest_sack and highest_sack_seq
  ptp: fix the race between the release of ptp_clock and cdev
  net: stmmac: dwmac-meson8b: Fix the RGMII TX delay on Meson8b/8m2 SoCs
  net_sched: sch_fq: properly set sk->sk_pacing_status
  net/sched: add delete_empty() to filters and use it in cls_flower
  net/sched: act_mirred: Pull mac prior redir to non mac_header_xmit device
  net: phy: aquantia: add suspend / resume ops for AQR105
  net/mlxfw: Fix out-of-memory error in mfa2 flash burning
  net: dsa: bcm_sf2: Fix IP fragment location and behavior
  cxgb4/cxgb4vf: fix flow control display for auto negotiation
  xfs: fix mount failure crash on invalid iclog memory access
  drm: limit to INT_MAX in create_blob ioctl
  uaccess: disallow > INT_MAX copy sizes
  tomoyo: Don't use nifty names on sockets.
  hrtimer: Annotate lockless access to timer->state
  net: icmp: fix data-race in cmp_global_allow()
  net: add a READ_ONCE() in skb_peek_tail()
  inetpeer: fix data-race in inet_putpeer / inet_putpeer
  netfilter: bridge: make sure to pull arp header in br_nf_forward_arp()
  net/smc: add fallback check to connect()
  powerpc: Fix __clear_user() with KUAP enabled
  6pack,mkiss: fix possible deadlock
  netfilter: ebtables: compat: reject all padding in matches/watchers
  Revert "iwlwifi: assign directly to iwl_trans->cfg in QuZ detection"
  md: make sure desc_nr less than MD_SB_DISKS
  sctp: fix err handling of stream initialization
  Revert "powerpc/vcpu: Assume dedicated processors as non-preempt"
  userfaultfd: require CAP_SYS_PTRACE for UFFD_FEATURE_EVENT_FORK
  kernel: sysctl: make drop_caches write-only
  mm/hugetlbfs: fix error handling when setting up mounts
  selftests: vm: add fragment CONFIG_TEST_VMALLOC
  s390: disable preemption when switching to nodat stack with CALL_ON_STACK
  mailbox: imx: Fix Tx doorbell shutdown path
  ocfs2: fix passing zero to 'PTR_ERR' warning
  s390/cpum_sf: Check for SDBT and SDB consistency
  s390/unwind: filter out unreliable bogus %r14
  libfdt: define INT32_MAX and UINT32_MAX in libfdt_env.h
  mailbox: imx: Clear the right interrupts at shutdown
  s390/zcrypt: handle new reply code FILTERED_BY_HYPERVISOR
  perf regs: Make perf_reg_name() return "unknown" instead of NULL
  perf script: Fix brstackinsn for AUXTRACE
  perf diff: Use llabs() with 64-bit values
  cifs: move cifsFileInfo_put logic into a work-queue
  cdrom: respect device capabilities during opening action
  of: unittest: fix memory leak in attach_node_and_children
  io_uring: io_allocate_scq_urings() should return a sane state
  um: virtio: Keep reading on -EAGAIN
  cifs: Fix use-after-free bug in cifs_reconnect()
  powerpc: Don't add -mabi= flags when building with Clang
  scripts/kallsyms: fix definitely-lost memory leak
  drm/amdgpu: Call find_vma under mmap_sem
  apparmor: fix unsigned len comparison with less than zero
  Drivers: hv: vmbus: Fix crash handler reset of Hyper-V synic
  tools/power/x86/intel-speed-select: Ignore missing config level
  gpio: lynxpoint: Setup correct IRQ handlers
  gpio: mpc8xxx: Don't overwrite default irq_set_type callback
  platform/x86: intel_pmc_core: Add Comet Lake (CML) platform support to intel_pmc_core driver
  platform/x86: intel_pmc_core: Fix the SoC naming inconsistency
  gpio/mpc8xxx: fix qoriq GPIO reading
  habanalabs: skip VA block list update in reset flow
  f2fs: Fix deadlock in f2fs_gc() context during atomic files handling
  scsi: target: iscsi: Wait for all commands to finish before freeing a session
  scsi: iscsi: Don't send data to unbound connection
  scsi: ufs: Fix up auto hibern8 enablement
  scsi: target: core: Release SPC-2 reservations when closing a session
  scsi: NCR5380: Add disconnect_mask module parameter
  scsi: scsi_debug: num_tgts must be >= 0
  scsi: ufs: Fix error handing during hibern8 enter
  scsi: pm80xx: Fix for SATA device discovery
  powerpc/fixmap: Use __fix_to_virt() instead of fix_to_virt()
  watchdog: Fix the race between the release of watchdog_core_data and cdev
  watchdog: prevent deferral of watchdogd wakeup on RT
  watchdog: imx7ulp: Fix reboot hang
  HID: rmi: Check that the RMI_STARTED bit is set before unregistering the RMI transport device
  HID: Improve Windows Precision Touchpad detection.
  libnvdimm/btt: fix variable 'rc' set but not used
  ARM: 8937/1: spectre-v2: remove Brahma-B53 from hardening
  HID: i2c-hid: fix no irq after reset on raydium 3118
  HID: logitech-hidpp: Silence intermittent get_battery_capacity errors
  dt-bindings: Improve validation build error handling
  HID: quirks: Add quirk for HP MSU1465 PIXART OEM mouse
  bcache: at least try to shrink 1 node in bch_mca_scan()
  clk: pxa: fix one of the pxa RTC clocks
  scsi: atari_scsi: sun3_scsi: Set sg_tablesize to 1 instead of SG_NONE
  powerpc/book3s/mm: Update Oops message to print the correct translation in use
  powerpc/eeh: differentiate duplicate detection message
  powerpc/security: Fix wrong message when RFI Flush is disable
  PCI: rpaphp: Correctly match ibm, my-drc-index to drc-name when using drc-info
  PCI: rpaphp: Annotate and correctly byte swap DRC properties
  PCI: rpaphp: Don't rely on firmware feature to imply drc-info support
  powerpc/pseries/cmm: Implement release() function for sysfs device
  scsi: ufs: fix potential bug which ends in system hang
  PCI: rpaphp: Fix up pointer to first drc-info entry
  scsi: zorro_esp: Limit DMA transfers to 65536 bytes (except on Fastlane)
  scsi: lpfc: fix: Coverity: lpfc_cmpl_els_rsp(): Null pointer dereferences
  Input: ili210x - handle errors from input_mt_init_slots()
  iomap: fix return value of iomap_dio_bio_actor on 32bit systems
  i2c: stm32f7: fix & reorder remove & probe error handling
  iommu/arm-smmu-v3: Don't display an error when IRQ lines are missing
  fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long
  dma-direct: check for overflows on 32 bit DMA addresses
  irqchip: ingenic: Error out if IRQ domain creation failed
  irqchip/irq-bcm7038-l1: Enable parent IRQ if necessary
  clk: clk-gpio: propagate rate change to parent
  clk: qcom: Allow constant ratio freq tables for rcg
  clk: qcom: smd: Add missing pnoc clock
  f2fs: fix to update dir's i_pino during cross_rename
  scsi: lpfc: Fix duplicate unreg_rpi error in port offline flow
  scsi: lpfc: Fix unexpected error messages during RSCN handling
  scsi: tracing: Fix handling of TRANSFER LENGTH == 0 for READ(6) and WRITE(6)
  jbd2: Fix statistics for the number of logged blocks
  ext4: iomap that extends beyond EOF should be marked dirty
  ext4: update direct I/O read lock pattern for IOCB_NOWAIT
  powerpc/book3s64/hash: Add cond_resched to avoid soft lockup warning
  powerpc/security/book3s64: Report L1TF status in sysfs
  selftests/powerpc: Skip tm-signal-sigreturn-nt if TM not available
  dtc: Use pkg-config to locate libyaml
  clocksource/drivers/timer-of: Use unique device name instead of timer
  clocksource/drivers/asm9260: Add a check for of_clk_get
  leds: trigger: netdev: fix handling on interface rename
  leds: an30259a: add a check for devm_regmap_init_i2c
  leds: lm3692x: Handle failure to probe the regulator
  dmaengine: fsl-qdma: Handle invalid qdma-queue0 IRQ
  dma-mapping: fix handling of dma-ranges for reserved memory (again)
  dma-mapping: Add vmap checks to dma_map_single()
  dma-debug: add a schedule point in debug_dma_dump_mappings()
  powerpc/tools: Don't quote $objdump in scripts
  selftests/powerpc: Fixup clobbers for TM tests
  Input: st1232 - do not reset the chip too early
  powerpc/pseries: Don't fail hash page table insert for bolted mapping
  powerpc/pseries: Mark accumulate_stolen_time() as notrace
  scsi: hisi_sas: Delete the debugfs folder of hisi_sas when the probe fails
  scsi: hisi_sas: Replace in_softirq() check in hisi_sas_task_exec()
  scsi: csiostor: Don't enable IRQs too early
  scsi: lpfc: Fix SLI3 hba in loop mode not discovering devices
  scsi: lpfc: Fix hardlockup in lpfc_abort_handler
  scsi: target: compare full CHAP_A Algorithm strings
  dmaengine: xilinx_dma: Clear desc_pendingcount in xilinx_dma_reset
  iommu/tegra-smmu: Fix page tables in > 4 GiB memory
  iommu: rockchip: Free domain on .domain_free
  platform/x86: peaq-wmi: switch to using polled mode of input devices
  tools/power/x86/intel-speed-select: Remove warning for unused result
  powerpc/papr_scm: Fix an off-by-one check in papr_scm_meta_{get, set}
  f2fs: fix to update time in lazytime mode
  Input: atmel_mxt_ts - disable IRQ across suspend
  scsi: lpfc: Fix list corruption in lpfc_sli_get_iocbq
  gpio: mxc: Only get the second IRQ when there is more than one IRQ
  scsi: mpt3sas: Reject NVMe Encap cmnds to unsupported HBA
  scsi: lpfc: Fix locking on mailbox command completion
  scsi: mpt3sas: Fix clear pending bit in ioctl status
  scsi: lpfc: Fix discovery failures when target device connectivity bounces
  scsi: lpfc: Fix spinlock_irq issues in lpfc_els_flush_cmd()
  Revert "MIPS: futex: Emit Loongson3 sync workarounds within asm"
  Revert "MIPS: futex: Restore \n after sync instructions"
  UPSTREAM: exit: panic before exit_mm() on global init exit
  ANDROID: serdev: Fix platform device support

Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/Makefile
	kernel/time/hrtimer.c

Change-Id: I271162549a080e2b747572f5c87cfd8fa111da51
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit 2b9f49ee80
@@ -130,11 +130,13 @@ binding schema. All of the DT binding documents can be validated using the

make dt_binding_check

In order to perform validation of DT source files, use the `dtbs_check` target::
In order to perform validation of DT source files, use the ``dtbs_check`` target::

make dtbs_check

This will first run the `dt_binding_check` which generates the processed schema.

Note that ``dtbs_check`` will skip any binding schema files with errors. It is
necessary to use ``dt_binding_check`` to get all the validation errors in the
binding schema files.

It is also possible to run checks with a single schema file by setting the
``DT_SCHEMA_FILES`` variable to a specific schema file.
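As an illustrative sketch (not part of this change; the schema path below is only an example), a single binding can be checked by passing it in ``DT_SCHEMA_FILES``::

    make dt_binding_check DT_SCHEMA_FILES=Documentation/devicetree/bindings/trivial-devices.yaml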
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 7
SUBLEVEL = 8
EXTRAVERSION =
NAME = Kleptomaniac Octopus
File diff suppressed because it is too large
@@ -2,11 +2,13 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H

#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define INT_MAX ((int)(~0U>>1))
#define INT32_MAX S32_MAX
#define UINT32_MAX U32_MAX

typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
|
||||
unsigned long attrs)
|
||||
|
||||
{
|
||||
void *ret = dma_alloc_from_global_coherent(size, dma_handle);
|
||||
void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
|
||||
|
||||
/*
|
||||
* dma_alloc_from_global_coherent() may fail because:
|
||||
|
@ -65,6 +65,9 @@ static void cpu_v7_spectre_init(void)
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_ARM_PSCI
|
||||
case ARM_CPU_PART_BRAHMA_B53:
|
||||
/* Requires no workaround */
|
||||
break;
|
||||
default:
|
||||
/* Other ARM CPUs require no workaround */
|
||||
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
|
||||
|
@ -218,14 +218,13 @@
|
||||
* ordering will be done by smp_llsc_mb() and friends.
|
||||
*/
|
||||
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
|
||||
# define __WEAK_LLSC_MB sync
|
||||
# define smp_llsc_mb() \
|
||||
__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
|
||||
# define __LLSC_CLOBBER
|
||||
#define __WEAK_LLSC_MB " sync \n"
|
||||
#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
|
||||
#define __LLSC_CLOBBER
|
||||
#else
|
||||
# define __WEAK_LLSC_MB
|
||||
# define smp_llsc_mb() do { } while (0)
|
||||
# define __LLSC_CLOBBER "memory"
|
||||
#define __WEAK_LLSC_MB " \n"
|
||||
#define smp_llsc_mb() do { } while (0)
|
||||
#define __LLSC_CLOBBER "memory"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_CAVIUM_OCTEON
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/sync.h>
|
||||
#include <asm/war.h>
|
||||
|
||||
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
|
||||
@ -33,7 +32,7 @@
|
||||
" .set arch=r4000 \n" \
|
||||
"2: sc $1, %2 \n" \
|
||||
" beqzl $1, 1b \n" \
|
||||
__stringify(__WEAK_LLSC_MB) " \n" \
|
||||
__WEAK_LLSC_MB \
|
||||
"3: \n" \
|
||||
" .insn \n" \
|
||||
" .set pop \n" \
|
||||
@ -51,19 +50,19 @@
|
||||
"i" (-EFAULT) \
|
||||
: "memory"); \
|
||||
} else if (cpu_has_llsc) { \
|
||||
loongson_llsc_mb(); \
|
||||
__asm__ __volatile__( \
|
||||
" .set push \n" \
|
||||
" .set noat \n" \
|
||||
" .set push \n" \
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
|
||||
" " __SYNC(full, loongson3_war) " \n" \
|
||||
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
|
||||
" .set pop \n" \
|
||||
" " insn " \n" \
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
|
||||
"2: "user_sc("$1", "%2")" \n" \
|
||||
" beqz $1, 1b \n" \
|
||||
__stringify(__WEAK_LLSC_MB) " \n" \
|
||||
__WEAK_LLSC_MB \
|
||||
"3: \n" \
|
||||
" .insn \n" \
|
||||
" .set pop \n" \
|
||||
@ -148,7 +147,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
" .set arch=r4000 \n"
|
||||
"2: sc $1, %2 \n"
|
||||
" beqzl $1, 1b \n"
|
||||
__stringify(__WEAK_LLSC_MB) " \n"
|
||||
__WEAK_LLSC_MB
|
||||
"3: \n"
|
||||
" .insn \n"
|
||||
" .set pop \n"
|
||||
@ -165,13 +164,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
"i" (-EFAULT)
|
||||
: "memory");
|
||||
} else if (cpu_has_llsc) {
|
||||
loongson_llsc_mb();
|
||||
__asm__ __volatile__(
|
||||
"# futex_atomic_cmpxchg_inatomic \n"
|
||||
" .set push \n"
|
||||
" .set noat \n"
|
||||
" .set push \n"
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
||||
" " __SYNC(full, loongson3_war) " \n"
|
||||
"1: "user_ll("%1", "%3")" \n"
|
||||
" bne %1, %z4, 3f \n"
|
||||
" .set pop \n"
|
||||
@ -179,7 +178,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
||||
"2: "user_sc("$1", "%2")" \n"
|
||||
" beqz $1, 1b \n"
|
||||
"3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
|
||||
__WEAK_LLSC_MB
|
||||
"3: \n"
|
||||
" .insn \n"
|
||||
" .set pop \n"
|
||||
" .section .fixup,\"ax\" \n"
|
||||
@ -194,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
|
||||
"i" (-EFAULT)
|
||||
: "memory");
|
||||
loongson_llsc_mb();
|
||||
} else
|
||||
return -ENOSYS;
|
||||
|
||||
|
@ -91,11 +91,13 @@ MULTIPLEWORD := -mmultiple
|
||||
endif
|
||||
|
||||
ifdef CONFIG_PPC64
|
||||
ifndef CONFIG_CC_IS_CLANG
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
|
||||
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
||||
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
|
||||
endif
|
||||
endif
|
||||
|
||||
ifndef CONFIG_CC_IS_CLANG
|
||||
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
|
||||
@ -141,6 +143,7 @@ endif
|
||||
endif
|
||||
|
||||
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
|
||||
ifndef CONFIG_CC_IS_CLANG
|
||||
ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
|
||||
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
|
||||
@ -149,6 +152,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
|
||||
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
||||
endif
|
||||
endif
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
|
||||
|
||||
|
@ -6,6 +6,8 @@
|
||||
#include <string.h>
|
||||
|
||||
#define INT_MAX ((int)(~0U>>1))
|
||||
#define UINT32_MAX ((u32)~0U)
|
||||
#define INT32_MAX ((s32)(UINT32_MAX >> 1))
|
||||
|
||||
#include "of.h"
|
||||
|
||||
|
@ -77,7 +77,12 @@ enum fixed_addresses {
|
||||
static inline void __set_fixmap(enum fixed_addresses idx,
|
||||
phys_addr_t phys, pgprot_t flags)
|
||||
{
|
||||
map_kernel_page(fix_to_virt(idx), phys, flags);
|
||||
if (__builtin_constant_p(idx))
|
||||
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
|
||||
else if (WARN_ON(idx >= __end_of_fixed_addresses))
|
||||
return;
|
||||
|
||||
map_kernel_page(__fix_to_virt(idx), phys, flags);
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
@ -36,12 +36,10 @@
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
DECLARE_STATIC_KEY_FALSE(shared_processor);
|
||||
|
||||
#define vcpu_is_preempted vcpu_is_preempted
|
||||
static inline bool vcpu_is_preempted(int cpu)
|
||||
{
|
||||
if (!static_branch_unlikely(&shared_processor))
|
||||
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
|
||||
return false;
|
||||
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
|
||||
}
|
||||
|
@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
|
||||
return n;
|
||||
}
|
||||
|
||||
extern unsigned long __clear_user(void __user *addr, unsigned long size);
|
||||
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
|
||||
|
||||
static inline unsigned long clear_user(void __user *addr, unsigned long size)
|
||||
{
|
||||
@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
|
||||
might_fault();
|
||||
if (likely(access_ok(addr, size))) {
|
||||
allow_write_to_user(addr, size);
|
||||
ret = __clear_user(addr, size);
|
||||
ret = __arch_clear_user(addr, size);
|
||||
prevent_write_to_user(addr, size);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
|
||||
{
|
||||
return clear_user(addr, size);
|
||||
}
|
||||
|
||||
extern long strncpy_from_user(char *dst, const char __user *src, long count);
|
||||
extern __must_check long strnlen_user(const char __user *str, long n);
|
||||
|
||||
|
@ -897,12 +897,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
|
||||
|
||||
/* Log the event */
|
||||
if (pe->type & EEH_PE_PHB) {
|
||||
pr_err("EEH: PHB#%x failure detected, location: %s\n",
|
||||
pr_err("EEH: Recovering PHB#%x, location: %s\n",
|
||||
pe->phb->global_number, eeh_pe_loc_get(pe));
|
||||
} else {
|
||||
struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
|
||||
|
||||
pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
|
||||
pr_err("EEH: Recovering PHB#%x-PE#%x\n",
|
||||
pe->phb->global_number, pe->addr);
|
||||
pr_err("EEH: PE location: %s, PHB location: %s\n",
|
||||
eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
|
||||
|
@ -142,32 +142,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
|
||||
|
||||
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
|
||||
|
||||
if (rfi_flush || thread_priv) {
|
||||
if (rfi_flush) {
|
||||
struct seq_buf s;
|
||||
seq_buf_init(&s, buf, PAGE_SIZE - 1);
|
||||
|
||||
seq_buf_printf(&s, "Mitigation: ");
|
||||
|
||||
if (rfi_flush)
|
||||
seq_buf_printf(&s, "RFI Flush");
|
||||
|
||||
if (rfi_flush && thread_priv)
|
||||
seq_buf_printf(&s, ", ");
|
||||
|
||||
seq_buf_printf(&s, "Mitigation: RFI Flush");
|
||||
if (thread_priv)
|
||||
seq_buf_printf(&s, "L1D private per thread");
|
||||
seq_buf_printf(&s, ", L1D private per thread");
|
||||
|
||||
seq_buf_printf(&s, "\n");
|
||||
|
||||
return s.len;
|
||||
}
|
||||
|
||||
if (thread_priv)
|
||||
return sprintf(buf, "Vulnerable: L1D private per thread\n");
|
||||
|
||||
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
|
||||
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
|
||||
return sprintf(buf, "Not affected\n");
|
||||
|
||||
return sprintf(buf, "Vulnerable\n");
|
||||
}
|
||||
|
||||
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return cpu_show_meltdown(dev, attr, buf);
|
||||
}
|
||||
#endif
|
||||
|
||||
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
|
@ -232,7 +232,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
|
||||
* Accumulate stolen time by scanning the dispatch trace log.
|
||||
* Called on entry from user mode.
|
||||
*/
|
||||
void accumulate_stolen_time(void)
|
||||
void notrace accumulate_stolen_time(void)
|
||||
{
|
||||
u64 sst, ust;
|
||||
unsigned long save_irq_soft_mask = irq_soft_mask_return();
|
||||
|
@ -250,15 +250,22 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
|
||||
}
|
||||
NOKPROBE_SYMBOL(oops_end);
|
||||
|
||||
static char *get_mmu_str(void)
|
||||
{
|
||||
if (early_radix_enabled())
|
||||
return " MMU=Radix";
|
||||
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
|
||||
return " MMU=Hash";
|
||||
return "";
|
||||
}
|
||||
|
||||
static int __die(const char *str, struct pt_regs *regs, long err)
|
||||
{
|
||||
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
|
||||
|
||||
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
|
||||
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
|
||||
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
|
||||
PAGE_SIZE / 1024,
|
||||
early_radix_enabled() ? " MMU=Radix" : "",
|
||||
early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
|
||||
PAGE_SIZE / 1024, get_mmu_str(),
|
||||
IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
|
||||
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
|
||||
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
|
||||
|
@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
|
||||
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
|
||||
CACHELINE_MASK = (L1_CACHE_BYTES-1)
|
||||
|
||||
_GLOBAL(__clear_user)
|
||||
_GLOBAL(__arch_clear_user)
|
||||
/*
|
||||
* Use dcbz on the complete cache lines in the destination
|
||||
* to set them to zero. This requires that the destination
|
||||
@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
|
||||
EX_TABLE(8b, 91b)
|
||||
EX_TABLE(9b, 91b)
|
||||
|
||||
EXPORT_SYMBOL(__clear_user)
|
||||
EXPORT_SYMBOL(__arch_clear_user)
|
||||
|
@ -17,7 +17,7 @@ PPC64_CACHES:
|
||||
.section ".text"
|
||||
|
||||
/**
|
||||
* __clear_user: - Zero a block of memory in user space, with less checking.
|
||||
* __arch_clear_user: - Zero a block of memory in user space, with less checking.
|
||||
* @to: Destination address, in user space.
|
||||
* @n: Number of bytes to zero.
|
||||
*
|
||||
@ -58,7 +58,7 @@ err3; stb r0,0(r3)
|
||||
mr r3,r4
|
||||
blr
|
||||
|
||||
_GLOBAL_TOC(__clear_user)
|
||||
_GLOBAL_TOC(__arch_clear_user)
|
||||
cmpdi r4,32
|
||||
neg r6,r3
|
||||
li r0,0
|
||||
@ -181,4 +181,4 @@ err1; dcbz 0,r3
|
||||
cmpdi r4,32
|
||||
blt .Lshort_clear
|
||||
b .Lmedium_clear
|
||||
EXPORT_SYMBOL(__clear_user)
|
||||
EXPORT_SYMBOL(__arch_clear_user)
|
||||
|
@ -294,10 +294,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
|
||||
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
|
||||
HPTE_V_BOLTED, psize, psize,
|
||||
ssize);
|
||||
|
||||
if (ret == -1) {
|
||||
/* Try to remove a non bolted entry */
|
||||
ret = mmu_hash_ops.hpte_remove(hpteg);
|
||||
if (ret != -1)
|
||||
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
|
||||
HPTE_V_BOLTED, psize, psize,
|
||||
ssize);
|
||||
}
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
cond_resched();
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
if (debug_pagealloc_enabled() &&
|
||||
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
|
||||
|
@ -411,6 +411,10 @@ static struct bus_type cmm_subsys = {
|
||||
.dev_name = "cmm",
|
||||
};
|
||||
|
||||
static void cmm_release_device(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* cmm_sysfs_register - Register with sysfs
|
||||
*
|
||||
@ -426,6 +430,7 @@ static int cmm_sysfs_register(struct device *dev)
|
||||
|
||||
dev->id = 0;
|
||||
dev->bus = &cmm_subsys;
|
||||
dev->release = cmm_release_device;
|
||||
|
||||
if ((rc = device_register(dev)))
|
||||
goto subsys_unregister;
|
||||
|
@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p,
|
||||
int len, read;
|
||||
int64_t ret;
|
||||
|
||||
if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
|
||||
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
|
||||
return -EINVAL;
|
||||
|
||||
for (len = hdr->in_length; len; len -= read) {
|
||||
@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
|
||||
__be64 data_be;
|
||||
int64_t ret;
|
||||
|
||||
if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
|
||||
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
|
||||
return -EINVAL;
|
||||
|
||||
for (len = hdr->in_length; len; len -= wrote) {
|
||||
|
@ -74,9 +74,6 @@
|
||||
#include "pseries.h"
|
||||
#include "../../../../drivers/pci/pci.h"
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(shared_processor);
|
||||
EXPORT_SYMBOL_GPL(shared_processor);
|
||||
|
||||
int CMO_PrPSP = -1;
|
||||
int CMO_SecPSP = -1;
|
||||
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
|
||||
@ -761,10 +758,6 @@ static void __init pSeries_setup_arch(void)
|
||||
|
||||
if (firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
vpa_init(boot_cpuid);
|
||||
|
||||
if (lppaca_shared_proc(get_lppaca()))
|
||||
static_branch_enable(&shared_processor);
|
||||
|
||||
ppc_md.power_save = pseries_lpar_idle;
|
||||
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
|
@ -20,7 +20,7 @@ objdump="$1"
|
||||
vmlinux="$2"
|
||||
|
||||
bad_relocs=$(
|
||||
"$objdump" -R "$vmlinux" |
|
||||
$objdump -R "$vmlinux" |
|
||||
# Only look at relocation lines.
|
||||
grep -E '\<R_' |
|
||||
# These relocations are okay
|
||||
|
@ -18,14 +18,14 @@ vmlinux="$2"
|
||||
#__end_interrupts should be located within the first 64K
|
||||
|
||||
end_intr=0x$(
|
||||
"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
|
||||
$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
|
||||
--stop-address=0xc000000000010000 |
|
||||
grep '\<__end_interrupts>:' |
|
||||
awk '{print $1}'
|
||||
)
|
||||
|
||||
BRANCHES=$(
|
||||
"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
|
||||
$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
|
||||
--stop-address=${end_intr} |
|
||||
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
|
||||
grep -v '\<__start_initialization_multiplatform>' |
|
||||
|
@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image)
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
int rc;
|
||||
|
||||
preempt_disable();
|
||||
rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
|
||||
preempt_enable();
|
||||
return rc == 0;
|
||||
#else
|
||||
return false;
|
||||
|
@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
||||
unsigned long num_sdb, gfp_t gfp_flags)
|
||||
{
|
||||
int i, rc;
|
||||
unsigned long *new, *tail;
|
||||
unsigned long *new, *tail, *tail_prev = NULL;
|
||||
|
||||
if (!sfb->sdbt || !sfb->tail)
|
||||
return -EINVAL;
|
||||
@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
||||
sfb->num_sdbt++;
|
||||
/* Link current page to tail of chain */
|
||||
*tail = (unsigned long)(void *) new + 1;
|
||||
tail_prev = tail;
|
||||
tail = new;
|
||||
}
|
||||
|
||||
@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
||||
* issue, a new realloc call (if required) might succeed.
|
||||
*/
|
||||
rc = alloc_sample_data_block(tail, gfp_flags);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
/* Undo last SDBT. An SDBT with no SDB at its first
|
||||
* entry but with an SDBT entry instead can not be
|
||||
* handled by the interrupt handler code.
|
||||
* Avoid this situation.
|
||||
*/
|
||||
if (tail_prev) {
|
||||
sfb->num_sdbt--;
|
||||
free_page((unsigned long) new);
|
||||
tail = tail_prev;
|
||||
}
|
||||
break;
|
||||
}
|
||||
sfb->num_sdb++;
|
||||
tail++;
|
||||
tail_prev = new = NULL; /* Allocated at least one SBD */
|
||||
}
|
||||
|
||||
/* Link sampling buffer to its origin */
|
||||
|
@ -60,6 +60,11 @@ bool unwind_next_frame(struct unwind_state *state)
|
||||
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
|
||||
reliable = false;
|
||||
regs = NULL;
|
||||
if (!__kernel_text_address(ip)) {
|
||||
/* skip bogus %r14 */
|
||||
state->regs = NULL;
|
||||
return unwind_next_frame(state);
|
||||
}
|
||||
} else {
|
||||
sf = (struct stack_frame *) state->sp;
|
||||
sp = READ_ONCE_NOCHECK(sf->back_chain);
|
||||
|
@ -119,9 +119,15 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
|
||||
*/
|
||||
int memcpy_real(void *dest, void *src, size_t count)
|
||||
{
|
||||
if (S390_lowcore.nodat_stack != 0)
|
||||
return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
|
||||
3, dest, src, count);
|
||||
int rc;
|
||||
|
||||
if (S390_lowcore.nodat_stack != 0) {
|
||||
preempt_disable();
|
||||
rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
|
||||
dest, src, count);
|
||||
preempt_enable();
|
||||
return rc;
|
||||
}
|
||||
/*
|
||||
* This is a really early memcpy_real call, the stacks are
|
||||
* not set up yet. Just call _memcpy_real on the early boot
|
||||
|
@ -83,7 +83,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int full_read(int fd, void *buf, int len)
|
||||
static int full_read(int fd, void *buf, int len, bool abortable)
|
||||
{
|
||||
int rc;
|
||||
|
||||
@ -93,7 +93,7 @@ static int full_read(int fd, void *buf, int len)
|
||||
buf += rc;
|
||||
len -= rc;
|
||||
}
|
||||
} while (len && (rc > 0 || rc == -EINTR));
|
||||
} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
|
||||
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
@ -104,7 +104,7 @@ static int full_read(int fd, void *buf, int len)
|
||||
|
||||
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
|
||||
{
|
||||
return full_read(fd, msg, sizeof(msg->header));
|
||||
return full_read(fd, msg, sizeof(msg->header), true);
|
||||
}
|
||||
|
||||
static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
|
||||
@ -118,7 +118,7 @@ static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
|
||||
size = msg->header.size;
|
||||
if (size > max_payload_size)
|
||||
return -EPROTO;
|
||||
return full_read(fd, &msg->payload, size);
|
||||
return full_read(fd, &msg->payload, size, false);
|
||||
}
|
||||
|
||||
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
|
||||
|
@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
|
||||
tracks->xa = 0;
|
||||
tracks->error = 0;
|
||||
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
|
||||
|
||||
if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
|
||||
tracks->error = CDS_NO_INFO;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Grab the TOC header so we can see how many tracks there are */
|
||||
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
|
||||
if (ret) {
|
||||
@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
|
||||
ret = open_for_data(cdi);
|
||||
if (ret)
|
||||
goto err;
|
||||
cdrom_mmc3_profile(cdi);
|
||||
if (CDROM_CAN(CDC_GENERIC_PACKET))
|
||||
cdrom_mmc3_profile(cdi);
|
||||
if (mode & FMODE_WRITE) {
|
||||
ret = -EROFS;
|
||||
if (cdrom_open_write(cdi))
|
||||
@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
|
||||
it doesn't give enough information or fails. then we return
|
||||
the toc contents. */
|
||||
use_toc:
|
||||
if (!CDROM_CAN(CDC_PLAY_AUDIO))
|
||||
return -ENOSYS;
|
||||
|
||||
toc.cdte_format = CDROM_MSF;
|
||||
toc.cdte_track = CDROM_LEADOUT;
|
||||
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
|
||||
|
@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
|
||||
else
|
||||
clk = clk_register_gpio_gate(&pdev->dev, node->name,
|
||||
parent_names ? parent_names[0] : NULL, gpiod,
|
||||
0);
|
||||
CLK_SET_RATE_PARENT);
|
||||
if (IS_ERR(clk))
|
||||
return PTR_ERR(clk);
|
||||
|
||||
|
@ -459,6 +459,7 @@ struct dummy_clk {
|
||||
};
|
||||
static struct dummy_clk dummy_clks[] __initdata = {
|
||||
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
|
||||
DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
|
||||
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
|
||||
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
|
||||
};
|
||||
|
@ -343,6 +343,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
|
||||
if (clk_flags & CLK_SET_RATE_PARENT) {
|
||||
rate = f->freq;
|
||||
if (f->pre_div) {
|
||||
if (!rate)
|
||||
rate = req->rate;
|
||||
rate /= 2;
|
||||
rate *= f->pre_div + 1;
|
||||
}
|
||||
|
@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
|
||||
};
|
||||
|
||||
/* msm8998 */
|
||||
DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
|
||||
DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
|
||||
DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
|
||||
DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
|
||||
@ -670,6 +671,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
|
||||
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
|
||||
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
|
||||
static struct clk_smd_rpm *msm8998_clks[] = {
|
||||
[RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
|
||||
[RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
|
||||
[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
|
||||
[RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
|
||||
[RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
|
||||
|
@ -32,6 +32,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
|
||||
if (!f)
|
||||
return NULL;
|
||||
|
||||
if (!f->freq)
|
||||
return f;
|
||||
|
||||
for (; f->freq; f++)
|
||||
if (rate <= f->freq)
|
||||
return f;
|
||||
|
@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
|
||||
}
|
||||
|
||||
clk = of_clk_get(np, 0);
|
||||
if (IS_ERR(clk)) {
|
||||
pr_err("Failed to get clk!\n");
|
||||
return PTR_ERR(clk);
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(clk);
|
||||
if (ret) {
|
||||
|
@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
|
||||
}
|
||||
|
||||
if (!to->clkevt.name)
|
||||
to->clkevt.name = np->name;
|
||||
to->clkevt.name = np->full_name;
|
||||
|
||||
to->np = np;
|
||||
|
||||
|
@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
|
||||
return ret;
|
||||
|
||||
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
|
||||
if (fsl_qdma->irq_base < 0)
|
||||
return fsl_qdma->irq_base;
|
||||
|
||||
fsl_qdma->feature = of_property_read_bool(np, "big-endian");
|
||||
INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
|
||||
|
||||
|
@ -1433,6 +1433,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
|
||||
|
||||
chan->err = false;
|
||||
chan->idle = true;
|
||||
chan->desc_pendingcount = 0;
|
||||
chan->desc_submitcount = 0;
|
||||
|
||||
return err;
|
||||
|
@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
|
||||
value |= TRIG_SEL_BIT | INT_INV_BIT;
|
||||
|
||||
outl(value, reg);
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
irq_set_handler_locked(d, handle_edge_irq);
|
||||
else if (type & IRQ_TYPE_LEVEL_MASK)
|
||||
irq_set_handler_locked(d, handle_level_irq);
|
||||
|
||||
spin_unlock_irqrestore(&lg->lock, flags);
|
||||
|
||||
return 0;
|
||||
|
@ -377,7 +377,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
|
||||
* It's assumed that only a single type of gpio controller is available
|
||||
* on the current machine, so overwriting global data is fine.
|
||||
*/
|
||||
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
|
||||
if (devtype->irq_set_type)
|
||||
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
|
||||
|
||||
if (devtype->gpio_dir_out)
|
||||
gc->direction_output = devtype->gpio_dir_out;
|
||||
@ -386,6 +387,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
|
||||
|
||||
gc->to_irq = mpc8xxx_gpio_to_irq;
|
||||
|
||||
if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
|
||||
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
|
||||
|
||||
ret = gpiochip_add_data(gc, mpc8xxx_gc);
|
||||
if (ret) {
|
||||
pr_err("%pOF: GPIO chip registration failed with status %d\n",
|
||||
|
@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct mxc_gpio_port *port;
|
||||
int irq_count;
|
||||
int irq_base;
|
||||
int err;
|
||||
|
||||
@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
|
||||
if (IS_ERR(port->base))
|
||||
return PTR_ERR(port->base);
|
||||
|
||||
port->irq_high = platform_get_irq(pdev, 1);
|
||||
if (port->irq_high < 0)
|
||||
port->irq_high = 0;
|
||||
irq_count = platform_irq_count(pdev);
|
||||
if (irq_count < 0)
|
||||
return irq_count;
|
||||
|
||||
if (irq_count > 1) {
|
||||
port->irq_high = platform_get_irq(pdev, 1);
|
||||
if (port->irq_high < 0)
|
||||
port->irq_high = 0;
|
||||
}
|
||||
|
||||
port->irq = platform_get_irq(pdev, 0);
|
||||
if (port->irq < 0)
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <linux/hmm.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/swap.h>
|
||||
@ -788,7 +789,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
||||
struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
|
||||
struct ttm_tt *ttm = bo->tbo.ttm;
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct mm_struct *mm = gtt->usertask->mm;
|
||||
struct mm_struct *mm;
|
||||
unsigned long start = gtt->userptr;
|
||||
struct vm_area_struct *vma;
|
||||
struct hmm_range *range;
|
||||
@ -796,25 +797,14 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
||||
uint64_t *pfns;
|
||||
int r = 0;
|
||||
|
||||
if (!mm) /* Happens during process shutdown */
|
||||
return -ESRCH;
|
||||
|
||||
if (unlikely(!mirror)) {
|
||||
DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
|
||||
r = -EFAULT;
|
||||
goto out;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
vma = find_vma(mm, start);
|
||||
if (unlikely(!vma || start < vma->vm_start)) {
|
||||
r = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
|
||||
vma->vm_file)) {
|
||||
r = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
mm = mirror->hmm->mmu_notifier.mm;
|
||||
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
|
||||
return -ESRCH;
|
||||
|
||||
range = kzalloc(sizeof(*range), GFP_KERNEL);
|
||||
if (unlikely(!range)) {
|
||||
@ -847,6 +837,17 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
||||
hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
vma = find_vma(mm, start);
|
||||
if (unlikely(!vma || start < vma->vm_start)) {
|
||||
r = -EFAULT;
|
||||
goto out_unlock;
|
||||
}
|
||||
if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
|
||||
vma->vm_file)) {
|
||||
r = -EPERM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
r = hmm_range_fault(range, 0);
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
@ -865,15 +866,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
||||
}
|
||||
|
||||
gtt->range = range;
|
||||
mmput(mm);
|
||||
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
up_read(&mm->mmap_sem);
|
||||
out_free_pfns:
|
||||
hmm_range_unregister(range);
|
||||
kvfree(pfns);
|
||||
out_free_ranges:
|
||||
kfree(range);
|
||||
out:
|
||||
mmput(mm);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -564,8 +564,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
|
||||
struct drm_property_blob *blob;
|
||||
int ret;
|
||||
|
||||
if (!length || length > MAX_BLOB_PROP_SIZE -
|
||||
sizeof(struct drm_property_blob))
|
||||
if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
|
||||
|
@ -781,6 +781,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
|
||||
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
|
||||
parser->global.report_size == 8)
|
||||
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
|
||||
|
||||
if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
|
||||
parser->global.report_size == 8)
|
||||
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
|
||||
}
|
||||
|
||||
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
|
||||
|
@ -573,6 +573,7 @@
|
||||
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
|
||||
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
|
||||
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
|
||||
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a
|
||||
|
||||
#define USB_VENDOR_ID_HUION 0x256c
|
||||
#define USB_DEVICE_ID_HUION_TABLET 0x006e
|
||||
@ -959,6 +960,7 @@
|
||||
|
||||
#define I2C_VENDOR_ID_RAYDIUM 0x2386
|
||||
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
|
||||
#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118
|
||||
|
||||
#define USB_VENDOR_ID_RAZER 0x1532
|
||||
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
|
||||
|
@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
|
||||
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
|
||||
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
|
||||
NULL, 0, &response);
|
||||
/* Ignore these intermittent errors */
|
||||
if (ret == HIDPP_ERROR_RESOURCE_ERROR)
|
||||
return -EIO;
|
||||
if (ret > 0) {
|
||||
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
|
||||
__func__, ret);
|
||||
|
@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
|
||||
|
@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev)
|
||||
{
|
||||
struct rmi_data *hdata = hid_get_drvdata(hdev);
|
||||
|
||||
if (hdata->device_flags & RMI_DEVICE) {
|
||||
if ((hdata->device_flags & RMI_DEVICE)
|
||||
&& test_bit(RMI_STARTED, &hdata->flags)) {
|
||||
clear_bit(RMI_STARTED, &hdata->flags);
|
||||
cancel_work_sync(&hdata->reset_work);
|
||||
rmi_unregister_transport_device(&hdata->xport);
|
||||
|
@ -170,6 +170,8 @@ static const struct i2c_hid_quirks {
|
||||
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
||||
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
|
||||
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
||||
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
|
||||
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
||||
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
||||
I2C_HID_QUIRK_BOGUS_IRQ },
|
||||
{ 0, 0 }
|
||||
|
@ -2308,7 +2308,7 @@ static void hv_crash_handler(struct pt_regs *regs)
|
||||
vmbus_connection.conn_state = DISCONNECTED;
|
||||
cpu = smp_processor_id();
|
||||
hv_stimer_cleanup(cpu);
|
||||
hv_synic_cleanup(cpu);
|
||||
hv_synic_disable_regs(cpu);
|
||||
hyperv_cleanup();
|
||||
};
|
||||
|
||||
|
@ -1985,6 +1985,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
|
||||
pm_runtime_set_suspended(i2c_dev->dev);
|
||||
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
|
||||
|
||||
if (i2c_dev->dma) {
|
||||
stm32_i2c_dma_free(i2c_dev->dma);
|
||||
i2c_dev->dma = NULL;
|
||||
}
|
||||
|
||||
clk_free:
|
||||
clk_disable_unprepare(i2c_dev->clk);
|
||||
|
||||
@ -1995,21 +2000,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
|
||||
|
||||
if (i2c_dev->dma) {
|
||||
stm32_i2c_dma_free(i2c_dev->dma);
|
||||
i2c_dev->dma = NULL;
|
||||
}
|
||||
|
||||
i2c_del_adapter(&i2c_dev->adap);
|
||||
pm_runtime_get_sync(i2c_dev->dev);
|
||||
|
||||
clk_disable_unprepare(i2c_dev->clk);
|
||||
|
||||
pm_runtime_put_noidle(i2c_dev->dev);
|
||||
pm_runtime_disable(i2c_dev->dev);
|
||||
pm_runtime_set_suspended(i2c_dev->dev);
|
||||
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
|
||||
|
||||
if (i2c_dev->dma) {
|
||||
stm32_i2c_dma_free(i2c_dev->dma);
|
||||
i2c_dev->dma = NULL;
|
||||
}
|
||||
|
||||
clk_disable_unprepare(i2c_dev->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3156,6 +3156,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
|
||||
|
||||
mutex_unlock(&input_dev->mutex);
|
||||
|
||||
disable_irq(data->irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
|
||||
if (!input_dev)
|
||||
return 0;
|
||||
|
||||
enable_irq(data->irq);
|
||||
|
||||
mutex_lock(&input_dev->mutex);
|
||||
|
||||
if (input_dev->users)
|
||||
|
@ -334,7 +334,12 @@ static int ili210x_i2c_probe(struct i2c_client *client,
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
|
||||
touchscreen_parse_properties(input, true, &priv->prop);
|
||||
input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
|
||||
|
||||
error = input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
|
||||
if (error) {
|
||||
dev_err(dev, "Unable to set up slots, err: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
|
||||
error = devm_add_action(dev, ili210x_cancel_work, priv);
|
||||
if (error)
|
||||
|
@ -149,6 +149,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
|
||||
gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
|
||||
}
|
||||
|
||||
static void st1232_ts_power_off(void *data)
|
||||
{
|
||||
st1232_ts_power(data, false);
|
||||
}
|
||||
|
||||
static const struct st_chip_info st1232_chip_info = {
|
||||
.have_z = true,
|
||||
.max_x = 0x31f, /* 800 - 1 */
|
||||
@ -229,6 +234,13 @@ static int st1232_ts_probe(struct i2c_client *client,
|
||||
|
||||
st1232_ts_power(ts, true);
|
||||
|
||||
error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts);
|
||||
if (error) {
|
||||
dev_err(&client->dev,
|
||||
"Failed to install power off action: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
|
||||
input_dev->name = "st1232-touchscreen";
|
||||
input_dev->id.bustype = BUS_I2C;
|
||||
input_dev->dev.parent = &client->dev;
|
||||
@ -271,15 +283,6 @@ static int st1232_ts_probe(struct i2c_client *client,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int st1232_ts_remove(struct i2c_client *client)
|
||||
{
|
||||
struct st1232_ts_data *ts = i2c_get_clientdata(client);
|
||||
|
||||
st1232_ts_power(ts, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused st1232_ts_suspend(struct device *dev)
|
||||
{
|
||||
struct i2c_client *client = to_i2c_client(dev);
|
||||
@ -329,7 +332,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
|
||||
|
||||
static struct i2c_driver st1232_ts_driver = {
|
||||
.probe = st1232_ts_probe,
|
||||
.remove = st1232_ts_remove,
|
||||
.id_table = st1232_ts_id,
|
||||
.driver = {
|
||||
.name = ST1232_TS_NAME,
|
||||
|
@ -3644,19 +3644,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
|
||||
/* Interrupt lines */
|
||||
|
||||
irq = platform_get_irq_byname(pdev, "combined");
|
||||
irq = platform_get_irq_byname_optional(pdev, "combined");
|
||||
if (irq > 0)
|
||||
smmu->combined_irq = irq;
|
||||
else {
|
||||
irq = platform_get_irq_byname(pdev, "eventq");
|
||||
irq = platform_get_irq_byname_optional(pdev, "eventq");
|
||||
if (irq > 0)
|
||||
smmu->evtq.q.irq = irq;
|
||||
|
||||
irq = platform_get_irq_byname(pdev, "priq");
|
||||
irq = platform_get_irq_byname_optional(pdev, "priq");
|
||||
if (irq > 0)
|
||||
smmu->priq.q.irq = irq;
|
||||
|
||||
irq = platform_get_irq_byname(pdev, "gerror");
|
||||
irq = platform_get_irq_byname_optional(pdev, "gerror");
|
||||
if (irq > 0)
|
||||
smmu->gerr_irq = irq;
|
||||
}
|
||||
|
@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
|
||||
if (!dma_dev)
|
||||
return NULL;
|
||||
|
||||
rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
|
||||
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
|
||||
if (!rk_domain)
|
||||
return NULL;
|
||||
|
||||
if (type == IOMMU_DOMAIN_DMA &&
|
||||
iommu_get_dma_cookie(&rk_domain->domain))
|
||||
return NULL;
|
||||
goto err_free_domain;
|
||||
|
||||
/*
|
||||
* rk32xx iommus use a 2 level pagetable.
|
||||
@ -1021,6 +1021,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
|
||||
err_put_cookie:
|
||||
if (type == IOMMU_DOMAIN_DMA)
|
||||
iommu_put_dma_cookie(&rk_domain->domain);
|
||||
err_free_domain:
|
||||
kfree(rk_domain);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
|
||||
|
||||
if (domain->type == IOMMU_DOMAIN_DMA)
|
||||
iommu_put_dma_cookie(&rk_domain->domain);
|
||||
kfree(rk_domain);
|
||||
}
|
||||
|
||||
static int rk_iommu_add_device(struct device *dev)
|
||||
|
@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
|
||||
return (addr & smmu->pfn_mask) == addr;
|
||||
}
|
||||
|
||||
static dma_addr_t smmu_pde_to_dma(u32 pde)
|
||||
static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
|
||||
{
|
||||
return pde << 12;
|
||||
return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
|
||||
}
|
||||
|
||||
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
|
||||
@ -549,6 +549,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
|
||||
dma_addr_t *dmap)
|
||||
{
|
||||
unsigned int pd_index = iova_pd_index(iova);
|
||||
struct tegra_smmu *smmu = as->smmu;
|
||||
struct page *pt_page;
|
||||
u32 *pd;
|
||||
|
||||
@ -557,7 +558,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
|
||||
return NULL;
|
||||
|
||||
pd = page_address(as->pd);
|
||||
*dmap = smmu_pde_to_dma(pd[pd_index]);
|
||||
*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
|
||||
|
||||
return tegra_smmu_pte_offset(pt_page, iova);
|
||||
}
|
||||
@ -599,7 +600,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
|
||||
} else {
|
||||
u32 *pd = page_address(as->pd);
|
||||
|
||||
*dmap = smmu_pde_to_dma(pd[pde]);
|
||||
*dmap = smmu_pde_to_dma(smmu, pd[pde]);
|
||||
}
|
||||
|
||||
return tegra_smmu_pte_offset(as->pts[pde], iova);
|
||||
@ -624,7 +625,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
|
||||
if (--as->count[pde] == 0) {
|
||||
struct tegra_smmu *smmu = as->smmu;
|
||||
u32 *pd = page_address(as->pd);
|
||||
dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
|
||||
dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
|
||||
|
||||
tegra_smmu_set_pde(as, iova, 0);
|
||||
|
||||
|
@ -281,6 +281,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
|
||||
pr_err("failed to map parent interrupt %d\n", parent_irq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (of_property_read_bool(dn, "brcm,irq-can-wake"))
|
||||
enable_irq_wake(parent_irq);
|
||||
|
||||
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
|
||||
intc);
|
||||
|
||||
|
@ -108,6 +108,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
|
||||
goto out_unmap_irq;
|
||||
}
|
||||
|
||||
domain = irq_domain_add_legacy(node, num_chips * 32,
|
||||
JZ4740_IRQ_BASE, 0,
|
||||
&irq_domain_simple_ops, NULL);
|
||||
if (!domain) {
|
||||
err = -ENOMEM;
|
||||
goto out_unmap_base;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_chips; i++) {
|
||||
/* Mask all irqs */
|
||||
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
|
||||
@ -134,14 +142,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
|
||||
IRQ_NOPROBE | IRQ_LEVEL);
|
||||
}
|
||||
|
||||
domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
|
||||
&irq_domain_simple_ops, NULL);
|
||||
if (!domain)
|
||||
pr_warn("unable to register IRQ domain\n");
|
||||
|
||||
setup_irq(parent_irq, &intc_cascade_action);
|
||||
return 0;
|
||||
|
||||
out_unmap_base:
|
||||
iounmap(intc->base);
|
||||
out_unmap_irq:
|
||||
irq_dispose_mapping(parent_irq);
|
||||
out_free:
|
||||
|
@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client)
|
||||
|
||||
chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
|
||||
|
||||
if (IS_ERR(chip->regmap)) {
|
||||
err = PTR_ERR(chip->regmap);
|
||||
dev_err(&client->dev, "Failed to allocate register map: %d\n",
|
||||
err);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
for (i = 0; i < chip->num_leds; i++) {
|
||||
struct led_init_data init_data = {};
|
||||
|
||||
|
@ -334,9 +334,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
|
||||
return ret;
|
||||
}
|
||||
|
||||
led->regulator = devm_regulator_get(&led->client->dev, "vled");
|
||||
if (IS_ERR(led->regulator))
|
||||
led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
|
||||
if (IS_ERR(led->regulator)) {
|
||||
ret = PTR_ERR(led->regulator);
|
||||
if (ret != -ENODEV) {
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(&led->client->dev,
|
||||
"Failed to get vled regulator: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
led->regulator = NULL;
|
||||
}
|
||||
|
||||
child = device_get_next_child_node(&led->client->dev, child);
|
||||
if (!child) {
|
||||
|
@@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);

if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
&& evt != NETDEV_CHANGENAME)
return NOTIFY_DONE;

if (!(dev == trigger_data->net_dev ||
(evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) ||
(evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;

@@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb,

clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
case NETDEV_CHANGENAME:
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);
@@ -214,11 +214,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;

if (cp->type == IMX_MU_TYPE_TXDB)
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
return;
}

imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) |
IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx));
switch (cp->type) {
case IMX_MU_TYPE_TX:
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
break;
case IMX_MU_TYPE_RX:
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
break;
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
break;
default:
break;
}

free_irq(priv->irq, chan);
}
@@ -723,6 +723,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
if (nr == 0)
nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));

i = 0;
@@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
/* not spare disk, or LEVEL_MULTIPATH */
if (sb->level == LEVEL_MULTIPATH ||
(rdev->desc_nr >= 0 &&
rdev->desc_nr < MD_SB_DISKS &&
sb->disks[rdev->desc_nr].state &
((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
spare_disk = false;
@@ -965,17 +965,19 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*
* @ctx : current context
* @vaddr : device virtual address to unmap
* @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
enum vm_type_t *vm_type;
u64 next_vaddr, i;
u32 page_size;

@@ -1003,6 +1005,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)

if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack);

@@ -1014,6 +1017,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,

@@ -1052,12 +1056,18 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)

mutex_unlock(&ctx->mmu_lock);

if (add_va_block(hdev,
is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
vaddr,
vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
vaddr);
/*
* No point in maintaining the free VA block list if the context is
* closing as the list will be freed anyway
*/
if (!ctx_free) {
rc = add_va_block(hdev, va_range, vaddr,
vaddr + phys_pg_pack->total_size - 1);
if (rc)
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n",
vaddr);
}

atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);

@@ -1189,8 +1199,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;

case HL_MEM_OP_UNMAP:
rc = unmap_device_va(ctx,
args->in.unmap.device_virt_addr);
rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
false);
break;

default:

@@ -1620,7 +1630,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
unmap_device_va(ctx, hnode->vaddr);
unmap_device_va(ctx, hnode->vaddr, true);
}

spin_lock(&vm->idr_lock);
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001

/* System Control 2 Register */
#define ESDHC_SYSTEM_CONTROL_2 0x3c
#define ESDHC_SMPCLKSEL 0x00800000
#define ESDHC_EXTN 0x00400000

/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114

@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
#define ESDHC_TB_MODE_MASK 0x00000003
#define ESDHC_TB_MODE_SW 0x00000003
#define ESDHC_TB_MODE_3 0x00000002

#define ESDHC_TBSTAT 0x124

#define ESDHC_TBPTR 0x128
#define ESDHC_WNDW_STRT_PTR_SHIFT 8
#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
#define ESDHC_WNDW_END_PTR_MASK 0x7f

/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
@@ -77,8 +77,11 @@ struct sdhci_esdhc {
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
bool quirk_fixup_tuning;
bool quirk_tuning_erratum_type1;
bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
bool quirk_delay_before_data_reset;
bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;

@@ -408,6 +411,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;

@@ -416,10 +421,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);

/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
* 1us later after ESDHC_EXTN is set.
*/
if (base == ESDHC_SYSTEM_CONTROL_2) {
if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
esdhc->in_sw_tuning) {
udelay(1);
ret |= ESDHC_SMPCLKSEL;
iowrite32be(ret, host->ioaddr + base);
}
}
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;

@@ -428,6 +447,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);

/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
* 1us later after ESDHC_EXTN is set.
*/
if (base == ESDHC_SYSTEM_CONTROL_2) {
if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
esdhc->in_sw_tuning) {
udelay(1);
ret |= ESDHC_SMPCLKSEL;
iowrite32(ret, host->ioaddr + base);
}
}
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
@@ -705,6 +736,11 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 val;

if (esdhc->quirk_delay_before_data_reset &&
(mask & SDHCI_RESET_DATA) &&
(host->flags & SDHCI_REQ_USE_DMA))
mdelay(5);

sdhci_reset(host, mask);

sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);

@@ -793,16 +829,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}

static struct soc_device_attribute soc_fixup_tuning[] = {
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
{ .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ T1040", .revision = "1.0", },
{ .family = "QorIQ T2080", .revision = "1.0", },
{ .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ LS1021A", .revision = "1.0", },
{ .family = "QorIQ LS1080A", .revision = "1.0", },
{ .family = "QorIQ LS2080A", .revision = "1.0", },
{ },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", .revision = "1.0", },
{ .family = "QorIQ LS1043A", .revision = "1.*", },
{ .family = "QorIQ LS1046A", .revision = "1.0", },
{ .family = "QorIQ LS1080A", .revision = "1.0", },
{ .family = "QorIQ LS2080A", .revision = "1.0", },
{ .family = "QorIQ LA1575A", .revision = "1.0", },
{ },
};
@@ -826,15 +867,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}

static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u8 tbstat_15_8, tbstat_7_0;
u32 val;

if (esdhc->quirk_tuning_erratum_type1) {
*window_start = 5 * esdhc->div_ratio;
*window_end = 3 * esdhc->div_ratio;
return;
}

/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
val |= 8 << 8;
sdhci_writel(host, val, ESDHC_TBCTL);

mdelay(1);

/* Read TBCTL[31:0] register and rewrite again */
val = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, val, ESDHC_TBCTL);

mdelay(1);

/* Read the TBSTAT[31:0] register twice */
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);

/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

/* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
* or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
tbstat_7_0 = val & 0xff;
tbstat_15_8 = (val >> 8) & 0xff;

if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
*window_start = 5 * esdhc->div_ratio;
*window_end = 3 * esdhc->div_ratio;
}
}

static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
u8 window_start, u8 window_end)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 val;
int ret;

/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
ESDHC_WNDW_STRT_PTR_MASK;
val |= window_end & ESDHC_WNDW_END_PTR_MASK;
sdhci_writel(host, val, ESDHC_TBPTR);

/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_MODE_MASK;
val |= ESDHC_TB_MODE_SW;
sdhci_writel(host, val, ESDHC_TBCTL);

esdhc->in_sw_tuning = true;
ret = sdhci_execute_tuning(mmc, opcode);
esdhc->in_sw_tuning = false;
return ret;
}
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u8 window_start, window_end;
int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
int ret;

/* For tuning mode, the sd clock divisor value
* must be larger than 3 according to reference manual.

@@ -843,39 +966,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->clock > clk)
esdhc_of_set_clock(host, clk);

if (esdhc->quirk_limited_clk_division &&
host->flags & SDHCI_HS400_TUNING)
esdhc_of_set_clock(host, host->clock);

esdhc_tuning_block_enable(host, true);

hs400_tuning = host->flags & SDHCI_HS400_TUNING;
ret = sdhci_execute_tuning(mmc, opcode);

if (hs400_tuning) {
do {
if (esdhc->quirk_limited_clk_division &&
hs400_tuning)
esdhc_of_set_clock(host, host->clock);

/* Do HW tuning */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_MODE_MASK;
val |= ESDHC_TB_MODE_3;
sdhci_writel(host, val, ESDHC_TBCTL);

ret = sdhci_execute_tuning(mmc, opcode);
if (ret)
break;

/* If HW tuning fails and triggers erratum,
* try workaround.
*/
ret = host->tuning_err;
if (ret == -EAGAIN &&
(esdhc->quirk_tuning_erratum_type1 ||
esdhc->quirk_tuning_erratum_type2)) {
/* Recover HS400 tuning flag */
if (hs400_tuning)
host->flags |= SDHCI_HS400_TUNING;
pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
mmc_hostname(mmc));
/* Do SW tuning */
esdhc_prepare_sw_tuning(host, &window_start,
&window_end);
ret = esdhc_execute_sw_tuning(mmc, opcode,
window_start,
window_end);
if (ret)
break;

/* Retry both HW/SW tuning with reduced clock. */
ret = host->tuning_err;
if (ret == -EAGAIN && retries) {
/* Recover HS400 tuning flag */
if (hs400_tuning)
host->flags |= SDHCI_HS400_TUNING;

clk = host->max_clk / (esdhc->div_ratio + 1);
esdhc_of_set_clock(host, clk);
pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
mmc_hostname(mmc));
} else {
break;
}
} else {
break;
}
} while (retries--);

if (ret) {
esdhc_tuning_block_enable(host, false);
} else if (hs400_tuning) {
val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
val |= ESDHC_FLW_CTL_BG;
sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}

if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {

/* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
* program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
*/
val = sdhci_readl(host, ESDHC_TBPTR);
val = (val & ~((0x7f << 8) | 0x7f)) |
(3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
sdhci_writel(host, val, ESDHC_TBPTR);

/* program the software tuning mode by setting
* TBCTL[TB_MODE]=2'h3
*/
val = sdhci_readl(host, ESDHC_TBCTL);
val |= 0x3;
sdhci_writel(host, val, ESDHC_TBCTL);
sdhci_execute_tuning(mmc, opcode);
}
return ret;
}
@@ -1046,6 +1203,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
if (match)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;

if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
esdhc->quirk_delay_before_data_reset = true;

clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
/*

@@ -1111,10 +1272,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)

pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
if (soc_device_match(soc_fixup_tuning))
esdhc->quirk_fixup_tuning = true;
if (soc_device_match(soc_tuning_erratum_type1))
esdhc->quirk_tuning_erratum_type1 = true;
else
esdhc->quirk_fixup_tuning = false;
esdhc->quirk_tuning_erratum_type1 = false;

if (soc_device_match(soc_tuning_erratum_type2))
esdhc->quirk_tuning_erratum_type2 = true;
else
esdhc->quirk_tuning_erratum_type2 = false;

if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
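The esdhc changes above split the tuning-erratum handling into a window-selection step and a software-tuning step. A minimal, self-contained sketch of just the window selection (plain C with stdint types; the 4 * div_ratio threshold and the 8/4 and 5/3 multipliers come from the hunks above, everything else is illustrative and not the driver's code):

#include <stdint.h>
#include <stdlib.h>

/* Pick the software-tuning window from the two TBSTAT pointers. */
static void pick_tuning_window(uint32_t tbstat, uint32_t div_ratio,
                               uint8_t *window_start, uint8_t *window_end)
{
        uint8_t lo = tbstat & 0xff;
        uint8_t hi = (tbstat >> 8) & 0xff;

        if ((uint32_t)abs(hi - lo) > 4 * div_ratio) {
                /* erratum case: use the wide window */
                *window_start = 8 * div_ratio;
                *window_end = 4 * div_ratio;
        } else {
                /* default window */
                *window_start = 5 * div_ratio;
                *window_end = 3 * div_ratio;
        }
}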
@@ -2225,9 +2225,6 @@ static void bond_miimon_commit(struct bonding *bond)
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
} else if (slave != primary) {
/* prevent it from being the active one */
bond_set_backup_slave(slave);
}

slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}

ip_frag = be32_to_cpu(fs->m_ext.data[0]);
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)

@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,

if (rule->fs.flow_type != fs->flow_type ||
rule->fs.ring_cookie != fs->ring_cookie ||
rule->fs.m_ext.data[0] != fs->m_ext.data[0])
rule->fs.h_ext.data[0] != fs->h_ext.data[0])
continue;

switch (fs->flow_type & ~FLOW_EXT) {

@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}

ip_frag = be32_to_cpu(fs->m_ext.data[0]);
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
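All three bcm_sf2 hunks move the IP-fragment flag from the mask word to bit 0 of the user-supplied value word. A tiny self-contained sketch of that extraction (the helper name is made up; in the driver the value is big-endian and goes through be32_to_cpu() first):

#include <stdbool.h>
#include <stdint.h>

/* Fragment matching is requested when bit 0 of the value word is set. */
static bool rule_wants_ip_fragments(uint32_t h_ext_data0)
{
        return (h_ext_data0 & 1) != 0;
}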
@@ -1560,8 +1560,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)

if (enabled) {
/* Enable VLAN filtering. */
tpid = ETH_P_8021AD;
tpid2 = ETH_P_8021Q;
tpid = ETH_P_8021Q;
tpid2 = ETH_P_8021AD;
} else {
/* Disable VLAN filtering. */
tpid = ETH_P_SJA1105;

@@ -1570,9 +1570,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)

table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid = tpid;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
general_params->tpid = tpid;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
/* When VLAN filtering is on, we need to at least be able to
* decode management traffic through the "backup plan".

@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
return size;
}

/* TPID and TPID2 are intentionally reversed so that semantic
* compatibility with E/T is kept.
*/
static size_t
sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)

@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing,
sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
@@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;

u32 tx_work_done;
u32 rx_work_done;
int tx_work_done;
int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;

@@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}

tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* On netpoll the budget is zero and the handler should only clean the
* tx completions.
*/
if (likely(budget))
rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

/* If the device is about to reset or down, avoid unmask
* the interrupt and return 0 so NAPI won't reschedule
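The ena_io_poll fix only matters when NAPI is invoked with a zero budget (netpoll): TX completions are still reaped, but no RX work may be performed or reported. A small self-contained sketch of just that decision (names are illustrative, not the driver's):

/* How much RX work a poll call may report for a given budget. */
static int rx_work_allowed(int budget, int rx_pending)
{
        if (budget == 0)        /* netpoll: clean TX only, report no RX work */
                return 0;
        return rx_pending < budget ? rx_pending : budget;
}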
@@ -1995,6 +1995,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
u32 data1 = le32_to_cpu(cmpl->event_data1);

if (!bp->fw_health)
goto async_event_process_exit;

bp->fw_reset_timestamp = jiffies;
bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
if (!bp->fw_reset_min_dsecs)

@@ -4438,8 +4441,9 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER);

req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
req.flags = cpu_to_le32(flags);

@@ -6174,7 +6178,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
val = clamp_t(u16, tmr, 1,
coal_cap->cmpl_aggr_dma_tmr_during_int_max);
req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
req->enables |=
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}

@@ -7096,14 +7100,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
goto err_recovery_out;
if (!fw_health) {
fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
bp->fw_health = fw_health;
if (!fw_health) {
rc = -ENOMEM;
goto err_recovery_out;
}
}
fw_health->flags = le32_to_cpu(resp->flags);
if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
!(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
@@ -8766,6 +8762,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);

@@ -9954,8 +9953,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 val;

if (!fw_health || !fw_health->enabled ||
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;

if (fw_health->tmr_counter) {

@@ -10416,6 +10414,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}

static void bnxt_alloc_fw_health(struct bnxt *bp)
{
if (bp->fw_health)
return;

if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return;

bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
if (!bp->fw_health) {
netdev_warn(bp->dev, "Failed to allocate fw_health\n");
bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}

static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;

@@ -10462,6 +10477,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);

bnxt_alloc_fw_health(bp);
rc = bnxt_hwrm_error_recovery_qcfg(bp);
if (rc)
netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",

@@ -10547,6 +10563,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;

/* In case fw capabilities have changed, destroy the unneeded
* reporters and create newly capable ones.
*/
bnxt_dl_fw_reporters_destroy(bp, false);
bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}

@@ -10680,8 +10702,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
case BNXT_FW_RESET_STATE_ENABLE_DEV:
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
bp->fw_health) {
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;

val = bnxt_fw_health_readl(bp,

@@ -11322,11 +11343,11 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);

if (BNXT_PF(bp)) {
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
bnxt_dl_unregister(bp);
}

bnxt_dl_fw_reporters_destroy(bp, true);
bnxt_dl_unregister(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);

@@ -11341,6 +11362,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);

@@ -11820,8 +11843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_cleanup_tc;

if (BNXT_PF(bp))
bnxt_dl_register(bp);
bnxt_dl_register(bp);
bnxt_dl_fw_reporters_create(bp);

netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
@@ -1658,6 +1658,7 @@ struct bnxt {
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000

#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -19,11 +19,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *health = bp->fw_health;
u32 val, health_status;
int rc;

if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;

val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

@@ -103,21 +102,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
.recover = bnxt_fw_fatal_recover,
};

static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;

if (!health)
if (!bp->dl || !health)
return;

health->fw_reporter =
devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
0, false, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_reporter = NULL;
}
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
goto err_recovery;

health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,

@@ -127,8 +120,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
health->fw_reset_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
}

err_recovery:
if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return;

if (!health->fw_reporter) {
health->fw_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reporter_ops,
0, false, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
return;
}
}

if (health->fw_fatal_reporter)
return;

health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,

@@ -137,24 +152,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
health->fw_fatal_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}
static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;

if (!health)
if (!bp->dl || !health)
return;

if (health->fw_reporter)
devlink_health_reporter_destroy(health->fw_reporter);

if (health->fw_reset_reporter)
if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
health->fw_reset_reporter) {
devlink_health_reporter_destroy(health->fw_reset_reporter);
health->fw_reset_reporter = NULL;
}

if (health->fw_fatal_reporter)
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
return;

if (health->fw_reporter) {
devlink_health_reporter_destroy(health->fw_reporter);
health->fw_reporter = NULL;
}

if (health->fw_fatal_reporter) {
devlink_health_reporter_destroy(health->fw_fatal_reporter);
health->fw_fatal_reporter = NULL;
}
}

void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)

@@ -162,9 +188,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
struct bnxt_fw_health *fw_health = bp->fw_health;
struct bnxt_fw_reporter_ctx fw_reporter_ctx;

if (!fw_health)
return;

fw_reporter_ctx.sp_event = event;
switch (event) {
case BNXT_FW_RESET_NOTIFY_SP_EVENT:

@@ -203,6 +226,8 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};

static const struct devlink_ops bnxt_vf_dl_ops;

enum bnxt_dl_param_id {
BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,

@@ -416,7 +441,10 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOTSUPP;
}

dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
if (BNXT_PF(bp))
dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
else
dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed");
return -ENOMEM;

@@ -435,6 +463,9 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_free;
}

if (!BNXT_PF(bp))
return 0;

rc = devlink_params_register(dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
if (rc) {

@@ -462,8 +493,6 @@ int bnxt_dl_register(struct bnxt *bp)

devlink_params_publish(dl);

bnxt_dl_fw_reporters_create(bp);

return 0;

err_dl_port_unreg:

@@ -486,12 +515,14 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;

bnxt_dl_fw_reporters_destroy(bp);
devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
devlink_params_unregister(dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
if (BNXT_PF(bp)) {
devlink_port_params_unregister(&bp->dl_port,
bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
devlink_params_unregister(dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
}
devlink_unregister(dl);
devlink_free(dl);
}
@@ -57,6 +57,8 @@ struct bnxt_dl_nvm_param {
};

void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
@@ -3064,8 +3064,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
}
}

if (info->dest_buf)
memcpy(info->dest_buf + off, dma_buf, len);
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}

if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))

@@ -3119,7 +3126,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,

static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 offset)
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input req = {0};
struct bnxt_hwrm_dbg_dma_info info = {NULL};

@@ -3134,8 +3141,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
if (buf)
if (buf) {
info.dest_buf = buf + offset;
info.buf_len = buf_len;
info.seg_start = offset;
}

rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
if (!rc)

@@ -3225,14 +3235,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
struct coredump_segment_record *seg_record = NULL;
u32 offset = 0, seg_hdr_len, seg_record_len;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_coredump coredump = {NULL};
time64_t start_time;
u16 start_utc;
int rc = 0, i;

if (buf)
buf_len = *dump_len;

start_time = ktime_get_real_seconds();
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);

@@ -3265,6 +3278,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
u32 duration = 0, seg_len = 0;
unsigned long start, end;

if (buf && ((offset + seg_hdr_len) >
BNXT_COREDUMP_BUF_LEN(buf_len))) {
rc = -ENOBUFS;
goto err;
}

start = jiffies;

rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);

@@ -3277,9 +3296,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)

/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
&seg_len, buf,
&seg_len, buf, buf_len,
offset + seg_hdr_len);
if (rc)
if (rc && rc == -ENOBUFS)
goto err;
else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);

@@ -3309,7 +3330,8 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
rc);
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);

if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer");
return rc;
}
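The coredump changes above bound every segment copy by the dump length minus the trailing coredump record, and map an overflow to -ENOBUFS when the firmware hands back more data than was requested. A self-contained sketch of that bounds check (sizes and names are placeholders, not the driver's structures):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RECORD_LEN 128u                          /* stand-in for the trailing record */
#define USABLE_LEN(buf_len) ((buf_len) - RECORD_LEN)

/* Copy one retrieved chunk only if it still fits inside the dump buffer. */
static bool copy_coredump_chunk(uint8_t *dest_buf, uint32_t buf_len,
                                uint32_t seg_start, uint32_t off,
                                const uint8_t *src, uint32_t len)
{
        if (seg_start + off + len > USABLE_LEN(buf_len))
                return false;                    /* caller maps this to -ENOBUFS */
        memcpy(dest_buf + off, src, len);
        return true;
}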
@@ -31,6 +31,8 @@ struct bnxt_coredump {
u16 total_segs;
};

#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))

struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;

@@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
u16 seq_off;
u16 data_len_off;
u16 segs;
u32 seg_start;
u32 buf_len;
};

struct hwrm_dbg_cmn_input {
@@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_hw_resc *hw_resc;
int max_idx, max_cp_rings;
int avail_msix, idx;
int total_vecs;
int rc = 0;

ASSERT_RTNL();

@@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
}
edev->ulp_tbl[ulp_id].msix_base = idx;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
if (bp->total_irqs < (idx + avail_msix)) {
hw_resc = &bp->hw_resc;
total_vecs = idx + avail_msix;
if (bp->total_irqs < total_vecs ||
(BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);

@@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
}

if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int resv_msix;

resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
@@ -503,6 +503,7 @@ struct link_config {

enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
enum cc_pause advertised_fc; /* actual advertised flow control */

enum cc_fec requested_fec; /* Forward Error Correction: */
enum cc_fec fec; /* requested and actual in use */

@@ -793,8 +793,8 @@ static void get_pauseparam(struct net_device *dev,
struct port_info *p = netdev_priv(dev);

epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,

@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (cc_pause & PAUSE_TX)
fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
else
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
FW_PORT_CAP32_802_3_PAUSE;
} else if (cc_pause & PAUSE_TX) {
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
}

@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
const struct fw_port_cmd *cmd = (const void *)rpl;
int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
struct adapter *adapter = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int link_ok, linkdnrc;
enum fw_port_type port_type;
enum fw_port_module_type mod_type;
unsigned int speed, fc, fec;
fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
struct adapter *adapter = pi->adapter;
unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
int action, link_ok, linkdnrc;
enum fw_port_type port_type;

/* Extract the various fields from the Port Information message.
*/
action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}

fec = fwcap_to_cc_fec(acaps);
adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);

@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}

if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc || fec != lc->fec) { /* something changed */
fc != lc->fc || adv_fc != lc->advertised_fc ||
fec != lc->fec) {
/* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,

@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
lc->link_ok = link_ok;
lc->speed = speed;
lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
struct port_info *pi = netdev_priv(dev);

pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
}

/*

@@ -135,6 +135,7 @@ struct link_config {

enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
enum cc_pause advertised_fc; /* actual advertised flow control */

enum cc_fec auto_fec; /* Forward Error Correction: */
enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */

@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
static void t4vf_handle_get_port_info(struct port_info *pi,
const struct fw_port_cmd *cmd)
{
int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
struct adapter *adapter = pi->adapter;
struct link_config *lc = &pi->link_cfg;
int link_ok, linkdnrc;
enum fw_port_type port_type;
enum fw_port_module_type mod_type;
unsigned int speed, fc, fec;
fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
struct adapter *adapter = pi->adapter;
unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
int action, link_ok, linkdnrc;
enum fw_port_type port_type;

/* Extract the various fields from the Port Information message. */
action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}

fec = fwcap_to_cc_fec(acaps);
adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);

@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}

if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc || fec != lc->fec) { /* something changed */
fc != lc->fc || adv_fc != lc->advertised_fc ||
fec != lc->fec) {
/* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,

@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
lc->link_ok = link_ok;
lc->speed = speed;
lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
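Both cxgb4 and cxgb4vf now track the advertised pause settings (derived from acaps) separately from the operational ones and report the former through ethtool. A minimal sketch of that reporting split (flag values are illustrative, not the driver's):

#include <stdbool.h>

#define PAUSE_RX 0x1
#define PAUSE_TX 0x2

/* get_pauseparam should reflect what autonegotiation advertised,
 * not the currently negotiated state. */
static void fill_pauseparam(unsigned int advertised_fc, bool *rx_pause, bool *tx_pause)
{
        *rx_pause = (advertised_fc & PAUSE_RX) != 0;
        *tx_pause = (advertised_fc & PAUSE_TX) != 0;
}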
@@ -3674,7 +3674,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}

if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
if (priv->hw_version == MVPP22 && port->link_irq) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"

@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;

comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;

@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
kfree(comp_data);
vfree(comp_data);
return ERR_PTR(err);
}

@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;

comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
kfree(comp_data);
vfree(comp_data);
}

void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
@@ -5421,6 +5421,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,

__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1

@@ -4398,8 +4398,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),

@@ -4483,6 +4483,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
rate = 19 * 1024;
burst_size = 12;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
rate = 360;
burst_size = 7;
break;
default:
continue;
}

@@ -4522,6 +4526,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
priority = 5;
tc = 5;
break;

@@ -6985,6 +6985,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,

for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
rif = mlxsw_sp->router->rifs[i];
if (rif && rif->ops &&
rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
continue;
if (rif && rif->dev && rif->dev != dev &&
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
mlxsw_sp->mac_mask)) {
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
struct meson8b_dwmac_clk_configs *clk_configs;
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
{ .div = 3, .val = 3, },
{ .div = 4, .val = 4, },
{ .div = 5, .val = 5, },
{ .div = 6, .val = 6, },
{ .div = 7, .val = 7, },
};

clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
if (!clk_configs)

@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO |
CLK_DIVIDER_ROUND_CLOSEST;
clk_configs->m250_div.table = div_table;
clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
CLK_DIVIDER_ROUND_CLOSEST;
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
&clk_divider_ops,
&clk_configs->m250_div.hw);
@@ -38,7 +38,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;

union {
u64 tid;
struct {
u64 tid;
u16 flow;

@@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}

rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);

if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {

@@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static void gtp_destructor(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);

kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}

static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)

@@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;

if (!data[IFLA_GTP_PDP_HASHSIZE])
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
else
} else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
if (!hashsize)
hashsize = 1024;
}

err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)

@@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,

gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;

netdev_dbg(dev, "registered new GTP interface\n");

return 0;

out_hashtable:
gtp_hashtable_free(gtp);
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
out_encap:
gtp_encap_disable(gtp);
return err;

@@ -693,8 +704,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct pdp_ctx *pctx;
int i;

for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);

gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}

@@ -772,20 +788,6 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
return -ENOMEM;
}

static void gtp_hashtable_free(struct gtp_dev *gtp)
{
struct pdp_ctx *pctx;
int i;

for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);

synchronize_rcu();
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}

static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
@@ -926,24 +928,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
 	}
 }
 
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
-			struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+		       struct genl_info *info)
 {
+	struct pdp_ctx *pctx, *pctx_tid = NULL;
 	struct net_device *dev = gtp->dev;
 	u32 hash_ms, hash_tid = 0;
-	struct pdp_ctx *pctx;
+	unsigned int version;
 	bool found = false;
 	__be32 ms_addr;
 
 	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
 	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+	version = nla_get_u32(info->attrs[GTPA_VERSION]);
 
-	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
-		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
-			found = true;
-			break;
-		}
-	}
+	pctx = ipv4_pdp_find(gtp, ms_addr);
+	if (pctx)
+		found = true;
+	if (version == GTP_V0)
+		pctx_tid = gtp0_pdp_find(gtp,
+					 nla_get_u64(info->attrs[GTPA_TID]));
+	else if (version == GTP_V1)
+		pctx_tid = gtp1_pdp_find(gtp,
+					 nla_get_u32(info->attrs[GTPA_I_TEI]));
+	if (pctx_tid)
+		found = true;
 
 	if (found) {
 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -951,6 +960,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
 		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
 
+		if (pctx && pctx_tid)
+			return -EEXIST;
+		if (!pctx)
+			pctx = pctx_tid;
+
 		ipv4_pdp_fill(pctx, info);
 
 		if (pctx->gtp_version == GTP_V0)
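With the change above, an add request is looked up by both keys, the mobile-subscriber address and the tunnel id, and is refused or reused accordingly. A compact sketch of that decision follows; the names are hypothetical and only errno.h's EEXIST stands in for the kernel error codes:

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

struct pdp { unsigned int addr, tid; };

/* Mirrors the shape of the hunks above: refuse when both keys already map
 * to a context, otherwise update whichever one matched, or create anew. */
static struct pdp *resolve_add(struct pdp *by_addr, struct pdp *by_tid, int *err)
{
        *err = 0;
        if (by_addr && by_tid) {
                *err = -EEXIST;          /* duplicate addr *and* tid */
                return NULL;
        }
        if (by_addr || by_tid)
                return by_addr ? by_addr : by_tid;   /* update the existing one */
        return NULL;                     /* caller allocates a new context */
}

int main(void)
{
        struct pdp a = { .addr = 1, .tid = 10 };
        int err;

        struct pdp *hit = resolve_add(&a, NULL, &err);
        printf("update=%p err=%d\n", (void *)hit, err);   /* update existing */

        hit = resolve_add(&a, &a, &err);
        printf("update=%p err=%d\n", (void *)hit, err);   /* err = -EEXIST */
        return 0;
}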
@@ -1074,7 +1088,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
 		goto out_unlock;
 	}
 
-	err = ipv4_pdp_add(gtp, sk, info);
+	err = gtp_pdp_add(gtp, sk, info);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1232,43 +1246,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
 				struct netlink_callback *cb)
 {
 	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+	int i, j, bucket = cb->args[0], skip = cb->args[1];
 	struct net *net = sock_net(skb->sk);
-	struct gtp_net *gn = net_generic(net, gtp_net_id);
-	unsigned long tid = cb->args[1];
-	int i, k = cb->args[0], ret;
 	struct pdp_ctx *pctx;
+	struct gtp_net *gn;
+
+	gn = net_generic(net, gtp_net_id);
 
 	if (cb->args[4])
 		return 0;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
 		if (last_gtp && last_gtp != gtp)
 			continue;
 		else
 			last_gtp = NULL;
 
-		for (i = k; i < gtp->hash_size; i++) {
-			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
-				if (tid && tid != pctx->u.tid)
-					continue;
-				else
-					tid = 0;
-
-				ret = gtp_genl_fill_info(skb,
-							 NETLINK_CB(cb->skb).portid,
-							 cb->nlh->nlmsg_seq,
-							 cb->nlh->nlmsg_type, pctx);
-				if (ret < 0) {
+		for (i = bucket; i < gtp->hash_size; i++) {
+			j = 0;
+			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+						 hlist_tid) {
+				if (j >= skip &&
+				    gtp_genl_fill_info(skb,
+					    NETLINK_CB(cb->skb).portid,
+					    cb->nlh->nlmsg_seq,
+					    cb->nlh->nlmsg_type, pctx)) {
 					cb->args[0] = i;
-					cb->args[1] = pctx->u.tid;
+					cb->args[1] = j;
 					cb->args[2] = (unsigned long)gtp;
 					goto out;
 				}
+				j++;
 			}
+			skip = 0;
 		}
+		bucket = 0;
 	}
 	cb->args[4] = 1;
 out:
+	rcu_read_unlock();
 	return skb->len;
 }
 
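The rewritten dump above keeps a (bucket, skip) cursor in cb->args[] and resumes at exactly that position on the next netlink callback, instead of filtering on the last dumped tid. A standalone sketch of that resumable walk, with a hypothetical fixed-size table in place of the hash buckets:

#include <stdio.h>

#define NBUCKETS   4
#define PER_BUCKET 3

static const int table[NBUCKETS][PER_BUCKET] = {
        {  1,  2,  3 },
        {  4,  5,  6 },
        {  7,  8,  9 },
        { 10, 11, 12 },
};

/* Emit up to 'budget' entries starting at the (*bucket, *skip) cursor.
 * Returns 1 when interrupted (cursor updated for the next call), 0 when
 * the whole table has been walked. */
static int dump(int *bucket, int *skip, int budget)
{
        for (int i = *bucket; i < NBUCKETS; i++) {
                for (int j = 0; j < PER_BUCKET; j++) {
                        if (j < *skip)
                                continue;        /* already dumped last time */
                        if (budget-- == 0) {
                                *bucket = i;     /* resume right here */
                                *skip = j;
                                return 1;
                        }
                        printf("%d ", table[i][j]);
                }
                *skip = 0;                       /* next bucket starts fresh */
        }
        return 0;
}

int main(void)
{
        int bucket = 0, skip = 0;

        while (dump(&bucket, &skip, 5))
                printf("| resume at bucket=%d skip=%d\n", bucket, skip);
        printf("| done\n");
        return 0;
}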
@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
 {
 	struct sixpack *sp;
 
-	write_lock_bh(&disc_data_lock);
+	write_lock_irq(&disc_data_lock);
 	sp = tty->disc_data;
 	tty->disc_data = NULL;
-	write_unlock_bh(&disc_data_lock);
+	write_unlock_irq(&disc_data_lock);
 	if (!sp)
 		return;
 
@@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
 {
 	struct mkiss *ax;
 
-	write_lock_bh(&disc_data_lock);
+	write_lock_irq(&disc_data_lock);
 	ax = tty->disc_data;
 	tty->disc_data = NULL;
-	write_unlock_bh(&disc_data_lock);
+	write_unlock_irq(&disc_data_lock);
 
 	if (!ax)
 		return;
@@ -1165,6 +1165,9 @@ int rndis_set_subchannel(struct net_device *ndev,
 	wait_event(nvdev->subchan_open,
 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
+	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
 	/* ignore failures from setting rss parameters, still have channels */
 	if (dev_info)
 		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
@@ -1174,9 +1177,6 @@ int rndis_set_subchannel(struct net_device *ndev,
 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
 
-	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
-		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
-
 	return 0;
 }
 
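The two rndis_set_subchannel() hunks move the send-table initialisation ahead of the RSS and queue setup; the table itself is just a round-robin map of send slots onto the opened channels. A tiny sketch of the mapping that loop builds, where the table size and channel count are assumptions for the sketch:

#include <stdio.h>

#define SEND_TAB_SIZE 16                    /* assumed table size for the sketch */

int main(void)
{
        unsigned int tx_table[SEND_TAB_SIZE];
        unsigned int num_chn = 4;           /* hypothetical channel count */

        /* same round-robin fill as the loop in the hunk above */
        for (unsigned int i = 0; i < SEND_TAB_SIZE; i++)
                tx_table[i] = i % num_chn;

        for (unsigned int i = 0; i < SEND_TAB_SIZE; i++)
                printf("slot %2u -> channel %u\n", i, tx_table[i]);
        return 0;
}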
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
 	.config_intr	= aqr_config_intr,
 	.ack_interrupt	= aqr_ack_interrupt,
 	.read_status	= aqr_read_status,
+	.suspend	= aqr107_suspend,
+	.resume		= aqr107_resume,
 },
 {
 	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
@@ -444,8 +444,7 @@ static void phylink_mac_link_up(struct phylink *pl,
 
 	pl->cur_interface = link_state.interface;
 	pl->ops->mac_link_up(pl->config, pl->link_an_mode,
-			     pl->phy_state.interface,
-			     pl->phydev);
+			     pl->cur_interface, pl->phydev);
 
 	if (ndev)
 		netif_carrier_on(ndev);
Some files were not shown because too many files have changed in this diff.