This is the 6.1.35 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmSTArUACgkQONu9yGCS
aT7L1w//S9q9UHGxXd4awCjyj3doC4UMaMjH0b/BT8GmDM5lpbqqvWBgWe7zrvnK
L/VOGj6pkgBuH7uBLmRVZKkRwnyFcxKnW+BwVxn3WFE5UjCBrxqjry228Wp9FCR9
i/4C/6ls+vA+Ll2GkvSVogPkTwtC1A8hsI6r4qQMbNyeTySd9SF6nfo3uR2olBW6
UN3NdIY6Lzo7t/Jbn+4zLhgDbQ28bn3IymS8jPAFQBoRHNOeKh6UFEu7Mzjbn6Nb
YrcHxr4/xAjnof7JV1JZOVVA2BoUCIStB4INv6Ke9XucIs8Y0UzU7MeeqPLh1lBO
aKhGADdO3shj0OGsLfmzfYlGu5+5lQ3yFN8ZVTzQTYTZUFLnddUjB3a/DwKR0Nza
A6LjUID2jRmoNtMWDHlX9Wqyv5iuIzl2hqs2jGc3o/EHAQ2IFlMMee+mjkEqmalO
iRyCt5Ekr/P8MrTQHPcJ26uPh+TGvcooBSGSKu6WNFozNQ7sxSl7QJONLMs0tUtk
JzcTxw7cAtbxcsfDX0k3z2Mp6d1YuQvwo22mVYioO7+nhB6MaRNVPkhbVf3koMeD
lB/ISmr8Lv4CZLS4xYUlYHQyPGT+YTkLpbncV+WQe4+9ounbuz71tKpoB7XbBVgY
52Qc9aLAAKi8yyFbNRoxWo98RXx5fWTzQMMqM2N2KExsF75EW/k=
=/x/n
-----END PGP SIGNATURE-----

Merge 6.1.35 into android14-6.1-lts

Changes in 6.1.35
	x86/head/64: Switch to KERNEL_CS as soon as new GDT is installed
	test_firmware: Use kstrtobool() instead of strtobool()
	test_firmware: prevent race conditions by a correct implementation of locking
	cgroup: bpf: use cgroup_lock()/cgroup_unlock() wrappers
	cgroup: always put cset in cgroup_css_set_put_fork
	cgroup: fix missing cpus_read_{lock,unlock}() in cgroup_transfer_tasks()
	qcom: llcc/edac: Fix the base address used for accessing LLCC banks
	EDAC/qcom: Get rid of hardcoded register offsets
	ksmbd: validate smb request protocol id
	of: overlay: Fix missing of_node_put() in error case of init_overlay_changeset()
	power: supply: ab8500: Fix external_power_changed race
	power: supply: sc27xx: Fix external_power_changed race
	power: supply: bq27xxx: Use mod_delayed_work() instead of cancel() + schedule()
	ARM: dts: vexpress: add missing cache properties
	tools: gpio: fix debounce_period_us output of lsgpio
	selftests: gpio: gpio-sim: Fix BUG: test FAILED due to recent change
	power: supply: Ratelimit no data debug output
	PCI/DPC: Quirk PIO log size for Intel Ice Lake Root Ports
	platform/x86: asus-wmi: Ignore WMI events with codes 0x7B, 0xC0
	regulator: Fix error checking for debugfs_create_dir
	irqchip/gic-v3: Disable pseudo NMIs on Mediatek devices w/ firmware issues
	irqchip/meson-gpio: Mark OF related data as maybe unused
	power: supply: Fix logic checking if system is running from battery
	drm: panel-orientation-quirks: Change Air's quirk to support Air Plus
	btrfs: scrub: try harder to mark RAID56 block groups read-only
	btrfs: handle memory allocation failure in btrfs_csum_one_bio
	ASoC: soc-pcm: test if a BE can be prepared
	ASoC: Intel: avs: Account for UID of ACPI device
	ASoC: Intel: avs: Add missing checks on FE startup
	parisc: Improve cache flushing for PCXL in arch_sync_dma_for_cpu()
	parisc: Flush gatt writes and adjust gatt mask in parisc_agp_mask_memory()
	MIPS: unhide PATA_PLATFORM
	MIPS: Restore Au1300 support
	MIPS: Alchemy: fix dbdma2
	mips: Move initrd_start check after initrd address sanitisation.
	ASoC: cs35l41: Fix default regmap values for some registers
	ASoC: dwc: move DMA init to snd_soc_dai_driver probe()
	xen/blkfront: Only check REQ_FUA for writes
	drm:amd:amdgpu: Fix missing buffer object unlock in failure path
	io_uring: unlock sqd->lock before sq thread release CPU
	NVMe: Add MAXIO 1602 to bogus nid list.
	irqchip/gic: Correctly validate OF quirk descriptors
	wifi: cfg80211: fix locking in regulatory disconnect
	wifi: cfg80211: fix double lock bug in reg_wdev_chan_valid()
	epoll: ep_autoremove_wake_function should use list_del_init_careful
	ocfs2: fix use-after-free when unmounting read-only filesystem
	ocfs2: check new file size on fallocate call
	zswap: do not shrink if cgroup may not zswap
	nios2: dts: Fix tse_mac "max-frame-size" property
	nilfs2: fix incomplete buffer cleanup in nilfs_btnode_abort_change_key()
	nilfs2: fix possible out-of-bounds segment allocation in resize ioctl
	nilfs2: reject devices with insufficient block count
	LoongArch: Fix perf event id calculation
	io_uring/net: save msghdr->msg_control for retries
	kexec: support purgatories with .text.hot sections
	x86/purgatory: remove PGO flags
	riscv/purgatory: remove PGO flags
	powerpc/purgatory: remove PGO flags
	btrfs: do not ASSERT() on duplicated global roots
	btrfs: fix iomap_begin length for nocow writes
	btrfs: can_nocow_file_extent should pass down args->strict from callers
	ALSA: usb-audio: Fix broken resume due to UAC3 power state
	ALSA: usb-audio: Add quirk flag for HEM devices to enable native DSD playback
	dm thin metadata: check fail_io before using data_sm
	dm thin: fix issue_discard to pass GFP_NOIO to __blkdev_issue_discard
	net: ethernet: stmicro: stmmac: fix possible memory leak in __stmmac_open
	nouveau: fix client work fence deletion race
	RDMA/uverbs: Restrict usage of privileged QKEYs
	drm/amdgpu: vcn_4_0 set instance 0 init sched score to 1
	net: usb: qmi_wwan: add support for Compal RXM-G1
	drm/amd/display: edp do not add non-edid timings
	drm/amd: Make sure image is written to trigger VBIOS image update flow
	drm/amd: Tighten permissions on VBIOS flashing attributes
	drm/amd/pm: workaround for compute workload type on some skus
	drm/amdgpu: add missing radeon secondary PCI ID
	ALSA: hda/realtek: Add a quirk for Compaq N14JP6
	thunderbolt: Do not touch CL state configuration during discovery
	thunderbolt: dma_test: Use correct value for absent rings when creating paths
	thunderbolt: Mask ring interrupt on Intel hardware as well
	clk: pxa: fix NULL pointer dereference in pxa3xx_clk_update_accr
	USB: serial: option: add Quectel EM061KGL series
	serial: lantiq: add missing interrupt ack
	usb: typec: ucsi: Fix command cancellation
	usb: typec: Fix fast_role_swap_current show function
	usb: gadget: udc: core: Offload usb_udc_vbus_handler processing
	usb: gadget: udc: core: Prevent soft_connect_store() race
	USB: dwc3: qcom: fix NULL-deref on suspend
	USB: dwc3: fix use-after-free on core driver unbind
	usb: dwc3: gadget: Reset num TRBs before giving back the request
	RDMA/rtrs: Fix the last iu->buf leak in err path
	RDMA/rtrs: Fix rxe_dealloc_pd warning
	RDMA/rxe: Fix packet length checks
	RDMA/rxe: Fix ref count error in check_rkey()
	spi: cadence-quadspi: Add missing check for dma_set_mask
	spi: fsl-dspi: avoid SCK glitches with continuous transfers
	netfilter: nf_tables: integrate pipapo into commit protocol
	netfilter: nfnetlink: skip error delivery on batch in case of ENOMEM
	ice: Fix XDP memory leak when NIC is brought up and down
	netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE
	net: enetc: correct the indexes of highest and 2nd highest TCs
	ping6: Fix send to link-local addresses with VRF.
	igb: Fix extts capture value format for 82580/i354/i350
	net/sched: simplify tcf_pedit_act
	net/sched: act_pedit: remove extra check for key type
	net/sched: act_pedit: Parse L3 Header for L4 offset
	octeontx2-af: Fix promiscuous mode
	net/sched: cls_u32: Fix reference counter leak leading to overflow
	wifi: mac80211: fix link activation settings order
	wifi: cfg80211: fix link del callback to call correct handler
	wifi: mac80211: take lock before setting vif links
	RDMA/rxe: Removed unused name from rxe_task struct
	RDMA/rxe: Fix the use-before-initialization error of resp_pkts
	iavf: remove mask from iavf_irq_enable_queues()
	octeontx2-af: fixed resource availability check
	octeontx2-af: fix lbk link credits on cn10k
	RDMA/mlx5: Initiate dropless RQ for RAW Ethernet functions
	RDMA/mlx5: Create an indirect flow table for steering anchor
	RDMA/cma: Always set static rate to 0 for RoCE
	IB/uverbs: Fix to consider event queue closing also upon non-blocking mode
	RDMA/mlx5: Fix affinity assignment
	IB/isert: Fix dead lock in ib_isert
	IB/isert: Fix possible list corruption in CMA handler
	IB/isert: Fix incorrect release of isert connection
	net: ethtool: correct MAX attribute value for stats
	ipvlan: fix bound dev checking for IPv6 l3s mode
	sctp: fix an error code in sctp_sf_eat_auth()
	igc: Clean the TX buffer and TX descriptor ring
	igc: Fix possible system crash when loading module
	igb: fix nvm.ops.read() error handling
	net: phylink: report correct max speed for QUSGMII
	net: phylink: use a dedicated helper to parse usgmii control word
	drm/nouveau: don't detect DSM for non-NVIDIA device
	drm/bridge: ti-sn65dsi86: Avoid possible buffer overflow
	drm/nouveau/dp: check for NULL nv_connector->native_mode
	drm/nouveau: add nv_encoder pointer check for NULL
	selftests/tc-testing: Fix Error: Specified qdisc kind is unknown.
	selftests/tc-testing: Fix Error: failed to find target LOG
	selftests/tc-testing: Fix SFB db test
	sched: add new attr TCA_EXT_WARN_MSG to report tc extact message
	net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs
	net/sched: qdisc_destroy() old ingress and clsact Qdiscs before grafting
	selftests: forwarding: hw_stats_l3: Set addrgenmode in a separate step
	cifs: fix lease break oops in xfstest generic/098
	ext4: drop the call to ext4_error() from ext4_get_group_info()
	net/sched: cls_api: Fix lockup on flushing explicitly created chain
	net: dsa: felix: fix taprio guard band overflow at 10Mbps with jumbo frames
	net: lapbether: only support ethernet devices
	net: macsec: fix double free of percpu stats
	sfc: fix XDP queues mode with legacy IRQ
	dm: don't lock fs when the map is NULL during suspend or resume
	net: tipc: resize nlattr array to correct size
	selftests/ptp: Fix timestamp printf format for PTP_SYS_OFFSET
	octeon_ep: Add missing check for ioremap
	afs: Fix vlserver probe RTT handling
	parisc: Delete redundant register definitions in <asm/assembly.h>
	rcu/kvfree: Avoid freeing new kfree_rcu() memory after old grace period
	drm/amdgpu: Don't set struct drm_driver.output_poll_changed
	net/sched: act_api: move TCA_EXT_WARN_MSG to the correct hierarchy
	Revert "net/sched: act_api: move TCA_EXT_WARN_MSG to the correct hierarchy"
	net/sched: act_api: add specific EXT_WARN_MSG for tc action
	neighbour: delete neigh_lookup_nodev as not used
	scsi: target: core: Fix error path in target_setup_session()
	x86/boot/compressed: prefer cc-option for CFLAGS additions
	MIPS: Move '-Wa,-msoft-float' check from as-option to cc-option
	MIPS: Prefer cc-option for additions to cflags
	kbuild: Update assembler calls to use proper flags and language target
	Linux 6.1.35

Change-Id: Ib27a87c9bcf16c70a0f4dd567551c2ae44702a4b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 1e4b07ffa3
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 34
+SUBLEVEL = 35
 EXTRAVERSION =
 NAME = Curry Ramen

@@ -132,6 +132,7 @@ L2: cache-controller@2c0f0000 {
             reg = <0x2c0f0000 0x1000>;
             interrupts = <0 84 4>;
             cache-level = <2>;
+            cache-unified;
         };
 
     pmu {

@@ -271,7 +271,7 @@ static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
     WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
 
     /* Make sure interrupt enabled. */
-    cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+    cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
         (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;
 
     cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

@@ -594,7 +594,7 @@ static struct pmu pmu = {
 
 static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
 {
-    return (pev->event_id & 0xff);
+    return M_PERFCTL_EVENT(pev->event_id);
 }
 
 static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)

@@ -849,7 +849,7 @@ static void resume_local_counters(void)
 
 static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
 {
-    raw_event.event_id = config & 0xff;
+    raw_event.event_id = M_PERFCTL_EVENT(config);
 
     return &raw_event;
 }
@@ -82,6 +82,7 @@ config MIPS
     select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
     select HAVE_MOD_ARCH_SPECIFIC
     select HAVE_NMI
+    select HAVE_PATA_PLATFORM
     select HAVE_PERF_EVENTS
     select HAVE_PERF_REGS
     select HAVE_PERF_USER_STACK_DUMP

@@ -109,7 +109,7 @@ endif
 # (specifically newer than 2.24.51.20140728) we then also need to explicitly
 # set ".set hardfloat" in all files which manipulate floating point registers.
 #
-ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
+ifneq ($(call cc-option,$(cflags-y) -Wa$(comma)-msoft-float,),)
 cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
 endif
 

@@ -152,7 +152,7 @@ cflags-y += -fno-stack-check
 #
 # Avoid this by explicitly disabling that assembler behaviour.
 #
-cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+cflags-y += $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
 
 #
 # CPU-dependent compiler/assembler options for optimization.
@@ -30,6 +30,7 @@
  *
  */
 
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>

@@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
     dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 
     /*
-     * There is an errata on the Au1200/Au1550 parts that could result
-     * in "stale" data being DMA'ed. It has to do with the snoop logic on
-     * the cache eviction buffer. DMA_NONCOHERENT is on by default for
-     * these parts. If it is fixed in the future, these dma_cache_inv will
-     * just be nothing more than empty macros. See io.h.
+     * There is an erratum on certain Au1200/Au1550 revisions that could
+     * result in "stale" data being DMA'ed. It has to do with the snoop
+     * logic on the cache eviction buffer. dma_default_coherent is set
+     * to false on these parts.
      */
-    dma_cache_wback_inv((unsigned long)buf, nbytes);
+    if (!dma_default_coherent)
+        dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
     dp->dscr_cmd0 |= DSCR_CMD0_V;    /* Let it rip */
     wmb(); /* drain writebuffer */
     dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
     ctp->chan_ptr->ddma_dbell = 0;
     wmb(); /* force doorbell write out to dma engine */
 
     /* Get next descriptor pointer. */
     ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

@@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
             dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 #endif
     /*
-     * There is an errata on the Au1200/Au1550 parts that could result in
-     * "stale" data being DMA'ed. It has to do with the snoop logic on the
-     * cache eviction buffer. DMA_NONCOHERENT is on by default for these
-     * parts. If it is fixed in the future, these dma_cache_inv will just
-     * be nothing more than empty macros. See io.h.
+     * There is an erratum on certain Au1200/Au1550 revisions that could
+     * result in "stale" data being DMA'ed. It has to do with the snoop
+     * logic on the cache eviction buffer. dma_default_coherent is set
+     * to false on these parts.
      */
-    dma_cache_inv((unsigned long)buf, nbytes);
+    if (!dma_default_coherent)
+        dma_cache_inv(KSEG0ADDR(buf), nbytes);
     dp->dscr_cmd0 |= DSCR_CMD0_V;    /* Let it rip */
     wmb(); /* drain writebuffer */
     dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
     ctp->chan_ptr->ddma_dbell = 0;
     wmb(); /* force doorbell write out to dma engine */
 
     /* Get next descriptor pointer. */
     ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -1502,6 +1502,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
             break;
         }
         break;
+    case PRID_IMP_NETLOGIC_AU13XX:
+        c->cputype = CPU_ALCHEMY;
+        __cpu_name[cpu] = "Au1300";
+        break;
     }
 }
 

@@ -1861,6 +1865,7 @@ void cpu_probe(void)
         cpu_probe_mips(c, cpu);
         break;
     case PRID_COMP_ALCHEMY:
+    case PRID_COMP_NETLOGIC:
         cpu_probe_alchemy(c, cpu);
         break;
     case PRID_COMP_SIBYTE:
@@ -158,10 +158,6 @@ static unsigned long __init init_initrd(void)
         pr_err("initrd start must be page aligned\n");
         goto disable;
     }
-    if (initrd_start < PAGE_OFFSET) {
-        pr_err("initrd start < PAGE_OFFSET\n");
-        goto disable;
-    }
 
     /*
      * Sanitize initrd addresses. For example firmware

@@ -174,6 +170,11 @@ static unsigned long __init init_initrd(void)
     initrd_end = (unsigned long)__va(end);
     initrd_start = (unsigned long)__va(__pa(initrd_start));
 
+    if (initrd_start < PAGE_OFFSET) {
+        pr_err("initrd start < PAGE_OFFSET\n");
+        goto disable;
+    }
+
     ROOT_DEV = Root_RAM0;
     return PFN_UP(end);
 disable:
@@ -25,7 +25,7 @@ cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f
 # binutils does not merge support for the flag then we can revisit & remove
 # this later - for now it ensures vendor toolchains don't cause problems.
 #
-cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
 
 # Enable the workarounds for Loongson2f
 ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
@@ -97,7 +97,7 @@ rgmii_0_eth_tse_0: ethernet@400 {
             rx-fifo-depth = <8192>;
             tx-fifo-depth = <8192>;
             address-bits = <48>;
-            max-frame-size = <1518>;
+            max-frame-size = <1500>;
             local-mac-address = [00 00 00 00 00 00];
             altr,has-supplementary-unicast;
             altr,enable-sup-addr = <1>;

@@ -106,7 +106,7 @@ tse_mac: ethernet@4000 {
             interrupt-names = "rx_irq", "tx_irq";
             rx-fifo-depth = <8192>;
             tx-fifo-depth = <8192>;
-            max-frame-size = <1518>;
+            max-frame-size = <1500>;
             local-mac-address = [ 00 00 00 00 00 00 ];
             phy-mode = "rgmii-id";
             phy-handle = <&phy0>;
@@ -90,10 +90,6 @@
 #include <asm/asmregs.h>
 #include <asm/psw.h>
 
-    sp = 30
-    gp = 27
-    ipsw = 22
-
 /*
  * We provide two versions of each macro to convert from physical
  * to virtual and vice versa. The "_r1" versions take one argument
@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
         enum dma_data_direction dir)
 {
+    /*
+     * fdc: The data cache line is written back to memory, if and only if
+     * it is dirty, and then invalidated from the data cache.
+     */
     flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
         enum dma_data_direction dir)
 {
-    flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+    unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+    switch (dir) {
+    case DMA_TO_DEVICE:
+    case DMA_BIDIRECTIONAL:
+        flush_kernel_dcache_range(addr, size);
+        return;
+    case DMA_FROM_DEVICE:
+        purge_kernel_dcache_range_asm(addr, addr + size);
+        return;
+    default:
+        BUG();
+    }
 }
@@ -4,6 +4,11 @@ KASAN_SANITIZE := n
 
 targets += trampoline_$(BITS).o purgatory.ro
 
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
 
 $(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE

@@ -25,6 +25,11 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
 CFLAGS_string.o := -D__DISABLE_EXPORTS
 CFLAGS_ctype.o := -D__DISABLE_EXPORTS
 
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+
 # When linking purgatory.ro with -r unresolved symbols are not checked,
 # also link a purgatory.chk binary without -r to check for unresolved symbols.
 PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
@@ -50,7 +50,7 @@ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 # Disable relocation relaxation in case the link is not PIE.
-KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
 KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 
 # sev.c indirectly includes inat-table.h which is generated during
@@ -85,6 +85,15 @@ SYM_CODE_START_NOALIGN(startup_64)
     call startup_64_setup_env
     popq %rsi
 
+    /* Now switch to __KERNEL_CS so IRET works reliably */
+    pushq $__KERNEL_CS
+    leaq .Lon_kernel_cs(%rip), %rax
+    pushq %rax
+    lretq
+
+.Lon_kernel_cs:
+    UNWIND_HINT_EMPTY
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
     /*
      * Activate SEV/SME memory encryption if supported/enabled. This needs to

@@ -98,15 +107,6 @@ SYM_CODE_START_NOALIGN(startup_64)
     popq %rsi
 #endif
 
-    /* Now switch to __KERNEL_CS so IRET works reliably */
-    pushq $__KERNEL_CS
-    leaq .Lon_kernel_cs(%rip), %rax
-    pushq %rax
-    lretq
-
-.Lon_kernel_cs:
-    UNWIND_HINT_EMPTY
-
     /* Sanitize CPU configuration */
     call verify_cpu
 
@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
 
 CFLAGS_sha256.o := -D__DISABLE_EXPORTS
 
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+
 # When linking purgatory.ro with -r unresolved symbols are not checked,
 # also link a purgatory.chk binary without -r to check for unresolved symbols.
 PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
@@ -780,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
     ring_req->u.rw.handle = info->handle;
     ring_req->operation = rq_data_dir(req) ?
         BLKIF_OP_WRITE : BLKIF_OP_READ;
-    if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
+    if (req_op(req) == REQ_OP_FLUSH ||
+        (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
         /*
          * Ideally we can do an unordered flush-to-disk.
          * In case the backend only supports barriers, use that.
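A standalone restatement of the corrected xen-blkfront predicate (req_op(), REQ_OP_*, and REQ_FUA as in the block layer; the helper name is invented here for illustration): a FUA hint is only meaningful on writes, so a read that happens to carry a stale REQ_FUA bit must not be escalated into a flush/barrier request.

    /* Sketch, assuming standard blk-mq request types. */
    static bool blkif_req_needs_flush(struct request *req)
    {
        return req_op(req) == REQ_OP_FLUSH ||
               (req_op(req) == REQ_OP_WRITE &&
                (req->cmd_flags & REQ_FUA));
    }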
@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
 {
     struct _parisc_agp_info *info = &parisc_agp_info;
 
+    /* force fdc ops to be visible to IOMMU */
+    asm_io_sync();
+
     writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
     readq(info->ioc_regs+IOC_PCOM);    /* flush */
 }

@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
             info->gatt[j] =
                 parisc_agp_mask_memory(agp_bridge,
                     paddr, type);
+            asm_io_fdc(&info->gatt[j]);
         }
     }
 

@@ -191,7 +195,16 @@ static unsigned long
 parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
                int type)
 {
-    return SBA_PDIR_VALID_BIT | addr;
+    unsigned ci;    /* coherent index */
+    dma_addr_t pa;
+
+    pa = addr & IOVP_MASK;
+    asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+    pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
+    pa |= SBA_PDIR_VALID_BIT;    /* set "valid" bit */
+
+    return cpu_to_le64(pa);
 }
 
 static void
@@ -164,7 +164,7 @@ void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
     accr &= ~disable;
     accr |= enable;
 
-    writel(accr, ACCR);
+    writel(accr, clk_regs + ACCR);
     if (xclkcfg)
         __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
 
@@ -21,30 +21,9 @@
 #define TRP_SYN_REG_CNT             6
 #define DRP_SYN_REG_CNT             8
 
-#define LLCC_COMMON_STATUS0         0x0003000c
 #define LLCC_LB_CNT_MASK            GENMASK(31, 28)
 #define LLCC_LB_CNT_SHIFT           28
 
-/* Single & double bit syndrome register offsets */
-#define TRP_ECC_SB_ERR_SYN0         0x0002304c
-#define TRP_ECC_DB_ERR_SYN0         0x00020370
-#define DRP_ECC_SB_ERR_SYN0         0x0004204c
-#define DRP_ECC_DB_ERR_SYN0         0x00042070
-
-/* Error register offsets */
-#define TRP_ECC_ERROR_STATUS1       0x00020348
-#define TRP_ECC_ERROR_STATUS0       0x00020344
-#define DRP_ECC_ERROR_STATUS1       0x00042048
-#define DRP_ECC_ERROR_STATUS0       0x00042044
-
-/* TRP, DRP interrupt register offsets */
-#define DRP_INTERRUPT_STATUS        0x00041000
-#define TRP_INTERRUPT_0_STATUS      0x00020480
-#define DRP_INTERRUPT_CLEAR         0x00041008
-#define DRP_ECC_ERROR_CNTR_CLEAR    0x00040004
-#define TRP_INTERRUPT_0_CLEAR       0x00020484
-#define TRP_ECC_ERROR_CNTR_CLEAR    0x00020440
-
 /* Mask and shift macros */
 #define ECC_DB_ERR_COUNT_MASK       GENMASK(4, 0)
 #define ECC_DB_ERR_WAYS_MASK        GENMASK(31, 16)

@@ -60,15 +39,6 @@
 #define DRP_TRP_INT_CLEAR           GENMASK(1, 0)
 #define DRP_TRP_CNT_CLEAR           GENMASK(1, 0)
 
-/* Config registers offsets */
-#define DRP_ECC_ERROR_CFG           0x00040000
-
-/* Tag RAM, Data RAM interrupt register offsets */
-#define CMN_INTERRUPT_0_ENABLE      0x0003001c
-#define CMN_INTERRUPT_2_ENABLE      0x0003003c
-#define TRP_INTERRUPT_0_ENABLE      0x00020488
-#define DRP_INTERRUPT_ENABLE        0x0004100c
-
 #define SB_ERROR_THRESHOLD          0x1
 #define SB_ERROR_THRESHOLD_SHIFT    24
 #define SB_DB_TRP_INTERRUPT_ENABLE  0x3

@@ -88,9 +58,6 @@ enum {
 static const struct llcc_edac_reg_data edac_reg_data[] = {
     [LLCC_DRAM_CE] = {
         .name = "DRAM Single-bit",
-        .synd_reg = DRP_ECC_SB_ERR_SYN0,
-        .count_status_reg = DRP_ECC_ERROR_STATUS1,
-        .ways_status_reg = DRP_ECC_ERROR_STATUS0,
         .reg_cnt = DRP_SYN_REG_CNT,
         .count_mask = ECC_SB_ERR_COUNT_MASK,
         .ways_mask = ECC_SB_ERR_WAYS_MASK,

@@ -98,9 +65,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
     },
     [LLCC_DRAM_UE] = {
         .name = "DRAM Double-bit",
-        .synd_reg = DRP_ECC_DB_ERR_SYN0,
-        .count_status_reg = DRP_ECC_ERROR_STATUS1,
-        .ways_status_reg = DRP_ECC_ERROR_STATUS0,
         .reg_cnt = DRP_SYN_REG_CNT,
         .count_mask = ECC_DB_ERR_COUNT_MASK,
         .ways_mask = ECC_DB_ERR_WAYS_MASK,

@@ -108,9 +72,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
     },
     [LLCC_TRAM_CE] = {
         .name = "TRAM Single-bit",
-        .synd_reg = TRP_ECC_SB_ERR_SYN0,
-        .count_status_reg = TRP_ECC_ERROR_STATUS1,
-        .ways_status_reg = TRP_ECC_ERROR_STATUS0,
         .reg_cnt = TRP_SYN_REG_CNT,
         .count_mask = ECC_SB_ERR_COUNT_MASK,
         .ways_mask = ECC_SB_ERR_WAYS_MASK,

@@ -118,9 +79,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
     },
     [LLCC_TRAM_UE] = {
         .name = "TRAM Double-bit",
-        .synd_reg = TRP_ECC_DB_ERR_SYN0,
-        .count_status_reg = TRP_ECC_ERROR_STATUS1,
-        .ways_status_reg = TRP_ECC_ERROR_STATUS0,
         .reg_cnt = TRP_SYN_REG_CNT,
         .count_mask = ECC_DB_ERR_COUNT_MASK,
         .ways_mask = ECC_DB_ERR_WAYS_MASK,

@@ -128,7 +86,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
     },
 };
 
-static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
+static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
 {
     u32 sb_err_threshold;
     int ret;
@@ -137,31 +95,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
      * Configure interrupt enable registers such that Tag, Data RAM related
      * interrupts are propagated to interrupt controller for servicing
      */
-    ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+    ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
                  TRP0_INTERRUPT_ENABLE,
                  TRP0_INTERRUPT_ENABLE);
     if (ret)
         return ret;
 
-    ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE,
+    ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable,
                  SB_DB_TRP_INTERRUPT_ENABLE,
                  SB_DB_TRP_INTERRUPT_ENABLE);
     if (ret)
         return ret;
 
     sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
-    ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG,
+    ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg,
                sb_err_threshold);
     if (ret)
         return ret;
 
-    ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+    ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
                  DRP0_INTERRUPT_ENABLE,
                  DRP0_INTERRUPT_ENABLE);
     if (ret)
         return ret;
 
-    ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE,
+    ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable,
                SB_DB_DRP_INTERRUPT_ENABLE);
     return ret;
 }

@@ -175,24 +133,28 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
     switch (err_type) {
     case LLCC_DRAM_CE:
     case LLCC_DRAM_UE:
-        ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR,
+        ret = regmap_write(drv->bcast_regmap,
+                   drv->edac_reg_offset->drp_interrupt_clear,
                    DRP_TRP_INT_CLEAR);
         if (ret)
             return ret;
 
-        ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR,
+        ret = regmap_write(drv->bcast_regmap,
+                   drv->edac_reg_offset->drp_ecc_error_cntr_clear,
                    DRP_TRP_CNT_CLEAR);
         if (ret)
             return ret;
         break;
     case LLCC_TRAM_CE:
     case LLCC_TRAM_UE:
-        ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR,
+        ret = regmap_write(drv->bcast_regmap,
+                   drv->edac_reg_offset->trp_interrupt_0_clear,
                    DRP_TRP_INT_CLEAR);
         if (ret)
             return ret;
 
-        ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR,
+        ret = regmap_write(drv->bcast_regmap,
+                   drv->edac_reg_offset->trp_ecc_error_cntr_clear,
                    DRP_TRP_CNT_CLEAR);
         if (ret)
             return ret;
@@ -205,17 +167,55 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
     return ret;
 }
 
+struct qcom_llcc_syn_regs {
+    u32 synd_reg;
+    u32 count_status_reg;
+    u32 ways_status_reg;
+};
+
+static void get_reg_offsets(struct llcc_drv_data *drv, int err_type,
+                struct qcom_llcc_syn_regs *syn_regs)
+{
+    const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset;
+
+    switch (err_type) {
+    case LLCC_DRAM_CE:
+        syn_regs->synd_reg = edac_reg_offset->drp_ecc_sb_err_syn0;
+        syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
+        syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
+        break;
+    case LLCC_DRAM_UE:
+        syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0;
+        syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
+        syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
+        break;
+    case LLCC_TRAM_CE:
+        syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0;
+        syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
+        syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
+        break;
+    case LLCC_TRAM_UE:
+        syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0;
+        syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
+        syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
+        break;
+    }
+}
+
 /* Dump Syndrome registers data for Tag RAM, Data RAM bit errors */
 static int
 dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
 {
     struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
+    struct qcom_llcc_syn_regs regs = { };
     int err_cnt, err_ways, ret, i;
     u32 synd_reg, synd_val;
 
+    get_reg_offsets(drv, err_type, &regs);
+
     for (i = 0; i < reg_data.reg_cnt; i++) {
-        synd_reg = reg_data.synd_reg + (i * 4);
-        ret = regmap_read(drv->regmap, drv->offsets[bank] + synd_reg,
+        synd_reg = regs.synd_reg + (i * 4);
+        ret = regmap_read(drv->regmaps[bank], synd_reg,
                   &synd_val);
         if (ret)
             goto clear;
@@ -224,8 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
                 reg_data.name, i, synd_val);
     }
 
-    ret = regmap_read(drv->regmap,
-              drv->offsets[bank] + reg_data.count_status_reg,
+    ret = regmap_read(drv->regmaps[bank], regs.count_status_reg,
               &err_cnt);
     if (ret)
         goto clear;

@@ -235,8 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
     edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
             reg_data.name, err_cnt);
 
-    ret = regmap_read(drv->regmap,
-              drv->offsets[bank] + reg_data.ways_status_reg,
+    ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg,
               &err_ways);
     if (ret)
         goto clear;

@@ -297,8 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
 
     /* Iterate over the banks and look for Tag RAM or Data RAM errors */
     for (i = 0; i < drv->num_banks; i++) {
-        ret = regmap_read(drv->regmap,
-                  drv->offsets[i] + DRP_INTERRUPT_STATUS,
+        ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status,
                   &drp_error);
 
         if (!ret && (drp_error & SB_ECC_ERROR)) {

@@ -313,8 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
         if (!ret)
             irq_rc = IRQ_HANDLED;
 
-        ret = regmap_read(drv->regmap,
-                  drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
+        ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status,
                   &trp_error);
 
         if (!ret && (trp_error & SB_ECC_ERROR)) {

@@ -346,7 +342,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
     int ecc_irq;
     int rc;
 
-    rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap);
+    rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
     if (rc)
         return rc;
 
@@ -1214,7 +1214,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 
 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
     .fb_create = amdgpu_display_user_framebuffer_create,
-    .output_poll_changed = drm_fb_helper_output_poll_changed,
 };
 
 static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =

@@ -1605,6 +1605,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
     0x5874,
     0x5940,
     0x5941,
+    0x5b70,
     0x5b72,
     0x5b73,
     0x5b74,

@@ -3550,6 +3550,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
     void *fw_pri_cpu_addr;
     int ret;
 
+    if (adev->psp.vbflash_image_size == 0)
+        return -EINVAL;
+
     dev_info(adev->dev, "VBIOS flash to PSP started");
 
     ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
@@ -3601,13 +3604,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
 }
 
 static const struct bin_attribute psp_vbflash_bin_attr = {
-    .attr = {.name = "psp_vbflash", .mode = 0664},
+    .attr = {.name = "psp_vbflash", .mode = 0660},
     .size = 0,
     .write = amdgpu_psp_vbflash_write,
     .read = amdgpu_psp_vbflash_read,
 };
 
-static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL);
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
 
 int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
 {
@@ -6969,8 +6969,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
         return r;
 
     r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-    if (unlikely(r != 0))
+    if (unlikely(r != 0)) {
+        amdgpu_bo_unreserve(ring->mqd_obj);
         return r;
+    }
 
     gfx_v10_0_kiq_init_queue(ring);
     amdgpu_bo_kunmap(ring->mqd_obj);

@@ -3650,8 +3650,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
         return r;
 
     r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-    if (unlikely(r != 0))
+    if (unlikely(r != 0)) {
+        amdgpu_bo_unreserve(ring->mqd_obj);
        return r;
+    }
 
     gfx_v9_0_kiq_init_queue(ring);
     amdgpu_bo_kunmap(ring->mqd_obj);
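Both gfx hunks above fix the same shape of bug: an early return between a successful reserve and the matching unreserve leaked the reservation. A generic sketch of the corrected structure, with hypothetical helper names standing in for the amdgpu calls:

    /* Every exit taken after a successful acquire must release. */
    static int kiq_setup(struct ring *ring)
    {
        int r;

        r = reserve(ring->obj);
        if (r)
            return r;

        r = kmap(ring->obj, &ring->ptr);
        if (r)
            goto out_unreserve;    /* the bug was returning here directly */

        init_queue(ring);
        kunmap(ring->obj);
    out_unreserve:
        unreserve(ring->obj);
        return r;
    }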
@@ -117,7 +117,11 @@ static int vcn_v4_0_sw_init(void *handle)
         if (adev->vcn.harvest_config & (1 << i))
             continue;
 
-        atomic_set(&adev->vcn.inst[i].sched_score, 0);
+        /* Init instance 0 sched_score to 1, so it's scheduled after other instances */
+        if (i == 0)
+            atomic_set(&adev->vcn.inst[i].sched_score, 1);
+        else
+            atomic_set(&adev->vcn.inst[i].sched_score, 0);
 
         /* VCN UNIFIED TRAP */
         r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],

@@ -83,7 +83,6 @@
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>

@@ -2875,7 +2874,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
     .fb_create = amdgpu_display_user_framebuffer_create,
     .get_format_info = amd_get_format_info,
-    .output_poll_changed = drm_fb_helper_output_poll_changed,
     .atomic_check = amdgpu_dm_atomic_check,
     .atomic_commit = drm_atomic_helper_commit,
 };

@@ -6942,7 +6940,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
         drm_add_modes_noedid(connector, 640, 480);
     } else {
         amdgpu_dm_connector_ddc_get_modes(connector, edid);
-        amdgpu_dm_connector_add_common_modes(encoder, connector);
+        /* most eDP supports only timings from its edid,
+         * usually only detailed timings are available
+         * from eDP edid. timings which are not from edid
+         * may damage eDP
+         */
+        if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+            amdgpu_dm_connector_add_common_modes(encoder, connector);
         amdgpu_dm_connector_add_freesync_modes(connector, edid);
     }
     amdgpu_dm_fbc_init(connector);
@@ -1675,10 +1675,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
         }
     }
 
-    /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-    workload_type = smu_cmn_to_asic_specific_index(smu,
+    if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+        (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+         ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+        ret = smu_cmn_update_table(smu,
+                       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+                       WORKLOAD_PPLIB_COMPUTE_BIT,
+                       (void *)(&activity_monitor_external),
+                       false);
+        if (ret) {
+            dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+            return ret;
+        }
+
+        ret = smu_cmn_update_table(smu,
+                       SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+                       WORKLOAD_PPLIB_CUSTOM_BIT,
+                       (void *)(&activity_monitor_external),
+                       true);
+        if (ret) {
+            dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+            return ret;
+        }
+
+        workload_type = smu_cmn_to_asic_specific_index(smu,
                            CMN2ASIC_MAPPING_WORKLOAD,
-                           smu->power_profile_mode);
+                           PP_SMC_POWER_PROFILE_CUSTOM);
+    } else {
+        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+        workload_type = smu_cmn_to_asic_specific_index(smu,
+                           CMN2ASIC_MAPPING_WORKLOAD,
+                           smu->power_profile_mode);
+    }
 
     if (workload_type < 0)
         return -EINVAL;
 
@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
         if (refclk_lut[i] == refclk_rate)
             break;
 
+    /* avoid buffer overflow and "1" is the default rate in the datasheet. */
+    if (i >= refclk_lut_size)
+        i = 1;
+
     regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
                REFCLK_FREQ(i));
 
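The ti-sn65dsi86 hunk is the classic bounded-LUT-search-with-fallback pattern: when nothing matches, fall back to a documented default index instead of indexing one past the end of the table. Sketched generically, with hypothetical names:

    #include <stddef.h>

    static size_t lut_pick(const unsigned int *lut, size_t n,
                   unsigned int want, size_t dflt)
    {
        size_t i;

        for (i = 0; i < n; i++)
            if (lut[i] == want)
                return i;
        return dflt;    /* never read lut[n] */
    }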
@@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = {
     }, {    /* AYA NEO AIR */
         .matches = {
           DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
-          DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+          DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
         },
         .driver_data = (void *)&lcd1080x1920_leftside_up,
     }, {    /* AYA NEO NEXT */

@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
     int optimus_funcs;
     struct pci_dev *parent_pdev;
 
+    if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+        return;
+
     *has_pr3 = false;
     parent_pdev = pci_upstream_bridge(pdev);
     if (parent_pdev) {

@@ -730,7 +730,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 #endif
 
     nouveau_connector_set_edid(nv_connector, edid);
-    nouveau_connector_set_encoder(connector, nv_encoder);
+    if (nv_encoder)
+        nouveau_connector_set_encoder(connector, nv_encoder);
     return status;
 }
 

@@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
     /* Determine display colour depth for everything except LVDS now,
      * DP requires this before mode_valid() is called.
      */
-    if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+    if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
         nouveau_connector_detect_depth(connector);
 
     /* Find the native mode if this is a digital panel, if we didn't

@@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
      * "native" mode as some VBIOS tables require us to use the
      * pixel clock as part of the lookup...
      */
-    if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+    if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
         nouveau_connector_detect_depth(connector);
 
     if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
@@ -139,10 +139,16 @@ nouveau_name(struct drm_device *dev)
 static inline bool
 nouveau_cli_work_ready(struct dma_fence *fence)
 {
-    if (!dma_fence_is_signaled(fence))
-        return false;
-    dma_fence_put(fence);
-    return true;
+    bool ret = true;
+
+    spin_lock_irq(fence->lock);
+    if (!dma_fence_is_signaled_locked(fence))
+        ret = false;
+    spin_unlock_irq(fence->lock);
+
+    if (ret == true)
+        dma_fence_put(fence);
+    return ret;
 }
 
 static void
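The nouveau race closed above: between a lockless dma_fence_is_signaled() and the dma_fence_put(), a concurrent signaler could run and the fence state could be torn down. Checking under fence->lock with the _locked variant, and dropping the reference only once the result is known, is the general check-then-consume idiom. A condensed sketch under that assumption (function name invented here; dma_fence API as in the hunk):

    static bool work_fence_consumed(struct dma_fence *fence)
    {
        bool signaled;

        spin_lock_irq(fence->lock);
        signaled = dma_fence_is_signaled_locked(fence);
        spin_unlock_irq(fence->lock);

        if (signaled)
            dma_fence_put(fence);    /* drop our ref only when done */
        return signaled;
    }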
@@ -3293,7 +3293,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
     route->path_rec->traffic_class = tos;
     route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
     route->path_rec->rate_selector = IB_SA_EQ;
-    route->path_rec->rate = iboe_get_rate(ndev);
+    route->path_rec->rate = IB_RATE_PORT_CURRENT;
     dev_put(ndev);
     route->path_rec->packet_life_time_selector = IB_SA_EQ;
     /* In case ACK timeout is set, use this value to calculate

@@ -4955,7 +4955,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
     if (!ndev)
         return -ENODEV;
 
-    ib.rec.rate = iboe_get_rate(ndev);
+    ib.rec.rate = IB_RATE_PORT_CURRENT;
     ib.rec.hop_limit = 1;
     ib.rec.mtu = iboe_get_mtu(ndev->mtu);
 

@@ -1850,8 +1850,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
         attr->path_mtu = cmd->base.path_mtu;
     if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
         attr->path_mig_state = cmd->base.path_mig_state;
-    if (cmd->base.attr_mask & IB_QP_QKEY)
+    if (cmd->base.attr_mask & IB_QP_QKEY) {
+        if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) {
+            ret = -EPERM;
+            goto release_qp;
+        }
         attr->qkey = cmd->base.qkey;
+    }
     if (cmd->base.attr_mask & IB_QP_RQ_PSN)
         attr->rq_psn = cmd->base.rq_psn;
     if (cmd->base.attr_mask & IB_QP_SQ_PSN)
@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
     spin_lock_irq(&ev_queue->lock);
 
     while (list_empty(&ev_queue->event_list)) {
-        spin_unlock_irq(&ev_queue->lock);
+        if (ev_queue->is_closed) {
+            spin_unlock_irq(&ev_queue->lock);
+            return -EIO;
+        }
 
+        spin_unlock_irq(&ev_queue->lock);
         if (filp->f_flags & O_NONBLOCK)
             return -EAGAIN;
 

@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
             return -ERESTARTSYS;
 
         spin_lock_irq(&ev_queue->lock);
-
-        /* If device was disassociated and no event exists set an error */
-        if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
-            spin_unlock_irq(&ev_queue->lock);
-            return -EIO;
-        }
     }
 
     event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
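The two uverbs hunks make the non-blocking path observe a closed event queue the same way the blocking path already did: the closed flag and the data condition are both rechecked under the lock, before any early return and after any sleep. The underlying idiom, sketched with hypothetical names:

    spin_lock_irq(&q->lock);
    while (list_empty(&q->list)) {
        if (q->is_closed) {
            spin_unlock_irq(&q->lock);
            return -EIO;    /* now seen by O_NONBLOCK callers too */
        }
        spin_unlock_irq(&q->lock);

        if (nonblock)
            return -EAGAIN;
        if (wait_event_interruptible(q->wq,
                         !list_empty(&q->list) || q->is_closed))
            return -ERESTARTSYS;

        spin_lock_irq(&q->lock);    /* recheck both conditions under the lock */
    }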
@@ -696,8 +696,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
     struct mlx5_flow_table_attr ft_attr = {};
     struct mlx5_flow_table *ft;
 
-    if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
-        ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
     ft_attr.prio = priority;
     ft_attr.max_fte = num_entries;
     ft_attr.flags = flags;
@@ -2026,6 +2024,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
     return 0;
 }
 
+static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
+                     struct mlx5_ib_flow_prio *ft_prio,
+                     enum mlx5_flow_namespace_type ns_type)
+{
+    struct mlx5_flow_table_attr ft_attr = {};
+    struct mlx5_flow_namespace *ns;
+    struct mlx5_flow_table *ft;
+
+    if (ft_prio->anchor.ft)
+        return 0;
+
+    ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+    if (!ns)
+        return -EOPNOTSUPP;
+
+    ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+    ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+    ft_attr.prio = 0;
+    ft_attr.max_fte = 2;
+    ft_attr.level = 1;
+
+    ft = mlx5_create_flow_table(ns, &ft_attr);
+    if (IS_ERR(ft))
+        return PTR_ERR(ft);
+
+    ft_prio->anchor.ft = ft;
+
+    return 0;
+}
+
+static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
+{
+    if (ft_prio->anchor.ft) {
+        mlx5_destroy_flow_table(ft_prio->anchor.ft);
+        ft_prio->anchor.ft = NULL;
+    }
+}
+
+static int
+steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+    int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+    struct mlx5_flow_group *fg;
+    void *flow_group_in;
+    int err = 0;
+
+    if (ft_prio->anchor.fg_drop)
+        return 0;
+
+    flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+    if (!flow_group_in)
+        return -ENOMEM;
+
+    MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+    MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+    fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+    if (IS_ERR(fg)) {
+        err = PTR_ERR(fg);
+        goto out;
+    }
+
+    ft_prio->anchor.fg_drop = fg;
+
+out:
+    kvfree(flow_group_in);
+
+    return err;
+}
+
+static void
+steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+    if (ft_prio->anchor.fg_drop) {
+        mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
+        ft_prio->anchor.fg_drop = NULL;
+    }
+}
+
+static int
+steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+    int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+    struct mlx5_flow_group *fg;
+    void *flow_group_in;
+    int err = 0;
+
+    if (ft_prio->anchor.fg_goto_table)
+        return 0;
+
+    flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+    if (!flow_group_in)
+        return -ENOMEM;
+
+    fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+    if (IS_ERR(fg)) {
+        err = PTR_ERR(fg);
+        goto out;
+    }
+    ft_prio->anchor.fg_goto_table = fg;
+
+out:
+    kvfree(flow_group_in);
+
+    return err;
+}
+
+static void
+steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+    if (ft_prio->anchor.fg_goto_table) {
+        mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
+        ft_prio->anchor.fg_goto_table = NULL;
+    }
+}
+
+static int
+steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+    struct mlx5_flow_act flow_act = {};
+    struct mlx5_flow_handle *handle;
+
+    if (ft_prio->anchor.rule_drop)
+        return 0;
+
+    flow_act.fg = ft_prio->anchor.fg_drop;
+    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+    handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+                     NULL, 0);
+    if (IS_ERR(handle))
+        return PTR_ERR(handle);
+
+    ft_prio->anchor.rule_drop = handle;
+
+    return 0;
+}
+
+static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+    if (ft_prio->anchor.rule_drop) {
+        mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
+        ft_prio->anchor.rule_drop = NULL;
+    }
+}
+
+static int
+steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+    struct mlx5_flow_destination dest = {};
+    struct mlx5_flow_act flow_act = {};
+    struct mlx5_flow_handle *handle;
+
+    if (ft_prio->anchor.rule_goto_table)
+        return 0;
+
+    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+    flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+    flow_act.fg = ft_prio->anchor.fg_goto_table;
+
+    dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+    dest.ft = ft_prio->flow_table;
+
+    handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+                     &dest, 1);
+    if (IS_ERR(handle))
+        return PTR_ERR(handle);
+
+    ft_prio->anchor.rule_goto_table = handle;
+
+    return 0;
+}
+
+static void
+steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+    if (ft_prio->anchor.rule_goto_table) {
+        mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
+        ft_prio->anchor.rule_goto_table = NULL;
+    }
+}
+
+static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
+                      struct mlx5_ib_flow_prio *ft_prio,
+                      enum mlx5_flow_namespace_type ns_type)
+{
+    int err;
+
+    err = steering_anchor_create_ft(dev, ft_prio, ns_type);
+    if (err)
+        return err;
+
+    err = steering_anchor_create_fg_drop(ft_prio);
+    if (err)
+        goto destroy_ft;
+
+    err = steering_anchor_create_fg_goto_table(ft_prio);
+    if (err)
+        goto destroy_fg_drop;
+
+    err = steering_anchor_create_rule_drop(ft_prio);
+    if (err)
+        goto destroy_fg_goto_table;
+
+    err = steering_anchor_create_rule_goto_table(ft_prio);
+    if (err)
+        goto destroy_rule_drop;
+
+    return 0;
+
+destroy_rule_drop:
+    steering_anchor_destroy_rule_drop(ft_prio);
+destroy_fg_goto_table:
+    steering_anchor_destroy_fg_goto_table(ft_prio);
+destroy_fg_drop:
+    steering_anchor_destroy_fg_drop(ft_prio);
+destroy_ft:
+    steering_anchor_destroy_ft(ft_prio);
+
+    return err;
+}
+
+static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
+{
+    steering_anchor_destroy_rule_goto_table(ft_prio);
+    steering_anchor_destroy_rule_drop(ft_prio);
+    steering_anchor_destroy_fg_goto_table(ft_prio);
+    steering_anchor_destroy_fg_drop(ft_prio);
+    steering_anchor_destroy_ft(ft_prio);
+}
+
 static int steering_anchor_cleanup(struct ib_uobject *uobject,
                    enum rdma_remove_reason why,
                    struct uverbs_attr_bundle *attrs)
@@ -2036,6 +2265,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
         return -EBUSY;
 
     mutex_lock(&obj->dev->flow_db->lock);
+    if (!--obj->ft_prio->anchor.rule_goto_table_ref)
+        steering_anchor_destroy_rule_goto_table(obj->ft_prio);
+
     put_flow_table(obj->dev, obj->ft_prio, true);
     mutex_unlock(&obj->dev->flow_db->lock);
 

@@ -2043,6 +2275,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
     return 0;
 }
 
+static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
+                  int count)
+{
+    while (count--)
+        mlx5_steering_anchor_destroy_res(&prio[count]);
+}
+
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
+{
+    fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
+    fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
+    fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
+    fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
+    fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
+    fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
+    fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
+}
+
 static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
                   struct mlx5_ib_flow_matcher *obj)
 {
@@ -2183,21 +2433,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
         return -ENOMEM;
 
     mutex_lock(&dev->flow_db->lock);
+
     ft_prio = _get_flow_table(dev, priority, ns_type, 0);
     if (IS_ERR(ft_prio)) {
-        mutex_unlock(&dev->flow_db->lock);
         err = PTR_ERR(ft_prio);
         goto free_obj;
     }
 
     ft_prio->refcount++;
-    ft_id = mlx5_flow_table_id(ft_prio->flow_table);
-    mutex_unlock(&dev->flow_db->lock);
+
+    if (!ft_prio->anchor.rule_goto_table_ref) {
+        err = steering_anchor_create_res(dev, ft_prio, ns_type);
+        if (err)
+            goto put_flow_table;
+    }
+
+    ft_prio->anchor.rule_goto_table_ref++;
+
+    ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
 
     err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
                  &ft_id, sizeof(ft_id));
     if (err)
-        goto put_flow_table;
+        goto destroy_res;
+
+    mutex_unlock(&dev->flow_db->lock);
 
     uobj->object = obj;
     obj->dev = dev;

@@ -2206,8 +2466,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
 
     return 0;
 
+destroy_res:
+    --ft_prio->anchor.rule_goto_table_ref;
+    mlx5_steering_anchor_destroy_res(ft_prio);
 put_flow_table:
-    mutex_lock(&dev->flow_db->lock);
     put_flow_table(dev, ft_prio, true);
     mutex_unlock(&dev->flow_db->lock);
 free_obj:
@@ -10,6 +10,7 @@

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
#else
static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{

@@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
	mutex_init(&dev->flow_db->lock);
	return 0;
}

static inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
#endif

static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
{
	/* When a steering anchor is created, a special flow table is also
	 * created for the user to reference. Since the user can reference it,
	 * the kernel cannot trust that when the user destroys the steering
	 * anchor, they no longer reference the flow table.
	 *
	 * To address this issue, when a user destroys a steering anchor, only
	 * the flow steering rule in the table is destroyed, but the table
	 * itself is kept to deal with the above scenario. The remaining
	 * resources are only removed when the RDMA device is destroyed, which
	 * is a safe assumption that all references are gone.
	 */
	mlx5_ib_fs_cleanup_anchor(dev);
	kfree(dev->flow_db);
}
#endif /* _MLX5_IB_FS_H */
@@ -4250,6 +4250,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
		     mlx5_ib_restrack_init,
		     NULL),
@@ -233,8 +233,19 @@ enum {
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS

struct mlx5_ib_anchor {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg_goto_table;
	struct mlx5_flow_group *fg_drop;
	struct mlx5_flow_handle *rule_goto_table;
	struct mlx5_flow_handle *rule_drop;
	unsigned int rule_goto_table_ref;
};

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	struct mlx5_ib_anchor anchor;
	unsigned int refcount;
};

@@ -1553,6 +1564,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
		return 0;

	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
		return 0;

	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
@@ -1156,6 +1156,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,

	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(tisc, tisc, transport_domain, tdn);
	if (!mlx5_ib_lag_should_assign_affinity(dev) &&
	    mlx5_lag_is_lacp_owner(dev->mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
@@ -156,6 +156,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	/* remove udp header */
	skb_pull(skb, sizeof(struct udphdr));

	rxe_rcv(skb);

	return 0;

@@ -397,6 +400,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
		return -EIO;
	}

	/* remove udp header */
	skb_pull(skb, sizeof(struct udphdr));

	rxe_rcv(skb);

	return 0;
@@ -180,6 +180,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

@@ -240,12 +243,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(&qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(&qp->comp.task, qp,
		      rxe_completer, "comp");
	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {

@@ -290,10 +289,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(&qp->resp.task, qp,
		      rxe_responder, "resp");
	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
@@ -466,8 +466,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_put(mw);
		rxe_get(mr);
		rxe_put(mw);
		mw = NULL;
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
@@ -94,12 +94,10 @@ void rxe_do_task(struct tasklet_struct *t)
	task->ret = ret;
}

int rxe_init_task(struct rxe_task *task,
		  void *arg, int (*func)(void *), char *name)
int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
{
	task->arg	= arg;
	task->func	= func;
	snprintf(task->name, sizeof(task->name), "%s", name);
	task->destroyed	= false;

	tasklet_setup(&task->tasklet, rxe_do_task);
@@ -25,7 +25,6 @@ struct rxe_task {
	void			*arg;
	int			(*func)(void *arg);
	int			ret;
	char			name[16];
	bool			destroyed;
};

@@ -34,8 +33,7 @@ struct rxe_task {
 *	arg  => parameter to pass to fcn
 *	func => function to call until it returns != 0
 */
int rxe_init_task(struct rxe_task *task,
		  void *arg, int (*func)(void *), char *name);
int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));

/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
@@ -657,9 +657,13 @@ static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	ib_drain_qp(isert_conn->qp);

	mutex_lock(&isert_np->mutex);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

@@ -2432,6 +2436,7 @@ isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;
	LIST_HEAD(drop_conn_list);

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

@@ -2451,7 +2456,7 @@ isert_free_np(struct iscsi_np *np)
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}

@@ -2462,11 +2467,16 @@ isert_free_np(struct iscsi_np *np)
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}
	mutex_unlock(&isert_np->mutex);

	list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
		list_del_init(&isert_conn->node);
		isert_connect_release(isert_conn);
	}

	np->np_context = NULL;
	kfree(isert_np);
}

@@ -2561,8 +2571,6 @@ static void isert_wait_conn(struct iscsit_conn *conn)
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsit_conn *conn)
@@ -2042,6 +2042,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
	return 0;
}

/* The caller should do the cleanup in case of error */
static int create_cm(struct rtrs_clt_con *con)
{
	struct rtrs_path *s = con->c.path;

@@ -2064,14 +2065,14 @@ static int create_cm(struct rtrs_clt_con *con)
	err = rdma_set_reuseaddr(cm_id, 1);
	if (err != 0) {
		rtrs_err(s, "Set address reuse failed, err: %d\n", err);
		goto destroy_cm;
		return err;
	}
	err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
				(struct sockaddr *)&clt_path->s.dst_addr,
				RTRS_CONNECT_TIMEOUT_MS);
	if (err) {
		rtrs_err(s, "Failed to resolve address, err: %d\n", err);
		goto destroy_cm;
		return err;
	}
	/*
	 * Combine connection status and session events. This is needed

@@ -2086,29 +2087,15 @@ static int create_cm(struct rtrs_clt_con *con)
		if (err == 0)
			err = -ETIMEDOUT;
		/* Timedout or interrupted */
		goto errr;
		return err;
	}
	if (con->cm_err < 0) {
		err = con->cm_err;
		goto errr;
	}
	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
	if (con->cm_err < 0)
		return con->cm_err;
	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
		/* Device removal */
		err = -ECONNABORTED;
		goto errr;
	}
		return -ECONNABORTED;

	return 0;

errr:
	stop_cm(con);
	mutex_lock(&con->con_mutex);
	destroy_con_cq_qp(con);
	mutex_unlock(&con->con_mutex);
destroy_cm:
	destroy_cm(con);

	return err;
}

static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)

@@ -2336,7 +2323,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
static int init_conns(struct rtrs_clt_path *clt_path)
{
	unsigned int cid;
	int err;
	int err, i;

	/*
	 * On every new session connections increase reconnect counter

@@ -2352,10 +2339,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
			goto destroy;

		err = create_cm(to_clt_con(clt_path->s.con[cid]));
		if (err) {
			destroy_con(to_clt_con(clt_path->s.con[cid]));
		if (err)
			goto destroy;
		}
	}
	err = alloc_path_reqs(clt_path);
	if (err)

@@ -2366,15 +2351,21 @@ static int init_conns(struct rtrs_clt_path *clt_path)
	return 0;

destroy:
	while (cid--) {
		struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
	/* Make sure we do the cleanup in the order they are created */
	for (i = 0; i <= cid; i++) {
		struct rtrs_clt_con *con;

		stop_cm(con);
		if (!clt_path->s.con[i])
			break;

		mutex_lock(&con->con_mutex);
		destroy_con_cq_qp(con);
		mutex_unlock(&con->con_mutex);
		destroy_cm(con);
		con = to_clt_con(clt_path->s.con[i]);
		if (con->c.cm_id) {
			stop_cm(con);
			mutex_lock(&con->con_mutex);
			destroy_con_cq_qp(con);
			mutex_unlock(&con->con_mutex);
			destroy_cm(con);
		}
		destroy_con(con);
	}
	/*
@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
			goto err;

		iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
		if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
		if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
			kfree(iu->buf);
			goto err;
		}

		iu->cqe.done  = done;
		iu->size      = size;
@@ -16,7 +16,13 @@ void gic_enable_of_quirks(const struct device_node *np,
			  const struct gic_quirk *quirks, void *data)
{
	for (; quirks->desc; quirks++) {
		if (!of_device_is_compatible(np, quirks->compatible))
		if (!quirks->compatible && !quirks->property)
			continue;
		if (quirks->compatible &&
		    !of_device_is_compatible(np, quirks->compatible))
			continue;
		if (quirks->property &&
		    !of_property_read_bool(np, quirks->property))
			continue;
		if (quirks->init(data))
			pr_info("GIC: enabling workaround for %s\n",

@@ -28,7 +34,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
		       void *data)
{
	for (; quirks->desc; quirks++) {
		if (quirks->compatible)
		if (quirks->compatible || quirks->property)
			continue;
		if (quirks->iidr != (quirks->mask & iidr))
			continue;
@@ -13,6 +13,7 @@
struct gic_quirk {
	const char *desc;
	const char *compatible;
	const char *property;
	bool (*init)(void *data);
	u32 iidr;
	u32 mask;
@@ -38,6 +38,7 @@

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

@@ -1718,6 +1719,15 @@ static bool gic_enable_quirk_msm8996(void *data)
	return true;
}

static bool gic_enable_quirk_mtk_gicr(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;

	return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
	struct gic_chip_data *d = data;

@@ -1753,6 +1763,11 @@ static const struct gic_quirk gic_quirks[] = {
		.compatible = "qcom,msm8996-gic-v3",
		.init	= gic_enable_quirk_msm8996,
	},
	{
		.desc	= "GICv3: Mediatek Chromebook GICR save problem",
		.property = "mediatek,broken-save-restore-fw",
		.init	= gic_enable_quirk_mtk_gicr,
	},
	{
		.desc	= "GICv3: HIP06 erratum 161010803",
		.iidr	= 0x0204043b,

@@ -1789,6 +1804,11 @@ static void gic_enable_nmi_support(void)
	if (!gic_prio_masking_enabled())
		return;

	if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
		pr_warn("Skipping NMI enable due to firmware issues\n");
		return;
	}

	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;
@@ -150,7 +150,7 @@ static const struct meson_gpio_irq_params s4_params = {
	INIT_MESON_S4_COMMON_DATA(82)
};

static const struct of_device_id meson_irq_gpio_matches[] = {
static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
	{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
	{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
	{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
@@ -1151,13 +1151,10 @@ static int do_resume(struct dm_ioctl *param)
	/* Do we need to load a new map ? */
	if (new_map) {
		sector_t old_size, new_size;
		int srcu_idx;

		/* Suspend if it isn't already suspended */
		old_map = dm_get_live_table(md, &srcu_idx);
		if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		dm_put_live_table(md, srcu_idx);
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
@@ -1750,13 +1750,15 @@ int dm_thin_remove_range(struct dm_thin_device *td,

int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	int r;
	int r = -EINVAL;
	uint32_t ref_count;

	down_read(&pmd->root_lock);
	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
	if (!r)
		*result = (ref_count > 1);
	if (!pmd->fail_io) {
		r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
		if (!r)
			*result = (ref_count > 1);
	}
	up_read(&pmd->root_lock);

	return r;

@@ -1764,10 +1766,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re

int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;
	int r = -EINVAL;

	pmd_write_lock(pmd);
	r = dm_sm_inc_blocks(pmd->data_sm, b, e);
	if (!pmd->fail_io)
		r = dm_sm_inc_blocks(pmd->data_sm, b, e);
	pmd_write_unlock(pmd);

	return r;

@@ -1775,10 +1778,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = 0;
	int r = -EINVAL;

	pmd_write_lock(pmd);
	r = dm_sm_dec_blocks(pmd->data_sm, b, e);
	if (!pmd->fail_io)
		r = dm_sm_dec_blocks(pmd->data_sm, b, e);
	pmd_write_unlock(pmd);

	return r;
@@ -398,8 +398,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
				      &op->bio);
	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
}

static void end_discard(struct discard_op *op, int r)
@@ -2801,6 +2801,10 @@ int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map) {
		/* avoid deadlock with fs/namespace.c:do_mount() */
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	}

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
@@ -1253,7 +1253,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
		/* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
		 * 4 octets FCS, 12 octets IFG.
		 */
		needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
		needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;

		dev_dbg(ocelot->dev,
			"port %d: max frame size %d needs %llu ps at speed %d\n",
@@ -189,8 +189,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
	prio_top = tc_nums - 1;
	prio_next = tc_nums - 2;

	/* Support highest prio and second prio tc in cbs mode */
	if (tc != prio_top && tc != prio_next)
@@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
void iavf_irq_enable_queues(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
}

@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);
	iavf_irq_enable_queues(adapter);

	if (flush)
		iavf_flush(hw);
@@ -40,7 +40,7 @@
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
@@ -6784,6 +6784,10 @@ int ice_down(struct ice_vsi *vsi)
	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	if (ice_is_xdp_ena_vsi(vsi))
		ice_for_each_xdp_txq(vsi, i)
			ice_clean_tx_ring(vsi->xdp_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);
@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
		 */
		ret_val = hw->nvm.ops.read(hw, last_word, 1,
				   &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto out;
	}

	/* Device's eeprom is always little-endian, word addressable */

@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
		hw->nvm.ops.update(hw);

	igb_set_fw_version(adapter);
out:
	kfree(eeprom_buff);
	return ret_val;
}
@@ -6893,6 +6893,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
	struct e1000_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;
	unsigned long flags;

	if (pin < 0 || pin >= IGB_N_SDP)
		return;

@@ -6900,9 +6901,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
	if (hw->mac.type == e1000_82580 ||
	    hw->mac.type == e1000_i354 ||
	    hw->mac.type == e1000_i350) {
		s64 ns = rd32(auxstmpl);
		u64 ns = rd32(auxstmpl);

		ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
		ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
		spin_lock_irqsave(&adapter->tmreg_lock, flags);
		ns = timecounter_cyc2time(&adapter->tc, ns);
		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
		ts = ns_to_timespec64(ns);
	} else {
		ts.tv_nsec = rd32(auxstmpl);
@@ -255,6 +255,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* Zero out the buffer ring */
	memset(tx_ring->tx_buffer_info, 0,
	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

@@ -268,7 +275,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);
	igc_disable_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

@@ -6678,6 +6685,9 @@ static void igc_remove(struct pci_dev *pdev)

	igc_ptp_stop(adapter);

	pci_disable_ptm(pdev);
	pci_clear_master(pdev);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
@@ -928,6 +928,9 @@ int octep_device_setup(struct octep_device *oct)
		oct->mmio[i].hw_addr =
			ioremap(pci_resource_start(oct->pdev, i * 2),
				pci_resource_len(oct->pdev, i * 2));
		if (!oct->mmio[i].hw_addr)
			goto unmap_prev;

		oct->mmio[i].mapped = 1;
	}

@@ -966,7 +969,9 @@ int octep_device_setup(struct octep_device *oct)
	return 0;

unsupported_dev:
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
	i = OCTEP_MMIO_REGIONS;
unmap_prev:
	while (i--)
		iounmap(oct->mmio[i].hw_addr);

	kfree(oct->conf);
@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */

@@ -4069,10 +4070,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,

static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	/* CN10k supports 72KB FIFO size and max packet size of 64k */
	if (rvu->hw->lbk_bufsize == 0x12000)
		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;

	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
@@ -1168,10 +1168,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
{
	struct npc_exact_table *table;
	u16 *cnt, old_cnt;
	bool promisc;

	table = rvu->hw->table;
	promisc = table->promisc_mode[drop_mcam_idx];

	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
	old_cnt = *cnt;

@@ -1183,16 +1181,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i

	*enable_or_disable_cam = false;

	if (promisc)
		goto done;

	/* If all rules are deleted and not already in promisc mode; disable cam */
	/* If all rules are deleted, disable cam */
	if (!*cnt && val < 0) {
		*enable_or_disable_cam = true;
		goto done;
	}

	/* If rule got added and not already in promisc mode; enable cam */
	/* If rule got added, enable cam */
	if (!old_cnt && val > 0) {
		*enable_or_disable_cam = true;
		goto done;

@@ -1447,7 +1442,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
	u32 drop_mcam_idx;
	bool *promisc;
	bool rc;
	u32 cnt;

	table = rvu->hw->table;

@@ -1470,17 +1464,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = false;
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}

@@ -1498,7 +1483,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
	u32 drop_mcam_idx;
	bool *promisc;
	bool rc;
	u32 cnt;

	table = rvu->hw->table;

@@ -1521,17 +1505,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = true;
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}
@@ -275,18 +275,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
	return pci_num_vf(dev->pdev) ? true : false;
}

static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
	/* LACP owner conditions:
	 * 1) Function is physical.
	 * 2) LAG is supported by FW.
	 * 3) LAG is managed by driver (currently the only option).
	 */
	return  MLX5_CAP_GEN(dev, vport_group_manager) &&
		   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
		    MLX5_CAP_GEN(dev, lag_master);
}

int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;

@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
		efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		efx->legacy_irq = efx->pci_dev->irq;
	}
@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;

@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		efx->legacy_irq = efx->pci_dev->irq;
	}
@@ -3865,7 +3865,6 @@ static int __stmmac_open(struct net_device *dev,

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);

@@ -3883,6 +3882,9 @@ static int stmmac_open(struct net_device *dev)
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		free_dma_desc_resources(priv, dma_conf);

	kfree(dma_conf);
	return ret;
}

@@ -5607,12 +5609,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		kfree(dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}
@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,

	skb->dev = addr->master->dev;
	skb->skb_iif = skb->dev->ifindex;
#if IS_ENABLED(CONFIG_IPV6)
	if (addr->atype == IPVL_IPV6)
		IP6CB(skb)->iif = skb->dev->ifindex;
#endif
	len = skb->len + ETH_HLEN;
	ipvlan_count_rx(addr->master, len, true, false);
out:
@@ -3981,17 +3981,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
	if (!secy->tx_sc.stats)
		return -ENOMEM;
	}

	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!secy->tx_sc.md_dst) {
		free_percpu(secy->tx_sc.stats);
		free_percpu(macsec->stats);
	if (!secy->tx_sc.md_dst)
		/* macsec and secy percpu stats will be freed when unregistering
		 * net_device in macsec_free_netdev()
		 */
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);
@@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_QUSGMII:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_GMII:
		return SPEED_1000;

@@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface)
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_QUSGMII:
		return SPEED_10000;

	case PHY_INTERFACE_MODE_25GBASER:

@@ -3263,6 +3263,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
}
EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);

/**
 * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS
 * @state: a pointer to a struct phylink_link_state.
 * @lpa: a 16 bit value which stores the USGMII auto-negotiation word
 *
 * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation
 * code word. Decode the USGMII code word and populate the corresponding fields
 * (speed, duplex) into the phylink_link_state structure. The structure for this
 * word is the same as the USXGMII word, except it only supports speeds up to
 * 1Gbps.
 */
static void phylink_decode_usgmii_word(struct phylink_link_state *state,
				       uint16_t lpa)
{
	switch (lpa & MDIO_USXGMII_SPD_MASK) {
	case MDIO_USXGMII_10:
		state->speed = SPEED_10;
		break;
	case MDIO_USXGMII_100:
		state->speed = SPEED_100;
		break;
	case MDIO_USXGMII_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->link = false;
		return;
	}

	if (lpa & MDIO_USXGMII_FULL_DUPLEX)
		state->duplex = DUPLEX_FULL;
	else
		state->duplex = DUPLEX_HALF;
}

/**
 * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
 * @state: a pointer to a &struct phylink_link_state.

@@ -3299,9 +3334,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,

	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_QUSGMII:
		phylink_decode_sgmii_word(state, lpa);
		break;
	case PHY_INTERFACE_MODE_QUSGMII:
		phylink_decode_usgmii_word(state, lpa);
		break;

	default:
		state->link = false;
@@ -1219,7 +1219,9 @@ static const struct usb_device_id products[] = {
	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
	{QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)},	/* Compal RXM-G1 */
	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
	{QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)},	/* Compal RXM-G1 */
	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev)

	ASSERT_RTNL();

	if (dev->type != ARPHRD_ETHER)
		return -EINVAL;

	ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
			    lapbeth_setup);
	if (!ndev)
@@ -3538,6 +3538,8 @@ static const struct pci_device_id nvme_id_table[] = {
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
@@ -811,6 +811,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs)
		if (!fragment->target) {
			pr_err("symbols in overlay, but not in live tree\n");
			ret = -EINVAL;
			of_node_put(node);
			goto err_out;
		}
@@ -5995,8 +5995,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency

#ifdef CONFIG_PCIE_DPC
/*
 * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
 * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
 * Intel Ice Lake, Tiger Lake and Alder Lake BIOS has a bug that clears
 * the DPC RP PIO Log Size of the integrated Thunderbolt PCIe Root
 * Ports.
 */
static void dpc_log_size(struct pci_dev *dev)
{

@@ -6019,6 +6020,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
@@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
	{ KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
	{ KE_IGNORE, 0x79, },  /* Charger type dectection notification */
	{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
	{ KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
	{ KE_KEY, 0x7c, { KEY_MICMUTE } },
	{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
	{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */

@@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
	{ KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
	{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
	{ KE_KEY, 0xB5, { KEY_CALC } },
	{ KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
	{ KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
 */
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
	struct ab8500_btemp *di = power_supply_get_drvdata(psy);

	class_for_each_device(power_supply_class, NULL,
			      di->btemp_psy, ab8500_btemp_get_ext_psy_data);
	class_for_each_device(power_supply_class, NULL, psy,
			      ab8500_btemp_get_ext_psy_data);
}

/* ab8500 btemp driver interrupts and their respective isr */
@@ -2407,10 +2407,8 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
 */
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
	struct ab8500_fg *di = power_supply_get_drvdata(psy);

	class_for_each_device(power_supply_class, NULL,
			      di->fg_psy, ab8500_fg_get_ext_psy_data);
	class_for_each_device(power_supply_class, NULL, psy,
			      ab8500_fg_get_ext_psy_data);
}

/**
@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
		return ret;

	mutex_lock(&bq27xxx_list_lock);
	list_for_each_entry(di, &bq27xxx_battery_devices, list) {
		cancel_delayed_work_sync(&di->work);
		schedule_delayed_work(&di->work, 0);
	}
	list_for_each_entry(di, &bq27xxx_battery_devices, list)
		mod_delayed_work(system_wq, &di->work, 0);
	mutex_unlock(&bq27xxx_list_lock);

	return ret;
@@ -355,6 +355,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
	struct power_supply *psy = dev_get_drvdata(dev);
	unsigned int *count = data;

	if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
		if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
			return 0;

	(*count)++;
	if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
		if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,

@@ -373,8 +377,8 @@ int power_supply_is_system_supplied(void)
				      __power_supply_is_system_supplied);

	/*
	 * If no power class device was found at all, most probably we are
	 * running on a desktop system, so assume we are on mains power.
	 * If no system scope power class device was found at all, most probably we
	 * are running on a desktop system, so assume we are on mains power.
	 */
	if (count == 0)
		return 1;
@@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev,

	if (ret < 0) {
		if (ret == -ENODATA)
			dev_dbg(dev, "driver has no data for `%s' property\n",
			dev_dbg_ratelimited(dev,
				"driver has no data for `%s' property\n",
				attr->attr.name);
		else if (ret != -ENODEV && ret != -EAGAIN)
			dev_err_ratelimited(dev,
@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
	return ret;
}

static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
{
	struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);

	power_supply_changed(data->battery);
}

static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
					    enum power_supply_property psp)
{

@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
	.num_properties		= ARRAY_SIZE(sc27xx_fgu_props),
	.get_property		= sc27xx_fgu_get_property,
	.set_property		= sc27xx_fgu_set_property,
	.external_power_changed	= sc27xx_fgu_external_power_changed,
	.external_power_changed	= power_supply_changed,
	.property_is_writeable	= sc27xx_fgu_property_is_writeable,
	.no_thermal		= true,
};
@@ -5257,7 +5257,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
	}

	rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
	if (!rdev->debugfs) {
	if (IS_ERR(rdev->debugfs)) {
		rdev_warn(rdev, "Failed to create debugfs directory\n");
		return;
	}

@@ -6179,7 +6179,7 @@ static int __init regulator_init(void)
	ret = class_register(&regulator_class);

	debugfs_root = debugfs_create_dir("regulator", NULL);
	if (!debugfs_root)
	if (IS_ERR(debugfs_root))
		pr_warn("regulator: Failed to create debugfs directory\n");

#ifdef CONFIG_DEBUG_FS
@@ -50,8 +50,6 @@
#define LLCC_TRP_WRSC_EN		0x21f20
#define LLCC_TRP_WRSC_CACHEABLE_EN	0x21f2c

#define BANK_OFFSET_STRIDE		0x80000

#define LLCC_VERSION_2_0_0_0		0x02000000
#define LLCC_VERSION_2_1_0_0		0x02010000

@@ -749,8 +747,8 @@ static int qcom_llcc_remove(struct platform_device *pdev)
	return 0;
}

static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
					  const char *name)
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
					  const char *name)
{
	void __iomem *base;
	struct regmap_config llcc_regmap_config = {

@@ -760,7 +758,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
		.fast_io = true,
	};

	base = devm_platform_ioremap_resource_byname(pdev, name);
	base = devm_platform_ioremap_resource(pdev, index);
	if (IS_ERR(base))
		return ERR_CAST(base);

@@ -778,6 +776,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
	const struct llcc_slice_config *llcc_cfg;
	u32 sz;
	u32 version;
	struct regmap *regmap;

	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
	if (!drv_data) {

@@ -785,21 +784,51 @@ static int qcom_llcc_probe(struct platform_device *pdev)
		goto err;
	}

	drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
	if (IS_ERR(drv_data->regmap)) {
		ret = PTR_ERR(drv_data->regmap);
		goto err;
	}

	drv_data->bcast_regmap =
		qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
	if (IS_ERR(drv_data->bcast_regmap)) {
		ret = PTR_ERR(drv_data->bcast_regmap);
	/* Initialize the first LLCC bank regmap */
	regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err;
	}

	cfg = of_device_get_match_data(&pdev->dev);

	ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
	if (ret)
		goto err;

	num_banks &= LLCC_LB_CNT_MASK;
	num_banks >>= LLCC_LB_CNT_SHIFT;
	drv_data->num_banks = num_banks;

	drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
	if (!drv_data->regmaps) {
		ret = -ENOMEM;
		goto err;
	}

	drv_data->regmaps[0] = regmap;

	/* Initialize rest of LLCC bank regmaps */
	for (i = 1; i < num_banks; i++) {
		char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);

		drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
		if (IS_ERR(drv_data->regmaps[i])) {
			ret = PTR_ERR(drv_data->regmaps[i]);
			kfree(base);
			goto err;
		}

		kfree(base);
	}

	drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
	if (IS_ERR(drv_data->bcast_regmap)) {
		ret = PTR_ERR(drv_data->bcast_regmap);
		goto err;
	}

	/* Extract version of the IP */
	ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
			  &version);

@@ -808,15 +837,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)

	drv_data->version = version;

	ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0],
			  &num_banks);
	if (ret)
		goto err;

	num_banks &= LLCC_LB_CNT_MASK;
	num_banks >>= LLCC_LB_CNT_SHIFT;
	drv_data->num_banks = num_banks;

	llcc_cfg = cfg->sct_data;
	sz = cfg->size;

@@ -824,16 +844,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
		if (llcc_cfg[i].slice_id > drv_data->max_slices)
			drv_data->max_slices = llcc_cfg[i].slice_id;

	drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
					 GFP_KERNEL);
	if (!drv_data->offsets) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < num_banks; i++)
		drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;

	drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
					      GFP_KERNEL);
	if (!drv_data->bitmap) {
@@ -1697,8 +1697,11 @@ static int cqspi_probe(struct platform_device *pdev)
			cqspi->slow_sram = true;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "xlnx,versal-ospi-1.0"))
			dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
					    "xlnx,versal-ospi-1.0")) {
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
			if (ret)
				goto probe_reset_failed;
		}
	}

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
@@ -975,7 +975,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;

@@ -1003,6 +1005,19 @@ static int dspi_setup(struct spi_device *spi)
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
@@ -504,6 +504,8 @@ target_setup_session(struct se_portal_group *tpg,

free_sess:
	transport_free_session(sess);
	return ERR_PTR(rc);

free_cnt:
	target_free_cmd_counter(cmd_cnt);
	return ERR_PTR(rc);
@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
	}

	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      dt->tx_ring ? dt->tx_ring->hop : -1,
				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
				      dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;

@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
		tb_ring_stop(dt->tx_ring);

	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
				       dt->tx_ring ? dt->tx_ring->hop : 0,
				       dt->tx_ring ? dt->tx_ring->hop : -1,
				       dt->rx_hopid,
				       dt->rx_ring ? dt->rx_ring->hop : 0);
				       dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret)
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
@@ -56,9 +56,14 @@ static int ring_interrupt_index(const struct tb_ring *ring)

static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		return;
	iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
		u32 val;

		val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
		iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
	} else {
		iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
	}
}

static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
@@ -607,6 +607,7 @@ static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;
	int ret;

@@ -674,8 +675,10 @@ static void tb_scan_port(struct tb_port *port)
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment Thunderbolt 2 and beyond (devices with LC) we

@@ -705,10 +708,14 @@ static void tb_scan_port(struct tb_port *port)
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));
	if (discovery) {
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	} else {
		ret = tb_switch_enable_clx(sw, TB_CL1);
		if (ret && ret != -EOPNOTSUPP)
			tb_sw_warn(sw, "failed to enable %s on upstream port\n",
				   tb_switch_clx_name(TB_CL1));
	}

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
@@ -278,6 +278,7 @@ lqasc_err_int(int irq, void *_port)
	struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);

	spin_lock_irqsave(&ltq_port->lock, flags);
	__raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR);
	/* clear any pending interrupts */
	asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
		ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
@@ -1982,6 +1982,11 @@ static int dwc3_remove(struct platform_device *pdev)
	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user