This is the 6.1.54 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUJd8EACgkQONu9yGCS
 aT7crQ//ZsUDeoTMsQBU6lB2g32LODO3jVPXdGdRjLvpLVMMnKXXwl3uTC20CQ23
 mtlN1mku6OtyPHgorKK9nJoNVTG78v0wXL8iCe5GHEKri45FwmcKlCxtIqboGCcg
 bpRkLqfZ/cNVFeV/81n7kMFI/GHST2qym/lJfUkK0BIewXOrJozHMyCriLhG5uc/
 XPmXN3LlGmT7Gb2KwJeAgJ9IWrVu5ZEWH6CnpjnLPXMA3FGJiBiYPeGaWRsrdjth
 MvACPXKPu5tKAmEs6eyAhB1YbXbswKviDuY+YHeTMoOVYCfJY29VQTI16F6HBGeM
 XVCo1AovZV+B9OrgnzYA8x5iZIKCdk/PzUhBi+uUb3nLJhGpD8ha7wOuBjehINeo
 22YY+7fmB7lZVSAe14hDH7GjKNdYpxntPVpWCMa1yoCUtqKB1O44/10mj0OjZ5j4
 EXKXIe6ho+0Uatubd+3hWRXimz4jzlp7UY1QM9ge5MGp0wOmdLu5Q91T70CrCEJO
 RxXZSkHDKGxokXubl4oF0bYYpB1kRVgsNEc4H5i2k+OheyDBmVv3vRPMzT/2yim/
 BEqwX6x2sE7kvbsyCO5VxIIVsnAystJEKzdVlRxmrcqkV0FCdqHjwZ9cr0mpqOse
 ogdnQgXQpaGUyhdYcpo4U9f+WGi5AHXs3IMbKQN4SDZGDgJHrss=
 =XhWe
 -----END PGP SIGNATURE-----

Merge 6.1.54 into android14-6.1-lts

Changes in 6.1.54
	net/ipv6: SKB symmetric hash should incorporate transport ports
	mm: multi-gen LRU: rename lrugen->lists[] to lrugen->folios[]
	Multi-gen LRU: fix per-zone reclaim
	io_uring: always lock in io_apoll_task_func
	io_uring: revert "io_uring fix multishot accept ordering"
	io_uring/net: don't overflow multishot accept
	io_uring: break out of iowq iopoll on teardown
	io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
	io_uring: Don't set affinity on a dying sqpoll thread
	drm/virtio: Conditionally allocate virtio_gpu_fence
	scsi: qla2xxx: Adjust IOCB resource on qpair create
	scsi: qla2xxx: Limit TMF to 8 per function
	scsi: qla2xxx: Fix deletion race condition
	scsi: qla2xxx: fix inconsistent TMF timeout
	scsi: qla2xxx: Fix command flush during TMF
	scsi: qla2xxx: Fix erroneous link up failure
	scsi: qla2xxx: Turn off noisy message log
	scsi: qla2xxx: Fix session hang in gnl
	scsi: qla2xxx: Fix TMF leak through
	scsi: qla2xxx: Remove unsupported ql2xenabledif option
	scsi: qla2xxx: Flush mailbox commands on chip reset
	scsi: qla2xxx: Fix smatch warn for qla_init_iocb_limit()
	scsi: qla2xxx: Error code did not return to upper layer
	scsi: qla2xxx: Fix firmware resource tracking
	null_blk: fix poll request timeout handling
	fbdev/ep93xx-fb: Do not assign to struct fb_info.dev
	clk: qcom: camcc-sc7180: fix async resume during probe
	drm/ast: Fix DRAM init on AST2200
	ASoC: tegra: Fix SFC conversion for few rates
	clk: qcom: turingcc-qcs404: fix missing resume during probe
	arm64: dts: renesas: rzg2l: Fix txdv-skew-psec typos
	send channel sequence number in SMB3 requests after reconnects
	memcg: drop kmem.limit_in_bytes
	mm: hugetlb_vmemmap: fix a race between vmemmap pmd split
	lib/test_meminit: allocate pages up to order MAX_ORDER
	parisc: led: Fix LAN receive and transmit LEDs
	parisc: led: Reduce CPU overhead for disk & lan LED computation
	cifs: update desired access while requesting for directory lease
	pinctrl: cherryview: fix address_space_handler() argument
	dt-bindings: clock: xlnx,versal-clk: drop select:false
	clk: imx: pll14xx: dynamically configure PLL for 393216000/361267200Hz
	clk: imx: pll14xx: align pdiv with reference manual
	clk: qcom: gcc-mdm9615: use proper parent for pll0_vote clock
	soc: qcom: qmi_encdec: Restrict string length in decode
	clk: qcom: dispcc-sm8450: fix runtime PM imbalance on probe errors
	clk: qcom: lpasscc-sc7280: fix missing resume during probe
	clk: qcom: q6sstop-qcs404: fix missing resume during probe
	clk: qcom: mss-sc7180: fix missing resume during probe
	NFS: Fix a potential data corruption
	NFSv4/pnfs: minor fix for cleanup path in nfs4_get_device_info
	bus: mhi: host: Skip MHI reset if device is in RDDM
	net: add SKB_HEAD_ALIGN() helper
	net: remove osize variable in __alloc_skb()
	net: factorize code in kmalloc_reserve()
	net: deal with integer overflows in kmalloc_reserve()
	kbuild: rpm-pkg: define _arch conditionally
	kbuild: do not run depmod for 'make modules_sign'
	tpm_crb: Fix an error handling path in crb_acpi_add()
	gfs2: Switch to wait_event in gfs2_logd
	gfs2: low-memory forced flush fixes
	mailbox: qcom-ipcc: fix incorrect num_chans counting
	kconfig: fix possible buffer overflow
	Input: iqs7222 - configure power mode before triggering ATI
	perf trace: Use zfree() to reduce chances of use after free
	perf trace: Really free the evsel->priv area
	pwm: atmel-tcb: Convert to platform remove callback returning void
	pwm: atmel-tcb: Harmonize resource allocation order
	pwm: atmel-tcb: Fix resource freeing in error path and remove
	backlight: gpio_backlight: Drop output GPIO direction check for initial power state
	Input: tca6416-keypad - always expect proper IRQ number in i2c client
	Input: tca6416-keypad - fix interrupt enable disbalance
	perf annotate bpf: Don't enclose non-debug code with an assert()
	x86/virt: Drop unnecessary check on extended CPUID level in cpu_has_svm()
	perf vendor events: Update the JSON/events descriptions for power10 platform
	perf vendor events: Drop some of the JSON/events for power10 platform
	perf vendor events: Drop STORES_PER_INST metric event for power10 platform
	perf top: Don't pass an ERR_PTR() directly to perf_session__delete()
	watchdog: intel-mid_wdt: add MODULE_ALIAS() to allow auto-load
	pwm: lpc32xx: Remove handling of PWM channels
	perf test stat_bpf_counters_cgrp: Fix shellcheck issue about logical operators
	perf test stat_bpf_counters_cgrp: Enhance perf stat cgroup BPF counter test
	drm/i915: mark requests for GuC virtual engines to avoid use-after-free
	blk-throttle: use calculate_io/bytes_allowed() for throtl_trim_slice()
	blk-throttle: consider 'carryover_ios/bytes' in throtl_trim_slice()
	cifs: use fs_context for automounts
	smb: propagate error code of extract_sharename()
	net/sched: fq_pie: avoid stalls in fq_pie_timer()
	sctp: annotate data-races around sk->sk_wmem_queued
	ipv4: annotate data-races around fi->fib_dead
	net: read sk->sk_family once in sk_mc_loop()
	net: fib: avoid warn splat in flow dissector
	xsk: Fix xsk_diag use-after-free error during socket cleanup
	drm/i915/gvt: Verify pfn is "valid" before dereferencing "struct page"
	drm/i915/gvt: Put the page reference obtained by KVM's gfn_to_pfn()
	drm/i915/gvt: Drop unused helper intel_vgpu_reset_gtt()
	net: use sk_forward_alloc_get() in sk_get_meminfo()
	net: annotate data-races around sk->sk_forward_alloc
	mptcp: annotate data-races around msk->rmem_fwd_alloc
	ipv4: ignore dst hint for multipath routes
	ipv6: ignore dst hint for multipath routes
	igb: disable virtualization features on 82580
	gve: fix frag_list chaining
	veth: Fixing transmit return status for dropped packets
	net: ipv6/addrconf: avoid integer underflow in ipv6_create_tempaddr
	net: phy: micrel: Correct bit assignments for phy_device flags
	bpf, sockmap: Fix skb refcnt race after locking changes
	af_unix: Fix data-races around user->unix_inflight.
	af_unix: Fix data-race around unix_tot_inflight.
	af_unix: Fix data-races around sk->sk_shutdown.
	af_unix: Fix data race around sk->sk_err.
	net: sched: sch_qfq: Fix UAF in qfq_dequeue()
	kcm: Destroy mutex in kcm_exit_net()
	octeontx2-af: Fix truncation of smq in CN10K NIX AQ enqueue mbox handler
	igc: Change IGC_MIN to allow set rx/tx value between 64 and 80
	igbvf: Change IGBVF_MIN to allow set rx/tx value between 64 and 80
	igb: Change IGB_MIN to allow set rx/tx value between 64 and 80
	s390/zcrypt: don't leak memory if dev_set_name() fails
	idr: fix param name in idr_alloc_cyclic() doc
	ip_tunnels: use DEV_STATS_INC()
	net: dsa: sja1105: fix bandwidth discrepancy between tc-cbs software and offload
	net: dsa: sja1105: fix -ENOSPC when replacing the same tc-cbs too many times
	net: dsa: sja1105: complete tc-cbs offload support on SJA1110
	bpf: Remove prog->active check for bpf_lsm and bpf_iter
	bpf: Invoke __bpf_prog_exit_sleepable_recur() on recursion in kern_sys_bpf().
	bpf: Assign bpf_tramp_run_ctx::saved_run_ctx before recursion check.
	netfilter: nftables: exthdr: fix 4-byte stack OOB write
	netfilter: nfnetlink_osf: avoid OOB read
	net: hns3: fix tx timeout issue
	net: hns3: fix byte order conversion issue in hclge_dbg_fd_tcam_read()
	net: hns3: fix debugfs concurrency issue between kfree buffer and read
	net: hns3: fix invalid mutex between tc qdisc and dcb ets command issue
	net: hns3: fix the port information display when sfp is absent
	net: hns3: remove GSO partial feature bit
	sh: boards: Fix CEU buffer size passed to dma_declare_coherent_memory()
	Multi-gen LRU: avoid race in inc_min_seq()
	net/mlx5: Free IRQ rmap and notifier on kernel shutdown
	ARC: atomics: Add compiler barrier to atomic operations...
	clocksource/drivers/arm_arch_timer: Disable timer before programming CVAL
	dmaengine: sh: rz-dmac: Fix destination and source data size setting
	jbd2: fix checkpoint cleanup performance regression
	jbd2: check 'jh->b_transaction' before removing it from checkpoint
	jbd2: correct the end of the journal recovery scan range
	ext4: add correct group descriptors and reserved GDT blocks to system zone
	ext4: fix memory leaks in ext4_fname_{setup_filename,prepare_lookup}
	f2fs: flush inode if atomic file is aborted
	f2fs: avoid false alarm of circular locking
	lib: test_scanf: Add explicit type cast to result initialization in test_number_prefix()
	hwspinlock: qcom: add missing regmap config for SFPB MMIO implementation
	ata: ahci: Add Elkhart Lake AHCI controller
	ata: pata_falcon: fix IO base selection for Q40
	ata: sata_gemini: Add missing MODULE_DESCRIPTION
	ata: pata_ftide010: Add missing MODULE_DESCRIPTION
	fuse: nlookup missing decrement in fuse_direntplus_link
	btrfs: zoned: do not zone finish data relocation block group
	btrfs: fix start transaction qgroup rsv double free
	btrfs: free qgroup rsv on io failure
	btrfs: don't start transaction when joining with TRANS_JOIN_NOSTART
	btrfs: set page extent mapped after read_folio in relocate_one_page
	btrfs: zoned: re-enable metadata over-commit for zoned mode
	btrfs: use the correct superblock to compare fsid in btrfs_validate_super
	drm/mxsfb: Disable overlay plane in mxsfb_plane_overlay_atomic_disable()
	mtd: rawnand: brcmnand: Fix crash during the panic_write
	mtd: rawnand: brcmnand: Fix potential out-of-bounds access in oob write
	mtd: spi-nor: Correct flags for Winbond w25q128
	mtd: rawnand: brcmnand: Fix potential false time out warning
	mtd: rawnand: brcmnand: Fix ECC level field setting for v7.2 controller
	drm/amd/display: enable cursor degamma for DCN3+ DRM legacy gamma
	drm/amd/display: prevent potential division by zero errors
	KVM: SVM: Take and hold ir_list_lock when updating vCPU's Physical ID entry
	KVM: SVM: Don't inject #UD if KVM attempts to skip SEV guest insn
	KVM: SVM: Get source vCPUs from source VM for SEV-ES intrahost migration
	KVM: nSVM: Check instead of asserting on nested TSC scaling support
	KVM: nSVM: Load L1's TSC multiplier based on L1 state, not L2 state
	KVM: SVM: Set target pCPU during IRTE update if target vCPU is running
	KVM: SVM: Skip VMSA init in sev_es_init_vmcb() if pointer is NULL
	MIPS: Fix CONFIG_CPU_DADDI_WORKAROUNDS `modules_install' regression
	perf hists browser: Fix hierarchy mode header
	perf test shell stat_bpf_counters: Fix test on Intel
	perf tools: Handle old data in PERF_RECORD_ATTR
	perf hists browser: Fix the number of entries for 'e' key
	drm/amd/display: always switch off ODM before committing more streams
	drm/amd/display: Remove wait while locked
	drm/amdgpu: register a dirty framebuffer callback for fbcon
	kunit: Fix wild-memory-access bug in kunit_free_suite_set()
	net: ipv4: fix one memleak in __inet_del_ifa()
	kselftest/runner.sh: Propagate SIGTERM to runner child
	selftests: Keep symlinks, when possible
	net/smc: use smc_lgr_list.lock to protect smc_lgr_list.list iterate in smcr_port_add
	net: stmmac: fix handling of zero coalescing tx-usecs
	net: ethernet: mvpp2_main: fix possible OOB write in mvpp2_ethtool_get_rxnfc()
	net: ethernet: mtk_eth_soc: fix possible NULL pointer dereference in mtk_hwlro_get_fdir_all()
	hsr: Fix uninit-value access in fill_frame_info()
	net: ethernet: adi: adin1110: use eth_broadcast_addr() to assign broadcast address
	net:ethernet:adi:adin1110: Fix forwarding offload
	net: dsa: sja1105: hide all multicast addresses from "bridge fdb show"
	net: dsa: sja1105: propagate exact error code from sja1105_dynamic_config_poll_valid()
	net: dsa: sja1105: fix multicast forwarding working only for last added mdb entry
	net: dsa: sja1105: serialize sja1105_port_mcast_flood() with other FDB accesses
	net: dsa: sja1105: block FDB accesses that are concurrent with a switch reset
	r8152: check budget for r8152_poll()
	kcm: Fix memory leak in error path of kcm_sendmsg()
	platform/mellanox: mlxbf-tmfifo: Drop the Rx packet if no more descriptors
	platform/mellanox: mlxbf-tmfifo: Drop jumbo frames
	platform/mellanox: mlxbf-pmc: Fix potential buffer overflows
	platform/mellanox: mlxbf-pmc: Fix reading of unprogrammed events
	platform/mellanox: NVSW_SN2201 should depend on ACPI
	net/tls: do not free tls_rec on async operation in bpf_exec_tx_verdict()
	net: macb: Enable PTP unicast
	net: macb: fix sleep inside spinlock
	ipv6: fix ip6_sock_set_addr_preferences() typo
	ipv6: Remove in6addr_any alternatives.
	tcp: Factorise sk_family-independent comparison in inet_bind2_bucket_match(_addr_any).
	tcp: Fix bind() regression for v4-mapped-v6 wildcard address.
	tcp: Fix bind() regression for v4-mapped-v6 non-wildcard address.
	ixgbe: fix timestamp configuration code
	kcm: Fix error handling for SOCK_DGRAM in kcm_sendmsg().
	MIPS: Only fiddle with CHECKFLAGS if `need-compiler'
	drm/amd/display: Fix a bug when searching for insert_above_mpcc
	Linux 6.1.54

Change-Id: I42dc80e7b812eb2bdd28575280b7b88169eb6d58
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 4f94769349
Greg Kroah-Hartman, 2023-10-17 12:03:09 +00:00
228 changed files with 2102 additions and 1319 deletions


@ -91,8 +91,6 @@ Brief summary of control files.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
memory.kmem.limit_in_bytes This knob is deprecated and writing to
it will return -ENOTSUPP.
memory.kmem.usage_in_bytes show current kernel memory allocation
memory.kmem.failcnt show the number of kernel memory usage
hits limits


@ -16,8 +16,6 @@ description: |
reads required input clock frequencies from the devicetree and acts as clock
provider for all clock consumers of PS clocks.
select: false
properties:
compatible:
const: xlnx,versal-clk


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 53
SUBLEVEL = 54
EXTRAVERSION =
NAME = Curry Ramen
@ -1982,7 +1982,9 @@ quiet_cmd_depmod = DEPMOD $(MODLIB)
modules_install:
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
ifndef modules_sign_only
$(call cmd,depmod)
endif
else # CONFIG_MODULES


@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return val; \
}
@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return orig; \
}


@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return val; \
}
@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return orig; \
}


@ -100,7 +100,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
txdv-skew-psec = <0>;
txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
@ -128,7 +128,7 @@ phy1: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
txdv-skew-psec = <0>;
txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;


@ -77,7 +77,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
txdv-skew-psec = <0>;
txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;


@ -80,7 +80,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
txdv-skew-psec = <0>;
txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
@ -107,7 +107,7 @@ phy1: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
txdv-skew-psec = <0>;
txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;


@ -1655,13 +1655,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
struct bpf_prog *p = l->link.prog;
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
if (p->aux->sleepable) {
enter_prog = (u64)__bpf_prog_enter_sleepable;
exit_prog = (u64)__bpf_prog_exit_sleepable;
} else {
enter_prog = (u64)__bpf_prog_enter;
exit_prog = (u64)__bpf_prog_exit;
}
enter_prog = (u64)bpf_trampoline_enter(p);
exit_prog = (u64)bpf_trampoline_exit(p);
if (l->cookie == 0) {
/* if cookie is zero, one instruction is enough to store it */


@ -308,8 +308,8 @@ ifdef CONFIG_64BIT
endif
endif
ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
cflags-y += -msym32 -DKBUILD_64BIT_SYM32
ifeq ($(KBUILD_SYM32), y)
cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32
else
ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
$(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
@ -350,7 +350,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
ifdef need-compiler
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(MINOR|PATCHLEVEL)?__' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')


@ -11,8 +11,8 @@
#define LED1 0x02
#define LED0 0x01 /* bottom (or furthest left) LED */
#define LED_LAN_TX LED0 /* for LAN transmit activity */
#define LED_LAN_RCV LED1 /* for LAN receive activity */
#define LED_LAN_RCV LED0 /* for LAN receive activity */
#define LED_LAN_TX LED1 /* for LAN transmit activity */
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */


@ -530,7 +530,7 @@ static int __init ap325rxa_devices_setup(void)
device_initialize(&ap325rxa_ceu_device.dev);
dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&ap325rxa_ceu_device);


@ -1454,15 +1454,13 @@ static int __init arch_setup(void)
device_initialize(&ecovec_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
ceu0_dma_membase +
CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[0]);
device_initialize(&ecovec_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
ceu1_dma_membase +
CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[1]);
gpiod_add_lookup_table(&cn12_power_gpiod_table);


@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
device_initialize(&kfr2r09_ceu_device.dev);
dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&kfr2r09_ceu_device);


@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
device_initialize(&migor_ceu_device.dev);
dma_declare_coherent_memory(&migor_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&migor_ceu_device);


@ -940,15 +940,13 @@ static int __init devices_setup(void)
device_initialize(&ms7724se_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
ceu0_dma_membase +
CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[0]);
device_initialize(&ms7724se_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
ceu1_dma_membase +
CEU_BUFFER_MEMORY_SIZE - 1);
CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[1]);
return platform_add_devices(ms7724se_devices,


@ -101,12 +101,6 @@ static inline int cpu_has_svm(const char **msg)
return 0;
}
if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
if (msg)
*msg = "can't execute cpuid_8000000a";
return 0;
}
if (!boot_cpu_has(X86_FEATURE_SVM)) {
if (msg)
*msg = "svm not available";


@ -810,6 +810,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
u64 entry;
/**
* In some cases, the existing irte is updated and re-set,
@ -843,6 +844,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
ir->data = pi->ir_data;
spin_lock_irqsave(&svm->ir_list_lock, flags);
/*
* Update the target pCPU for IOMMU doorbells if the vCPU is running.
* If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
* will update the pCPU info when the vCPU is awakened and/or scheduled in.
* See also avic_vcpu_load().
*/
entry = READ_ONCE(*(svm->avic_physical_id_cache));
if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
true, pi->ir_data);
list_add(&ir->node, &svm->ir_list);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
@ -1022,10 +1035,11 @@ static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
lockdep_assert_held(&svm->ir_list_lock);
if (!kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
@ -1033,19 +1047,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
* Here, we go through the per-vcpu ir_list to update all existing
* interrupt remapping table entry targeting this vcpu.
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
if (list_empty(&svm->ir_list))
goto out;
return 0;
list_for_each_entry(ir, &svm->ir_list, node) {
ret = amd_iommu_update_ga(cpu, r, ir->data);
if (ret)
break;
return ret;
}
out:
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
return ret;
return 0;
}
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@ -1053,6 +1063,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u64 entry;
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
lockdep_assert_preemption_disabled();
@ -1069,6 +1080,15 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_vcpu_is_blocking(vcpu))
return;
/*
* Grab the per-vCPU interrupt remapping lock even if the VM doesn't
* _currently_ have assigned devices, as that can change. Holding
* ir_list_lock ensures that either svm_ir_list_add() will consume
* up-to-date entry information, or that this task will wait until
* svm_ir_list_add() completes to set the new target pCPU.
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
entry = READ_ONCE(*(svm->avic_physical_id_cache));
entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
@ -1077,25 +1097,48 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
u64 entry;
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
lockdep_assert_preemption_disabled();
/*
* Note, reading the Physical ID entry outside of ir_list_lock is safe
* as only the pCPU that has loaded (or is loading) the vCPU is allowed
* to modify the entry, and preemption is disabled. I.e. the vCPU
* can't be scheduled out and thus avic_vcpu_{put,load}() can't run
* recursively.
*/
entry = READ_ONCE(*(svm->avic_physical_id_cache));
/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
return;
/*
* Take and hold the per-vCPU interrupt remapping lock while updating
* the Physical ID entry even though the lock doesn't protect against
* multiple writers (see above). Holding ir_list_lock ensures that
* either svm_ir_list_add() will consume up-to-date entry information,
* or that this task will wait until svm_ir_list_add() completes to
* mark the vCPU as not running.
*/
spin_lock_irqsave(&svm->ir_list_lock, flags);
avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)


@ -660,10 +660,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
WARN_ON(!svm->tsc_scaling_enabled);
if (svm->tsc_scaling_enabled &&
svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
nested_svm_update_tsc_ratio_msr(vcpu);
}
vmcb02->control.int_ctl =
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
@ -1022,8 +1021,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
}
if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
WARN_ON(!svm->tsc_scaling_enabled);
if (kvm_caps.has_tsc_control &&
vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
}


@ -1723,7 +1723,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
* Note, the source is not required to have the same number of
* vCPUs as the destination when migrating a vanilla SEV VM.
*/
src_vcpu = kvm_get_vcpu(dst_kvm, i);
src_vcpu = kvm_get_vcpu(src_kvm, i);
src_svm = to_svm(src_vcpu);
/*
@ -2951,9 +2951,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/*
* An SEV-ES guest requires a VMSA area that is a separate from the
* VMCB page. Do not include the encryption mask on the VMSA physical
* address since hardware will access it using the guest key.
* address since hardware will access it using the guest key. Note,
* the VMSA will be NULL if this vCPU is the destination for intrahost
* migration, and will be copied later.
*/
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
if (svm->sev_es.vmsa)
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);


@ -366,6 +366,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len);
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
bool commit_side_effects)
@ -386,6 +388,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
}
if (!svm->next_rip) {
/*
* FIXME: Drop this when kvm_emulate_instruction() does the
* right thing and treats "can't emulate" as outright failure
* for EMULTYPE_SKIP.
*/
if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
return 0;
if (unlikely(!commit_side_effects))
old_rflags = svm->vmcb->save.rflags;
@ -4592,16 +4602,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
* decode garbage.
*
* Inject #UD if KVM reached this point without an instruction buffer.
* In practice, this path should never be hit by a well-behaved guest,
* e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
* is still theoretically reachable, e.g. via unaccelerated fault-like
* AVIC access, and needs to be handled by KVM to avoid putting the
* guest into an infinite loop. Injecting #UD is somewhat arbitrary,
* but its the least awful option given lack of insight into the guest.
* If KVM is NOT trying to simply skip an instruction, inject #UD if
* KVM reached this point without an instruction buffer. In practice,
* this path should never be hit by a well-behaved guest, e.g. KVM
* doesn't intercept #UD or #GP for SEV guests, but this path is still
* theoretically reachable, e.g. via unaccelerated fault-like AVIC
* access, and needs to be handled by KVM to avoid putting the guest
* into an infinite loop. Injecting #UD is somewhat arbitrary, but
* its the least awful option given lack of insight into the guest.
*
* If KVM is trying to skip an instruction, simply resume the guest.
* If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
* will attempt to re-inject the INT3/INTO and skip the instruction.
* In that scenario, retrying the INT3/INTO and hoping the guest will
* make forward progress is the only option that has a chance of
* success (and in practice it will work the vast majority of the time).
*/
if (unlikely(!insn)) {
kvm_queue_exception(vcpu, UD_VECTOR);
if (!(emul_type & EMULTYPE_SKIP))
kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}


@ -1813,10 +1813,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_link *l, int stack_size,
int run_ctx_off, bool save_ret)
{
void (*exit)(struct bpf_prog *prog, u64 start,
struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
u64 (*enter)(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
u8 *prog = *pprog;
u8 *jmp_insn;
int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@ -1835,23 +1831,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
*/
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
if (p->aux->sleepable) {
enter = __bpf_prog_enter_sleepable;
exit = __bpf_prog_exit_sleepable;
} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
enter = __bpf_prog_enter_struct_ops;
exit = __bpf_prog_exit_struct_ops;
} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
enter = __bpf_prog_enter_lsm_cgroup;
exit = __bpf_prog_exit_lsm_cgroup;
}
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
if (emit_call(&prog, enter, prog))
if (emit_call(&prog, bpf_trampoline_enter(p), prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@ -1896,7 +1881,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
if (emit_call(&prog, exit, prog))
if (emit_call(&prog, bpf_trampoline_exit(p), prog))
return -EINVAL;
*pprog = prog;


@ -697,66 +697,6 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
unsigned long nr_slices, time_elapsed, io_trim;
u64 bytes_trim, tmp;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
/*
* If bps are unlimited (-1), then time slice don't get
* renewed. Don't try to trim the slice if slice is used. A new
* slice will start when appropriate.
*/
if (throtl_slice_used(tg, rw))
return;
/*
* A bio has been dispatched. Also adjust slice_end. It might happen
* that initially cgroup limit was very low resulting in high
* slice_end, but later limit was bumped up and bio was dispatched
* sooner, then we need to reduce slice_end. A high bogus slice_end
* is bad because it does not allow new slice to start.
*/
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw];
nr_slices = time_elapsed / tg->td->throtl_slice;
if (!nr_slices)
return;
tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
do_div(tmp, HZ);
bytes_trim = tmp;
io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
HZ;
if (!bytes_trim && !io_trim)
return;
if (tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
if (tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;
tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
throtl_log(&tg->service_queue,
"[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
static unsigned int calculate_io_allowed(u32 iops_limit,
unsigned long jiffy_elapsed)
{
@ -786,6 +726,67 @@ static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
unsigned long time_elapsed;
long long bytes_trim;
int io_trim;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
/*
* If bps are unlimited (-1), then time slice don't get
* renewed. Don't try to trim the slice if slice is used. A new
* slice will start when appropriate.
*/
if (throtl_slice_used(tg, rw))
return;
/*
* A bio has been dispatched. Also adjust slice_end. It might happen
* that initially cgroup limit was very low resulting in high
* slice_end, but later limit was bumped up and bio was dispatched
* sooner, then we need to reduce slice_end. A high bogus slice_end
* is bad because it does not allow new slice to start.
*/
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
time_elapsed = rounddown(jiffies - tg->slice_start[rw],
tg->td->throtl_slice);
if (!time_elapsed)
return;
bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
time_elapsed) +
tg->carryover_bytes[rw];
io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
tg->carryover_ios[rw];
if (bytes_trim <= 0 && io_trim <= 0)
return;
tg->carryover_bytes[rw] = 0;
if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
tg->carryover_ios[rw] = 0;
if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;
tg->slice_start[rw] += time_elapsed;
throtl_log(&tg->service_queue,
"[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
jiffies);
}
static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];


@ -422,6 +422,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,


@ -123,8 +123,8 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
int irq = 0;
void __iomem *base, *ctl_base;
int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
@ -165,26 +165,34 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
base = (void __iomem *)base_mem_res->start;
/* N.B. this assumes data_addr will be used for word-sized I/O only */
ap->ioaddr.data_addr = base + 0 + 0 * 4;
ap->ioaddr.error_addr = base + 1 + 1 * 4;
ap->ioaddr.feature_addr = base + 1 + 1 * 4;
ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
ap->ioaddr.lbal_addr = base + 1 + 3 * 4;
ap->ioaddr.lbam_addr = base + 1 + 4 * 4;
ap->ioaddr.lbah_addr = base + 1 + 5 * 4;
ap->ioaddr.device_addr = base + 1 + 6 * 4;
ap->ioaddr.status_addr = base + 1 + 7 * 4;
ap->ioaddr.command_addr = base + 1 + 7 * 4;
ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
base = (void __iomem *)ctl_mem_res->start;
ap->ioaddr.altstatus_addr = base + 1;
ap->ioaddr.ctl_addr = base + 1;
if (base_res) { /* only Q40 has IO resources */
io_offset = 0x10000;
reg_shift = 0;
base = (void __iomem *)base_res->start;
ctl_base = (void __iomem *)ctl_res->start;
} else {
base = (void __iomem *)base_mem_res->start;
ctl_base = (void __iomem *)ctl_mem_res->start;
}
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
(unsigned long)base_mem_res->start,
(unsigned long)ctl_mem_res->start);
ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift);
ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift);
ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift);
ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift);
ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift);
ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift);
ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift);
ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift);
ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift);
ap->ioaddr.altstatus_addr = ctl_base + io_offset;
ap->ioaddr.ctl_addr = ctl_base + io_offset;
ata_port_desc(ap, "cmd %px ctl %px data %px",
base, ctl_base, ap->ioaddr.data_addr);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res && irq_res->start > 0) {


@ -567,6 +567,7 @@ static struct platform_driver pata_ftide010_driver = {
};
module_platform_driver(pata_ftide010_driver);
MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);


@ -428,6 +428,7 @@ static struct platform_driver gemini_sata_driver = {
};
module_platform_driver(gemini_sata_driver);
MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);


@ -1585,9 +1585,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
struct request *rq;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
list_for_each_entry(rq, &list, queuelist)
blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
@ -1613,16 +1616,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
pr_info("rq %p timed out\n", rq);
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
/* The request may have completed meanwhile. */
if (blk_mq_request_completed(rq)) {
spin_unlock(&nq->poll_lock);
return BLK_EH_DONE;
}
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
pr_info("rq %p timed out\n", rq);
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources


@ -470,6 +470,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
/* Trigger MHI RESET so that the device will not access host memory */
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
/* Skip MHI RESET if in RDDM state */
if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
goto skip_mhi_reset;
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
@ -495,6 +499,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
}
}
skip_mhi_reset:
dev_dbg(dev,
"Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;


@ -775,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
return -EINVAL;
rc = -EINVAL;
goto out;
}
crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
rc = crb_map_pluton(dev, priv, buf, crb_pluton);
if (rc)
return rc;
goto out;
}
priv->sm = sm;


@ -62,8 +62,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
struct imx_pll14xx_clk imx_1443x_pll = {
@ -137,11 +135,10 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
/*
* Fractional PLL constrains:
*
* a) 6MHz <= prate <= 25MHz
* b) 1 <= p <= 63 (1 <= p <= 4 prate = 24MHz)
* c) 64 <= m <= 1023
* d) 0 <= s <= 6
* e) -32768 <= k <= 32767
* a) 1 <= p <= 63
* b) 64 <= m <= 1023
* c) 0 <= s <= 6
* d) -32768 <= k <= 32767
*
* fvco = (m * 65536 + k) * prate / (p * 65536)
*/
@ -184,7 +181,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
}
/* Finally calculate best values */
for (pdiv = 1; pdiv <= 7; pdiv++) {
for (pdiv = 1; pdiv <= 63; pdiv++) {
for (sdiv = 0; sdiv <= 6; sdiv++) {
/* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);


@ -1664,7 +1664,7 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
return ret;
}
ret = pm_runtime_get(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;


@ -1783,8 +1783,10 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
return ret;
regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
goto err_put_rpm;
}
clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
@ -1799,9 +1801,16 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
if (ret)
goto err_put_rpm;
pm_runtime_put(&pdev->dev);
return 0;
err_put_rpm:
pm_runtime_put_sync(&pdev->dev);
return ret;
}


@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "pll0_vote",
.parent_names = (const char *[]){ "pll8" },
.parent_names = (const char *[]){ "pll0" },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},


@ -115,9 +115,13 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
goto destroy_pm_clk;
goto err_destroy_pm_clk;
}
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
goto err_destroy_pm_clk;
if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
lpass_regmap_config.name = "qdsp6ss";
lpass_regmap_config.max_register = 0x3f;
@ -125,7 +129,7 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 0, desc);
if (ret)
goto destroy_pm_clk;
goto err_put_rpm;
}
lpass_regmap_config.name = "top_cc";
@ -134,11 +138,15 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 1, desc);
if (ret)
goto destroy_pm_clk;
goto err_put_rpm;
pm_runtime_put(&pdev->dev);
return 0;
destroy_pm_clk:
err_put_rpm:
pm_runtime_put_sync(&pdev->dev);
err_destroy_pm_clk:
pm_clk_destroy(&pdev->dev);
disable_pm_runtime:


@ -87,11 +87,22 @@ static int mss_sc7180_probe(struct platform_device *pdev)
return ret;
}
ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
if (ret < 0)
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;
ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
if (ret < 0)
goto err_put_rpm;
pm_runtime_put(&pdev->dev);
return 0;
err_put_rpm:
pm_runtime_put_sync(&pdev->dev);
return ret;
}
static const struct dev_pm_ops mss_sc7180_pm_ops = {


@ -174,21 +174,32 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
return ret;
}
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;
q6sstop_regmap_config.name = "q6sstop_tcsr";
desc = &tcsr_qcs404_desc;
ret = qcom_cc_probe_by_index(pdev, 1, desc);
if (ret)
return ret;
goto err_put_rpm;
q6sstop_regmap_config.name = "q6sstop_cc";
desc = &q6sstop_qcs404_desc;
ret = qcom_cc_probe_by_index(pdev, 0, desc);
if (ret)
return ret;
goto err_put_rpm;
pm_runtime_put(&pdev->dev);
return 0;
err_put_rpm:
pm_runtime_put_sync(&pdev->dev);
return ret;
}
static const struct dev_pm_ops q6sstopcc_pm_ops = {


@ -125,11 +125,22 @@ static int turingcc_probe(struct platform_device *pdev)
return ret;
}
ret = qcom_cc_probe(pdev, &turingcc_desc);
if (ret < 0)
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;
ret = qcom_cc_probe(pdev, &turingcc_desc);
if (ret < 0)
goto err_put_rpm;
pm_runtime_put(&pdev->dev);
return 0;
err_put_rpm:
pm_runtime_put_sync(&pdev->dev);
return ret;
}
static const struct dev_pm_ops turingcc_pm_ops = {


@ -773,6 +773,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
u64 cnt;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
/* Timer must be disabled before programming CVAL */
if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;


@ -9,6 +9,7 @@
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
*/
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@ -145,8 +146,8 @@ struct rz_dmac {
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
@ -609,13 +610,15 @@ static int rz_dmac_config(struct dma_chan *chan,
if (val == CHCFG_DS_INVALID)
return -EINVAL;
channel->chcfg |= CHCFG_FILL_DDS(val);
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
channel->chcfg |= CHCFG_FILL_SDS(val);
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
return 0;
}


@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@ -493,11 +495,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, unsigned int num_clips)
{
if (file)
return -ENOSYS;
return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
num_clips);
}
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = amdgpu_dirtyfb
};
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@ -1100,7 +1120,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_framebuffer_init(dev, &rfb->base,
&amdgpu_fb_funcs_atomic);
else
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;


@ -1269,6 +1269,13 @@ void handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;
/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
* legacy gamma setup.
*/
if (crtc_state->cm_is_degamma_srgb &&
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {


@ -82,3 +82,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)


@ -1977,12 +1977,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
/* Check for case where we are going from odm 2:1 to max
* pipe scenario. For these cases, we will call
* commit_minimal_transition_state() to exit out of odm 2:1
* first before processing new streams
/* ODM Combine 2:1 power optimization is only applied for single stream
* scenario, it uses extra pipes than needed to reduce power consumption
* We need to switch off this feature to make room for new streams.
*/
if (stream_count == dc->res_pool->pipe_count) {
if (stream_count > dc->current_state->stream_count &&
dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@ -3361,6 +3361,45 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
}
}
static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
* operations to complete. It should be invoked as a pre-amble prior
* to full update programming before asserting any HW locks.
*/
int pipe_idx;
int opp_inst;
int opp_count = dc->res_pool->pipe_count;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
continue;
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
}
}
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@ -3378,24 +3417,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL) {
/* wait for all double-buffer activity to clear on all pipes */
int pipe_idx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
}
}
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until


@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;
while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc != insert_above_mpcc)
while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}


@ -1515,17 +1515,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
int mpcc_inst = hubp->inst;
int opp_inst;
int opp_count = dc->res_pool->pipe_count;
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
hws->funcs.update_mpcc(dc, pipe_ctx);
}


@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
if (mid_point_frames_ceil &&
(last_render_time_in_us / mid_point_frames_ceil) <
in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
if (last_render_time_in_us / frames_to_insert <
in_out_vrr->min_duration_in_us){
if (frames_to_insert &&
(last_render_time_in_us / frames_to_insert) <
in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}


@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
;
} while (ast_read32(ast, 0x10100) != 0xa8);
} else {/* AST2100/1100 */
if (ast->chip == AST2100 || ast->chip == 2200)
if (ast->chip == AST2100 || ast->chip == AST2200)
dram_reg_info = ast2100_dram_table_data;
else
dram_reg_info = ast1100_dram_table_data;


@ -56,6 +56,7 @@ struct intel_breadcrumbs;
typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
struct intel_hw_status_page {
struct list_head timelines;


@ -5111,6 +5111,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
ve->base.mask = VIRTUAL_ENGINES;
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {


@@ -1179,6 +1179,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 {
 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	kvm_pfn_t pfn;
+	int ret;
 
 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
 		return 0;
@@ -1188,7 +1189,13 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
-	return PageTransHuge(pfn_to_page(pfn));
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	ret = PageTransHuge(pfn_to_page(pfn));
+	kvm_release_pfn_clean(pfn);
+	return ret;
 }
 
 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
@@ -2880,24 +2887,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 	ggtt_invalidate(gvt->gt);
 }
 
-/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
-	/* Shadow pages are only created when there is no page
-	 * table tracking data, so remove page tracking data after
-	 * removing the shadow pages.
-	 */
-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu, true);
-}
-
 /**
  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
  * @gvt: intel gvt device


@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
 void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,


@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->semaphore);
 
 	/*
-	 * Keep one request on each engine for reserved use under mempressure
-	 * do not use with virtual engines as this really is only needed for
-	 * kernel contexts.
+	 * Keep one request on each engine for reserved use under mempressure.
 	 *
 	 * We do not hold a reference to the engine here and so have to be
 	 * very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * know that if the rq->execution_mask is a single bit, rq->engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (!intel_engine_is_virtual(rq->engine) &&
-	    is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->execution_mask) &&
 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;


@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
 	writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
 }
 
+static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
+					       struct drm_atomic_state *state)
+{
+	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+
+	writel(0, mxsfb->base + LCDC_AS_CTRL);
+}
+
 static bool mxsfb_format_mod_supported(struct drm_plane *plane,
 				       uint32_t format,
 				       uint64_t modifier)
@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
 static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
 	.atomic_check = mxsfb_plane_atomic_check,
 	.atomic_update = mxsfb_plane_overlay_atomic_update,
+	.atomic_disable = mxsfb_plane_overlay_atomic_disable,
 };
 
 static const struct drm_plane_funcs mxsfb_plane_funcs = {


@@ -43,13 +43,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
 					 struct virtio_gpu_fence *fence,
 					 uint32_t ring_idx)
 {
-	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence_event *e = NULL;
 	int ret;
 
-	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
-		return 0;
-
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
@@ -121,6 +117,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence *out_fence;
+	bool drm_fence_event;
 	int ret;
 	uint32_t *bo_handles = NULL;
 	void __user *user_bo_handles = NULL;
@@ -216,15 +213,24 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_memdup;
 	}
 
-	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
-	if(!out_fence) {
-		ret = -ENOMEM;
-		goto out_unresv;
-	}
+	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+		drm_fence_event = true;
+	else
+		drm_fence_event = false;
 
-	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
-	if (ret)
-		goto out_unresv;
+	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+	    exbuf->num_bo_handles ||
+	    drm_fence_event)
+		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+	else
+		out_fence = NULL;
+
+	if (drm_fence_event) {
+		ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+		if (ret)
+			goto out_unresv;
+	}
 
 	if (out_fence_fd >= 0) {
 		sync_file = sync_file_create(&out_fence->f);


@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
 	.unlock		= qcom_hwspinlock_unlock,
 };
 
+static const struct regmap_config sfpb_mutex_config = {
+	.reg_bits		= 32,
+	.reg_stride		= 4,
+	.val_bits		= 32,
+	.max_register		= 0x100,
+	.fast_io		= true,
+};
+
 static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
 	.offset = 0x4,
 	.stride = 0x4,
+	.regmap_config = &sfpb_mutex_config,
 };
 
 static const struct regmap_config tcsr_msm8226_mutex_config = {


@@ -148,7 +148,7 @@ static int tca6416_keys_open(struct input_dev *dev)
 	if (chip->use_polling)
 		schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
 	else
-		enable_irq(chip->irqnum);
+		enable_irq(chip->client->irq);
 
 	return 0;
 }
@@ -160,7 +160,7 @@ static void tca6416_keys_close(struct input_dev *dev)
 	if (chip->use_polling)
 		cancel_delayed_work_sync(&chip->dwork);
 	else
-		disable_irq(chip->irqnum);
+		disable_irq(chip->client->irq);
 }
 
 static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
@@ -266,12 +266,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
 		goto fail1;
 
 	if (!chip->use_polling) {
-		if (pdata->irq_is_gpio)
-			chip->irqnum = gpio_to_irq(client->irq);
-		else
-			chip->irqnum = client->irq;
-
-		error = request_threaded_irq(chip->irqnum, NULL,
+		error = request_threaded_irq(client->irq, NULL,
 					     tca6416_keys_isr,
 					     IRQF_TRIGGER_FALLING |
 					     IRQF_ONESHOT | IRQF_NO_AUTOEN,
@@ -279,7 +274,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
 		if (error) {
 			dev_dbg(&client->dev,
 				"Unable to claim irq %d; error %d\n",
-				chip->irqnum, error);
+				client->irq, error);
 			goto fail1;
 		}
 	}
@@ -297,10 +292,8 @@ static int tca6416_keypad_probe(struct i2c_client *client,
 	return 0;
 
 fail2:
-	if (!chip->use_polling) {
-		free_irq(chip->irqnum, chip);
-		enable_irq(chip->irqnum);
-	}
+	if (!chip->use_polling)
+		free_irq(client->irq, chip);
 fail1:
 	input_free_device(input);
 	kfree(chip);
@@ -311,10 +304,8 @@ static void tca6416_keypad_remove(struct i2c_client *client)
 {
 	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
 
-	if (!chip->use_polling) {
-		free_irq(chip->irqnum, chip);
-		enable_irq(chip->irqnum);
-	}
+	if (!chip->use_polling)
+		free_irq(client->irq, chip);
 
 	input_unregister_device(chip->input);
 	kfree(chip);
@@ -324,10 +315,9 @@ static void tca6416_keypad_remove(struct i2c_client *client)
 static int tca6416_keypad_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(dev))
-		enable_irq_wake(chip->irqnum);
+		enable_irq_wake(client->irq);
 
 	return 0;
 }
@@ -335,10 +325,9 @@ static int tca6416_keypad_suspend(struct device *dev)
 static int tca6416_keypad_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(dev))
-		disable_irq_wake(chip->irqnum);
+		disable_irq_wake(client->irq);
 
 	return 0;
 }


@@ -1381,9 +1381,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
 	if (error)
 		return error;
 
-	sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
-	sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
 	for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
 		/*
 		 * Trigger ATI from streaming and normal-power modes so that
@@ -1561,8 +1558,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
 			return error;
 	}
 
-	if (dir == READ)
+	if (dir == READ) {
+		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
 		return 0;
+	}
 
 	return iqs7222_ati_trigger(iqs7222);
 }


@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
 			ret = of_parse_phandle_with_args(client_dn, "mboxes",
 						"#mbox-cells", j, &curr_ph);
 			of_node_put(curr_ph.np);
-			if (!ret && curr_ph.np == controller_dn) {
+			if (!ret && curr_ph.np == controller_dn)
 				ipcc->num_chans++;
-				break;
-			}
 		}
 	}


@@ -272,6 +272,7 @@ struct brcmnand_controller {
 	const unsigned int *page_sizes;
 	unsigned int page_size_shift;
 	unsigned int max_oob;
+	u32 ecc_level_shift;
 	u32 features;
 
 	/* for low-power standby/resume only */
@@ -596,6 +597,34 @@ enum {
 	INTFC_CTLR_READY		= BIT(31),
 };
 
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant throughout hardware revision, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+	/* See BRCMNAND_HAS_CACHE_MODE */
+	ACC_CONTROL_CACHE_MODE		= BIT(22),
+
+	/* See BRCMNAND_HAS_PREFETCH */
+	ACC_CONTROL_PREFETCH		= BIT(23),
+
+	ACC_CONTROL_PAGE_HIT		= BIT(24),
+	ACC_CONTROL_WR_PREEMPT		= BIT(25),
+	ACC_CONTROL_PARTIAL_PAGE	= BIT(26),
+	ACC_CONTROL_RD_ERASED		= BIT(27),
+	ACC_CONTROL_FAST_PGM_RDIN	= BIT(28),
+	ACC_CONTROL_WR_ECC		= BIT(30),
+	ACC_CONTROL_RD_ECC		= BIT(31),
+};
+
+#define	ACC_CONTROL_ECC_SHIFT		16
+/* Only for v7.2 */
+#define	ACC_CONTROL_ECC_EXT_SHIFT	13
+
 static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
 {
 #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
 		ctrl->features |= BRCMNAND_HAS_WP;
 
+	/* v7.2 has different ecc level shift in the acc register */
+	if (ctrl->nand_version == 0x0702)
+		ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+	else
+		ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
 	return 0;
 }
@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
 	return 0;
 }
 
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
-	/* See BRCMNAND_HAS_CACHE_MODE */
-	ACC_CONTROL_CACHE_MODE		= BIT(22),
-
-	/* See BRCMNAND_HAS_PREFETCH */
-	ACC_CONTROL_PREFETCH		= BIT(23),
-
-	ACC_CONTROL_PAGE_HIT		= BIT(24),
-	ACC_CONTROL_WR_PREEMPT		= BIT(25),
-	ACC_CONTROL_PARTIAL_PAGE	= BIT(26),
-	ACC_CONTROL_RD_ERASED		= BIT(27),
-	ACC_CONTROL_FAST_PGM_RDIN	= BIT(28),
-	ACC_CONTROL_WR_ECC		= BIT(30),
-	ACC_CONTROL_RD_ECC		= BIT(31),
-};
-
 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
 {
 	if (ctrl->nand_version == 0x0702)
@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
 	return GENMASK(4, 0);
 }
 
-#define NAND_ACC_CONTROL_ECC_SHIFT	16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
-
 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
 {
 	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
 
-	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+	mask <<= ACC_CONTROL_ECC_SHIFT;
 
 	/* v7.2 includes additional ECC levels */
-	if (ctrl->nand_version >= 0x0702)
-		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+	if (ctrl->nand_version == 0x0702)
+		mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
 
 	return mask;
 }
@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
 
 	if (en) {
 		acc_control |= ecc_flags; /* enable RD/WR ECC */
-		acc_control |= host->hwcfg.ecc_level
-			       << NAND_ACC_CONTROL_ECC_SHIFT;
+		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+		acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
 	} else {
 		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
 		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
 		cpu_relax();
 	} while (time_after(limit, jiffies));
 
+	/*
+	 * do a final check after time out in case the CPU was busy and the driver
+	 * did not get enough time to perform the polling to avoid false alarms
+	 */
+	val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+	if ((val & mask) == expected_val)
+		return 0;
+
 	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
 		 expected_val, val & mask);
@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
 			     const u8 *oob, int sas, int sector_1k)
 {
 	int tbytes = sas << sector_1k;
-	int j;
+	int j, k = 0;
+	u32 last = 0xffffffff;
+	u8 *plast = (u8 *)&last;
 
 	/* Adjust OOB values for 1K sector size */
 	if (sector_1k && (i & 0x01))
 		tbytes = max(0, tbytes - (int)ctrl->max_oob);
 	tbytes = min_t(int, tbytes, ctrl->max_oob);
 
-	for (j = 0; j < tbytes; j += 4)
+	/*
+	 * tbytes may not be multiple of words. Make sure we don't read out of
+	 * the boundary and stop at last word.
+	 */
+	for (j = 0; (j + 3) < tbytes; j += 4)
 		oob_reg_write(ctrl, j,
 				(oob[j + 0] << 24) |
 				(oob[j + 1] << 16) |
 				(oob[j + 2] << 8) |
 				(oob[j + 3] << 0));
+
+	/* handle the remaing bytes */
+	while (j < tbytes)
+		plast[k++] = oob[j++];
+
+	if (tbytes & 0x3)
+		oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
 	return tbytes;
 }
@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
 	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
 
-	BUG_ON(ctrl->cmd_pending != 0);
+	/*
+	 * If we came here through _panic_write and there is a pending
+	 * command, try to wait for it. If it times out, rather than
+	 * hitting BUG_ON, just return so we don't crash while crashing.
+	 */
+	if (oops_in_progress) {
+		if (ctrl->cmd_pending &&
+		    bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+			return;
+	} else
+		BUG_ON(ctrl->cmd_pending != 0);
 	ctrl->cmd_pending = cmd;
 
 	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
@@ -2561,7 +2601,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
 	tmp &= ~brcmnand_ecc_level_mask(ctrl);
 	tmp &= ~brcmnand_spare_area_mask(ctrl);
 	if (ctrl->nand_version >= 0x0302) {
-		tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+		tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
 		tmp |= cfg->spare_area_size;
 	}
 	nand_writereg(ctrl, acc_control_offs, tmp);


@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
 		NO_SFDP_FLAGS(SECT_4K) },
 	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
 		NO_SFDP_FLAGS(SECT_4K) },
-	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
-		NO_SFDP_FLAGS(SECT_4K) },
+	{ "w25q128", INFO(0xef4018, 0, 0, 0)
+		PARSE_SFDP
+		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
 	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
 		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
 		.fixups = &w25q256_fixups },


@@ -132,6 +132,8 @@ struct sja1105_info {
 	int max_frame_mem;
 	int num_ports;
 	bool multiple_cascade_ports;
+	/* Every {port, TXQ} has its own CBS shaper */
+	bool fixed_cbs_mapping;
 	enum dsa_tag_protocol tag_proto;
 	const struct sja1105_dynamic_table_ops *dyn_ops;
 	const struct sja1105_table_ops *static_ops;
@@ -262,6 +264,8 @@ struct sja1105_private {
 	 * the switch doesn't confuse them with one another.
 	 */
 	struct mutex mgmt_lock;
+	/* Serializes accesses to the FDB */
+	struct mutex fdb_lock;
 	/* PTP two-step TX timestamp ID, and its serialization lock */
 	spinlock_t ts_id_lock;
 	u8 ts_id;


@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
 static int
 sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
-				  struct sja1105_dyn_cmd *cmd,
-				  const struct sja1105_dynamic_table_ops *ops)
+				  const struct sja1105_dynamic_table_ops *ops,
+				  void *entry, bool check_valident,
+				  bool check_errors)
 {
 	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+	struct sja1105_dyn_cmd cmd = {};
 	int rc;
 
-	/* We don't _need_ to read the full entry, just the command area which
-	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
-	 * buffer that contains the full entry too. Additionally, our API
-	 * doesn't really know how many bytes into the buffer does the command
-	 * area really begin. So just read back the whole entry.
-	 */
+	/* Read back the whole entry + command structure. */
 	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
 			      ops->packed_size);
 	if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
 	/* Unpack the command structure, and return it to the caller in case it
 	 * needs to perform further checks on it (VALIDENT).
 	 */
-	memset(cmd, 0, sizeof(*cmd));
-	ops->cmd_packing(packed_buf, cmd, UNPACK);
+	ops->cmd_packing(packed_buf, &cmd, UNPACK);
 
 	/* Hardware hasn't cleared VALID => still working on it */
-	return cmd->valid ? -EAGAIN : 0;
+	if (cmd.valid)
+		return -EAGAIN;
+
+	if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+		return -ENOENT;
+
+	if (check_errors && cmd.errors)
+		return -EINVAL;
+
+	/* Don't dereference possibly NULL pointer - maybe caller
+	 * only wanted to see whether the entry existed or not.
+	 */
+	if (entry)
+		ops->entry_packing(packed_buf, entry, UNPACK);
+
+	return 0;
 }
 
 /* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
  */
 static int
 sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
-				     struct sja1105_dyn_cmd *cmd,
-				     const struct sja1105_dynamic_table_ops *ops)
+				     const struct sja1105_dynamic_table_ops *ops,
+				     void *entry, bool check_valident,
+				     bool check_errors)
 {
-	int rc;
-
-	return read_poll_timeout(sja1105_dynamic_config_poll_valid,
-				 rc, rc != -EAGAIN,
-				 SJA1105_DYNAMIC_CONFIG_SLEEP_US,
-				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
-				 false, priv, cmd, ops);
+	int err, rc;
+
+	err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+				rc, rc != -EAGAIN,
+				SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+				SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+				false, priv, ops, entry, check_valident,
+				check_errors);
+	return err < 0 ? err : rc;
 }
 
 /* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
 	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
-	if (rc < 0) {
-		mutex_unlock(&priv->dynamic_config_lock);
-		return rc;
-	}
-
-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
-	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
-		return rc;
+		goto out;
 
-	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
-		return -ENOENT;
+	rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+	mutex_unlock(&priv->dynamic_config_lock);
 
-	/* Don't dereference possibly NULL pointer - maybe caller
-	 * only wanted to see whether the entry existed or not.
-	 */
-	if (entry)
-		ops->entry_packing(packed_buf, entry, UNPACK);
-	return 0;
+	return rc;
 }
 
 int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
 	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
-	if (rc < 0) {
-		mutex_unlock(&priv->dynamic_config_lock);
-		return rc;
-	}
-
-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
-	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
-		return rc;
+		goto out;
 
-	cmd = (struct sja1105_dyn_cmd) {0};
-	ops->cmd_packing(packed_buf, &cmd, UNPACK);
-	if (cmd.errors)
-		return -EINVAL;
+	rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+	mutex_unlock(&priv->dynamic_config_lock);
 
-	return 0;
+	return rc;
 }
 
 static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)


@@ -1805,6 +1805,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
 			   struct dsa_db db)
 {
 	struct sja1105_private *priv = ds->priv;
+	int rc;
 
 	if (!vid) {
 		switch (db.type) {
@@ -1819,12 +1820,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
 		}
 	}
 
-	return priv->info->fdb_add_cmd(ds, port, addr, vid);
+	mutex_lock(&priv->fdb_lock);
+	rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
 }
 
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid,
-			   struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+			     const unsigned char *addr, u16 vid,
+			     struct dsa_db db)
 {
 	struct sja1105_private *priv = ds->priv;
 
@@ -1844,6 +1849,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
 	return priv->info->fdb_del_cmd(ds, port, addr, vid);
 }
 
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+			   const unsigned char *addr, u16 vid,
+			   struct dsa_db db)
+{
+	struct sja1105_private *priv = ds->priv;
+	int rc;
+
+	mutex_lock(&priv->fdb_lock);
+	rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
+}
+
 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 			    dsa_fdb_dump_cb_t *cb, void *data)
 {
@@ -1875,13 +1894,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 		if (!(l2_lookup.destports & BIT(port)))
 			continue;
 
-		/* We need to hide the FDB entry for unknown multicast */
-		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
-		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
-			continue;
-
 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
 
+		/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+		 * only wants to see unicast
+		 */
+		if (is_multicast_ether_addr(macaddr))
+			continue;
+
 		/* We need to hide the dsa_8021q VLANs from the user. */
 		if (vid_is_dsa_8021q(l2_lookup.vlanid))
 			l2_lookup.vlanid = 0;
@@ -1905,6 +1925,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 	};
 	int i;
 
+	mutex_lock(&priv->fdb_lock);
+
 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
 		struct sja1105_l2_lookup_entry l2_lookup = {0};
 		u8 macaddr[ETH_ALEN];
@@ -1918,7 +1940,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 		if (rc) {
 			dev_err(ds->dev, "Failed to read FDB: %pe\n",
 				ERR_PTR(rc));
-			return;
+			break;
 		}
 
 		if (!(l2_lookup.destports & BIT(port)))
@@ -1930,14 +1952,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
 
-		rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+		rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
 		if (rc) {
 			dev_err(ds->dev,
 				"Failed to delete FDB entry %pM vid %lld: %pe\n",
 				macaddr, l2_lookup.vlanid, ERR_PTR(rc));
-			return;
+			break;
 		}
 	}
+
+	mutex_unlock(&priv->fdb_lock);
 }
 
 static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2122,11 +2146,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
 }
 
 #define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+				   int port, int prio)
+{
+	int i;
+
+	if (priv->info->fixed_cbs_mapping) {
+		i = SJA1110_FIXED_CBS(port, prio);
+		if (i >= 0 && i < priv->info->num_cbs_shapers)
+			return i;
+
+		return -1;
+	}
+
+	for (i = 0; i < priv->info->num_cbs_shapers; i++)
+		if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+			return i;
+
+	return -1;
+}
 
 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
 {
 	int i;
 
+	if (priv->info->fixed_cbs_mapping)
+		return -1;
+
 	for (i = 0; i < priv->info->num_cbs_shapers; i++)
 		if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
 			return i;
@@ -2157,14 +2206,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
 {
 	struct sja1105_private *priv = ds->priv;
 	struct sja1105_cbs_entry *cbs;
+	s64 port_transmit_rate_kbps;
 	int index;
 
 	if (!offload->enable)
 		return sja1105_delete_cbs_shaper(priv, port, offload->queue);
 
-	index = sja1105_find_unused_cbs_shaper(priv);
-	if (index < 0)
-		return -ENOSPC;
+	/* The user may be replacing an existing shaper */
+	index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+	if (index < 0) {
+		/* That isn't the case - see if we can allocate a new one */
+		index = sja1105_find_unused_cbs_shaper(priv);
+		if (index < 0)
+			return -ENOSPC;
+	}
 
 	cbs = &priv->cbs[index];
 	cbs->port = port;
@@ -2174,9 +2229,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
 	 */
 	cbs->credit_hi = offload->hicredit;
 	cbs->credit_lo = abs(offload->locredit);
-	/* User space is in kbits/sec, hardware in bytes/sec */
-	cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
-	cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+	/* User space is in kbits/sec, while the hardware in bytes/sec times
+	 * link speed. Since the given offload->sendslope is good only for the
+	 * current link speed anyway, and user space is likely to reprogram it
+	 * when that changes, don't even bother to track the port's link speed,
+	 * but deduce the port transmit rate from idleslope - sendslope.
+	 */
+	port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+	cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+				  port_transmit_rate_kbps);
+	cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+				  port_transmit_rate_kbps);
 	/* Convert the negative values from 64-bit 2's complement
 	 * to 32-bit 2's complement (for the case of 0x80000000 whose
 	 * negative is still negative).
@@ -2241,6 +2304,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
 	int rc, i;
 	s64 now;
 
+	mutex_lock(&priv->fdb_lock);
 	mutex_lock(&priv->mgmt_lock);
 
 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2355,6 +2419,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
 		goto out;
 out:
 	mutex_unlock(&priv->mgmt_lock);
+	mutex_unlock(&priv->fdb_lock);
 
 	return rc;
 }
@@ -2924,7 +2989,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 {
 	struct sja1105_l2_lookup_entry *l2_lookup;
 	struct sja1105_table *table;
-	int match;
+	int match, rc;
+
+	mutex_lock(&priv->fdb_lock);
 
 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
 	l2_lookup = table->entries;
@@ -2937,7 +3004,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 	if (match == table->entry_count) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Could not find FDB entry for unknown multicast");
-		return -ENOSPC;
+		rc = -ENOSPC;
+		goto out;
 	}
 
 	if (flags.val & BR_MCAST_FLOOD)
@@ -2945,10 +3013,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
 	else
 		l2_lookup[match].destports &= ~BIT(to);
 
-	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
-					    l2_lookup[match].index,
-					    &l2_lookup[match],
-					    true);
+	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+					  l2_lookup[match].index,
+					  &l2_lookup[match], true);
+out:
+	mutex_unlock(&priv->fdb_lock);
+
+	return rc;
 }
 
 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3318,6 +3389,7 @@ static int sja1105_probe(struct spi_device *spi)
 	mutex_init(&priv->ptp_data.lock);
 	mutex_init(&priv->dynamic_config_lock);
 	mutex_init(&priv->mgmt_lock);
+	mutex_init(&priv->fdb_lock);
 	spin_lock_init(&priv->ts_id_lock);
 
 	rc = sja1105_parse_dt(priv);


@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
 	.can_limit_mcast_flood	= true,
 	.multiple_cascade_ports	= true,
+	.fixed_cbs_mapping	= true,
 	.ptp_ts_bits		= 32,
 	.ptpegr_ts_bytes	= 8,
 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
 	.can_limit_mcast_flood	= true,
 	.multiple_cascade_ports	= true,
+	.fixed_cbs_mapping	= true,
 	.ptp_ts_bits		= 32,
 	.ptpegr_ts_bytes	= 8,
 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
 	.can_limit_mcast_flood	= true,
 	.multiple_cascade_ports	= true,
+	.fixed_cbs_mapping	= true,
 	.ptp_ts_bits		= 32,
 	.ptpegr_ts_bytes	= 8,
 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
 	.can_limit_mcast_flood	= true,
 	.multiple_cascade_ports	= true,
+	.fixed_cbs_mapping	= true,
 	.ptp_ts_bits		= 32,
 	.ptpegr_ts_bytes	= 8,
 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,


@@ -740,7 +740,7 @@ static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
 	u32 port_rules = 0;
 	u8 mask[ETH_ALEN];
 
-	memset(mask, 0xFF, ETH_ALEN);
+	eth_broadcast_addr(mask);
 
 	if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
 		port_rules = adin1110_port_rules(port_priv, true, true);
@@ -761,7 +761,7 @@ static int adin1110_set_mac_address(struct net_device *netdev,
 		return -EADDRNOTAVAIL;
 
 	eth_hw_addr_set(netdev, dev_addr);
-	memset(mask, 0xFF, ETH_ALEN);
+	eth_broadcast_addr(mask);
 
 	mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
 	port_rules = adin1110_port_rules(port_priv, true, false);
@@ -1251,7 +1251,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv
 		goto out;
 
 	/* Allow only BPDUs to be passed to the CPU */
-	memset(mask, 0xFF, ETH_ALEN);
+	eth_broadcast_addr(mask);
 	port_rules = adin1110_port_rules(port_priv, true, false);
 	ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
 					 mask, port_rules);
@@ -1365,8 +1365,8 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
 		return -ENOMEM;
 
 	other_port = priv->ports[!port_priv->nr];
-	port_rules = adin1110_port_rules(port_priv, false, true);
-	memset(mask, 0xFF, ETH_ALEN);
+	port_rules = adin1110_port_rules(other_port, false, true);
+	eth_broadcast_addr(mask);
 
 	return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
 					  mask, port_rules);


@@ -95,6 +95,8 @@
 #define GEM_SA4B		0x00A0 /* Specific4 Bottom */
 #define GEM_SA4T		0x00A4 /* Specific4 Top */
 #define GEM_WOL			0x00b8 /* Wake on LAN */
+#define GEM_RXPTPUNI		0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI		0x00D8 /* PTP TX Unicast address */
 #define GEM_EFTSH		0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
 #define GEM_EFRSH		0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
 #define GEM_PEFTSH		0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -245,6 +247,8 @@
 #define MACB_TZQ_OFFSET		12 /* Transmit zero quantum pause frame */
 #define MACB_TZQ_SIZE		1
 #define MACB_SRTSM_OFFSET	15 /* Store Receive Timestamp to Memory */
+#define MACB_PTPUNI_OFFSET	20 /* PTP Unicast packet enable */
+#define MACB_PTPUNI_SIZE	1
 #define MACB_OSSMODE_OFFSET	24 /* Enable One Step Synchro Mode */
 #define MACB_OSSMODE_SIZE	1
 #define MACB_MIIONRGMII_OFFSET	28 /* MII Usage on RGMII Interface */


@@ -288,6 +288,11 @@ static void macb_set_hwaddr(struct macb *bp)
 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
 	macb_or_gem_writel(bp, SA1T, top);
 
+	if (gem_has_ptp(bp)) {
+		gem_writel(bp, RXPTPUNI, bottom);
+		gem_writel(bp, TXPTPUNI, bottom);
+	}
+
 	/* Clear unused address register sets */
 	macb_or_gem_writel(bp, SA2B, 0);
 	macb_or_gem_writel(bp, SA2T, 0);
@@ -700,8 +705,6 @@ static void macb_mac_link_up(struct phylink_config *config,
 		if (rx_pause)
 			ctrl |= MACB_BIT(PAE);
 
-		macb_set_tx_clk(bp, speed);
-
 		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
 		 * cleared the pipeline and control registers.
 		 */
@@ -721,8 +724,15 @@ static void macb_mac_link_up(struct phylink_config *config,
 
 	spin_unlock_irqrestore(&bp->lock, flags);
 
-	/* Enable Rx and Tx */
-	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+		macb_set_tx_clk(bp, speed);
+
+	/* Enable Rx and Tx; Enable PTP unicast */
+	ctrl = macb_readl(bp, NCR);
+	if (gem_has_ptp(bp))
+		ctrl |= MACB_BIT(PTPUNI);
+
+	macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
 
 	netif_tx_wake_all_queues(ndev);
 }


@@ -492,7 +492,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 		if (!skb)
 			return -1;
 
-		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+		if (rx->ctx.skb_tail == rx->ctx.skb_head)
+			skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+		else
+			rx->ctx.skb_tail->next = skb;
 		rx->ctx.skb_tail = skb;
 		num_frags = 0;
 	}


@@ -797,6 +797,7 @@ struct hnae3_tc_info {
 	u8 max_tc; /* Total number of TCs */
 	u8 num_tc; /* Total number of enabled TCs */
 	bool mqprio_active;
+	bool dcb_ets_active;
 };
 
 #define HNAE3_MAX_DSCP			64


@@ -1406,9 +1406,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 	return 0;
 
 out:
-	mutex_destroy(&handle->dbgfs_lock);
 	debugfs_remove_recursive(handle->hnae3_dbgfs);
 	handle->hnae3_dbgfs = NULL;
+	mutex_destroy(&handle->dbgfs_lock);
 	return ret;
 }
@@ -1416,6 +1416,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
 {
 	u32 i;
 
+	debugfs_remove_recursive(handle->hnae3_dbgfs);
+	handle->hnae3_dbgfs = NULL;
+
 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
 		if (handle->dbgfs_buf[i]) {
 			kvfree(handle->dbgfs_buf[i]);
@@ -1423,8 +1426,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
 		}
 
 	mutex_destroy(&handle->dbgfs_lock);
-	debugfs_remove_recursive(handle->hnae3_dbgfs);
-	handle->hnae3_dbgfs = NULL;
 }
 
 void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)

View File

@@ -2102,8 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 	 */
 	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
 	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+		/* This smp_store_release() pairs with smp_load_aquire() in
+		 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+		 * is updated.
+		 */
+		smp_store_release(&ring->last_to_use, ring->next_to_use);
 		hns3_tx_push_bd(ring, num);
-		WRITE_ONCE(ring->last_to_use, ring->next_to_use);
 		return;
 	}
@@ -2114,6 +2118,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 		return;
 	}
 
+	/* This smp_store_release() pairs with smp_load_aquire() in
+	 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+	 */
+	smp_store_release(&ring->last_to_use, ring->next_to_use);
+
 	if (ring->tqp->mem_base)
 		hns3_tx_mem_doorbell(ring);
 	else
@@ -2121,7 +2130,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 			ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 
 	ring->pending_buf = 0;
-	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
 }
 
 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
@@ -3307,8 +3315,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
-	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-
 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
@@ -3562,9 +3568,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
 				  int *bytes, int *pkts, int budget)
 {
-	/* pair with ring->last_to_use update in hns3_tx_doorbell(),
-	 * smp_store_release() is not used in hns3_tx_doorbell() because
-	 * the doorbell operation already have the needed barrier operation.
+	/* This smp_load_acquire() pairs with smp_store_release() in
+	 * hns3_tx_doorbell().
 	 */
 	int ltu = smp_load_acquire(&ring->last_to_use);
 	int ntc = ring->next_to_clean;


@@ -776,7 +776,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 		hns3_get_ksettings(h, cmd);
 		break;
 	case HNAE3_MEDIA_TYPE_FIBER:
-		if (module_type == HNAE3_MODULE_TYPE_CR)
+		if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+			cmd->base.port = PORT_OTHER;
+		else if (module_type == HNAE3_MODULE_TYPE_CR)
 			cmd->base.port = PORT_DA;
 		else
 			cmd->base.port = PORT_FIBRE;


@@ -251,7 +251,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 	int ret;
 
 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
-	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+	    h->kinfo.tc_info.mqprio_active)
 		return -EINVAL;
 
 	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
@@ -267,10 +267,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 	}
 
 	hclge_tm_schd_info_update(hdev, num_tc);
-	if (num_tc > 1)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
 
 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
 	if (ret)
@@ -463,7 +460,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
 	struct hclge_vport *vport = hclge_get_vport(h);
 	struct hclge_dev *hdev = vport->back;
 
-	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+	if (h->kinfo.tc_info.mqprio_active)
 		return 0;
 
 	return hdev->dcbx_cap;
@@ -587,7 +584,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
 		return -EBUSY;
 
-	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+	kinfo = &vport->nic.kinfo;
+	if (kinfo->tc_info.dcb_ets_active)
 		return -EINVAL;
 
 	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
@@ -601,7 +599,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
 	if (ret)
 		return ret;
 
-	kinfo = &vport->nic.kinfo;
 	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
 	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
 	kinfo->tc_info.mqprio_active = tc > 0;
@@ -610,13 +607,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
 	if (ret)
 		goto err_out;
 
-	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
-	if (tc > 1)
-		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
-
 	return hclge_notify_init_up(hdev);
 
 err_out:


@@ -1517,7 +1517,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
 	struct hclge_desc desc[3];
 	int pos = 0;
 	int ret, i;
-	u32 *req;
+	__le32 *req;
 
 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
@@ -1542,22 +1542,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
 			 tcam_msg.loc);
 
 	/* tcam_data0 ~ tcam_data1 */
-	req = (u32 *)req1->tcam_data;
+	req = (__le32 *)req1->tcam_data;
 	for (i = 0; i < 2; i++)
 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
-				 "%08x\n", *req++);
+				 "%08x\n", le32_to_cpu(*req++));
 
 	/* tcam_data2 ~ tcam_data7 */
-	req = (u32 *)req2->tcam_data;
+	req = (__le32 *)req2->tcam_data;
 	for (i = 0; i < 6; i++)
 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
-				 "%08x\n", *req++);
+				 "%08x\n", le32_to_cpu(*req++));
 
 	/* tcam_data8 ~ tcam_data12 */
-	req = (u32 *)req3->tcam_data;
+	req = (__le32 *)req3->tcam_data;
 	for (i = 0; i < 5; i++)
 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
-				 "%08x\n", *req++);
+				 "%08x\n", le32_to_cpu(*req++));
 
 	return ret;
 }


@@ -11132,6 +11132,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
 
 static void hclge_info_show(struct hclge_dev *hdev)
 {
+	struct hnae3_handle *handle = &hdev->vport->nic;
 	struct device *dev = &hdev->pdev->dev;
 
 	dev_info(dev, "PF info begin:\n");
@@ -11148,9 +11149,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
 	dev_info(dev, "This is %s PF\n",
 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
 	dev_info(dev, "DCB %s\n",
-		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
 	dev_info(dev, "MQPRIO %s\n",
-		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
 	dev_info(dev, "Default tx spare buffer size: %u\n",
 		 hdev->tx_spare_buf_size);


@@ -916,8 +916,6 @@ struct hclge_dev {
 
 #define HCLGE_FLAG_MAIN			BIT(0)
 #define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
-#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
-#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
 	u32 flag;
 
 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */


@@ -34,11 +34,11 @@ struct igb_adapter;
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD		256
 #define IGB_DEFAULT_TX_WORK	128
-#define IGB_MIN_TXD		80
+#define IGB_MIN_TXD		64
 #define IGB_MAX_TXD		4096
 
 #define IGB_DEFAULT_RXD		256
-#define IGB_MIN_RXD		80
+#define IGB_MIN_RXD		64
 #define IGB_MAX_RXD		4096
 
 #define IGB_DEFAULT_ITR		3 /* dynamic */


@@ -3877,8 +3877,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* Virtualization features not supported on i210 family. */
-	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+	/* Virtualization features not supported on i210 and 82580 family. */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+	    (hw->mac.type == e1000_82580))
 		return;
 
 	/* Of the below we really only want the effect of getting


@@ -39,11 +39,11 @@ enum latency_range {
 /* Tx/Rx descriptor defines */
 #define IGBVF_DEFAULT_TXD	256
 #define IGBVF_MAX_TXD		4096
-#define IGBVF_MIN_TXD		80
+#define IGBVF_MIN_TXD		64
 
 #define IGBVF_DEFAULT_RXD	256
 #define IGBVF_MAX_RXD		4096
-#define IGBVF_MIN_RXD		80
+#define IGBVF_MIN_RXD		64
 
 #define IGBVF_MIN_ITR_USECS	10 /* 100000 irq/sec */
 #define IGBVF_MAX_ITR_USECS	10000 /* 100 irq/sec */


@@ -354,11 +354,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
 /* TX/RX descriptor defines */
 #define IGC_DEFAULT_TXD		256
 #define IGC_DEFAULT_TX_WORK	128
-#define IGC_MIN_TXD		80
+#define IGC_MIN_TXD		64
 #define IGC_MAX_TXD		4096
 
 #define IGC_DEFAULT_RXD		256
-#define IGC_MIN_RXD		80
+#define IGC_MIN_RXD		64
 #define IGC_MAX_RXD		4096
 
 /* Supported Rx Buffer Sizes */


@@ -995,6 +995,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
 	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
 	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+	u32 aflags = adapter->flags;
 	bool is_l2 = false;
 	u32 regval;
@@ -1012,20 +1013,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 	case HWTSTAMP_FILTER_NONE:
 		tsync_rx_ctl = 0;
 		tsync_rx_mtrl = 0;
-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1039,8 +1040,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
 		is_l2 = true;
 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_NTP_ALL:
@@ -1051,7 +1052,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		if (hw->mac.type >= ixgbe_mac_X550) {
 			tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
 			config->rx_filter = HWTSTAMP_FILTER_ALL;
-			adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+			aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
 			break;
 		}
 		fallthrough;
@@ -1062,8 +1063,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 		 * Delay_Req messages and hardware does not support
 		 * timestamping all packets => return error
 		 */
-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
 		config->rx_filter = HWTSTAMP_FILTER_NONE;
 		return -ERANGE;
 	}
@@ -1095,8 +1094,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 			       IXGBE_TSYNCRXCTL_TYPE_ALL |
 			       IXGBE_TSYNCRXCTL_TSIP_UT_EN;
 		config->rx_filter = HWTSTAMP_FILTER_ALL;
-		adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
-		adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+		aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+		aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
 		is_l2 = true;
 		break;
 	default:
@@ -1129,6 +1128,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_FLUSH(hw);
 
+	/* configure adapter flags only when HW is actually configured */
+	adapter->flags = aflags;
+
 	/* clear TX/RX time stamp registers, just to be sure */
 	ixgbe_ptp_clear_tx_timestamp(adapter);
 	IXGBE_READ_REG(hw, IXGBE_RXSTMPH);


@@ -5578,6 +5578,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
 		break;
 	case ETHTOOL_GRXCLSRLALL:
 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+			if (loc == info->rule_cnt) {
+				ret = -EMSGSIZE;
+				break;
+			}
+
 			if (port->rfs_rules[i])
 				rules[loc++] = i;
 		}


@@ -834,6 +834,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
 	return 0;
 }
 
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+			       u16 *smq, u16 *smq_mask)
+{
+	struct nix_cn10k_aq_enq_req *aq_req;
+
+	if (!is_rvu_otx2(rvu)) {
+		aq_req = (struct nix_cn10k_aq_enq_req *)req;
+		*smq = aq_req->sq.smq;
+		*smq_mask = aq_req->sq_mask.smq;
+	} else {
+		*smq = req->sq.smq;
+		*smq_mask = req->sq_mask.smq;
+	}
+}
+
 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 				   struct nix_aq_enq_req *req,
 				   struct nix_aq_enq_rsp *rsp)
@@ -845,6 +860,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 	struct rvu_block *block;
 	struct admin_queue *aq;
 	struct rvu_pfvf *pfvf;
+	u16 smq, smq_mask;
 	void *ctx, *mask;
 	bool ena;
 	u64 cfg;
@@ -916,13 +932,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 	if (rc)
 		return rc;
 
+	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
 	     (req->op == NIX_AQ_INSTOP_WRITE &&
-	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
-				     pcifunc, req->sq.smq))
+				     pcifunc, smq))
 			return NIX_AF_ERR_AQ_ENQUEUE;
 	}


@@ -2698,6 +2698,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
 	int i;
 
 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+
 		if (mac->hwlro_ip[i]) {
 			rule_locs[cnt] = i;
 			cnt++;


@@ -97,7 +97,6 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 	else if (ip_version == 6) {
 		int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
-		struct in6_addr zerov6 = {};
 
 		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				     outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
@@ -105,8 +104,8 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
 				     outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
 		memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
 		memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
-		if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
-		    !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
+		if (ipv6_addr_any(&tun_attr->dst_ip.v6) ||
+		    ipv6_addr_any(&tun_attr->src_ip.v6))
 			return 0;
 	}
 #endif
#endif

View File

@ -123,18 +123,32 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
return ret;
}
static void irq_release(struct mlx5_irq *irq)
/* mlx5_system_free_irq - Free an IRQ
* @irq: IRQ to free
*
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
* cleanup system resoruces but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
/* free_irq requires that affinity_hint and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
}
static void irq_release(struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
mlx5_system_free_irq(irq);
free_cpumask_var(irq->mask);
kfree(irq);
}
@ -597,7 +611,7 @@ static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
unsigned long index;
xa_for_each(&pool->irqs, index, irq)
free_irq(irq->irqn, &irq->nh);
mlx5_system_free_irq(irq);
}
static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)


@@ -2692,9 +2692,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 	/* We still have pending packets, let's call for a new scheduling */
 	if (tx_q->dirty_tx != tx_q->cur_tx)
-		hrtimer_start(&tx_q->txtimer,
-			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
-			      HRTIMER_MODE_REL);
+		stmmac_tx_timer_arm(priv, queue);
 
 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2975,9 +2973,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+	u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+	if (!tx_coal_timer)
+		return;
 
 	hrtimer_start(&tx_q->txtimer,
-		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+		      STMMAC_COAL_TIMER(tx_coal_timer),
 		      HRTIMER_MODE_REL);
 }


@@ -2628,6 +2628,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
 	struct r8152 *tp = container_of(napi, struct r8152, napi);
 	int work_done;
 
+	if (!budget)
+		return 0;
+
 	work_done = rx_bottom(tp, budget);
 
 	if (work_done < budget) {


@@ -313,6 +313,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct veth_rq *rq = NULL;
+	int ret = NETDEV_TX_OK;
 	struct net_device *rcv;
 	int length = skb->len;
 	bool use_napi = false;
@@ -345,6 +346,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 drop:
 		atomic64_inc(&priv->dropped);
+		ret = NET_XMIT_DROP;
 	}
 
 	if (use_napi)
@@ -352,7 +354,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	rcu_read_unlock();
 
-	return NETDEV_TX_OK;
+	return ret;
 }
 
 static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)


@@ -56,8 +56,8 @@
 static int led_type __read_mostly = -1;
 static unsigned char lastleds;	/* LED state from most recent update */
 static unsigned int led_heartbeat __read_mostly = 1;
-static unsigned int led_diskio    __read_mostly = 1;
-static unsigned int led_lanrxtx   __read_mostly = 1;
+static unsigned int led_diskio    __read_mostly;
+static unsigned int led_lanrxtx   __read_mostly;
 static char lcd_text[32]          __read_mostly;
 static char lcd_text_default[32]  __read_mostly;
 static int  lcd_no_led_support    __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */


@@ -1698,7 +1698,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
 	struct intel_community_context *cctx;
 	struct intel_community *community;
 	struct device *dev = &pdev->dev;
-	struct acpi_device *adev = ACPI_COMPANION(dev);
 	struct intel_pinctrl *pctrl;
 	acpi_status status;
 	unsigned int i;
@@ -1766,7 +1765,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	status = acpi_install_address_space_handler(adev->handle,
+	status = acpi_install_address_space_handler(ACPI_HANDLE(dev),
 						    community->acpi_space_id,
 						    chv_pinctrl_mmio_access_handler,
 						    NULL, pctrl);
@@ -1783,7 +1782,7 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
 	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
 	const struct intel_community *community = &pctrl->communities[0];
 
-	acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
+	acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev),
 					  community->acpi_space_id,
 					  chv_pinctrl_mmio_access_handler);


@@ -80,8 +80,8 @@ config MLXBF_PMC
 
 config NVSW_SN2201
 	tristate "Nvidia SN2201 platform driver support"
-	depends on HWMON
-	depends on I2C
+	depends on HWMON && I2C
+	depends on ACPI || COMPILE_TEST
 	select REGMAP_I2C
 	help
 	  This driver provides support for the Nvidia SN2201 platform.


@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+	{ 0x0, "DISABLE" },
 	{ 0xa0, "TPIO_DATA_BEAT" },
 	{ 0xa1, "TDMA_DATA_BEAT" },
 	{ 0xa2, "MAP_DATA_BEAT" },
@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+	{ 0x0, "DISABLE" },
 	{ 0xa0, "TPIO_DATA_BEAT" },
 	{ 0xa1, "TDMA_DATA_BEAT" },
 	{ 0xa2, "MAP_DATA_BEAT" },
@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+	{ 0x0, "DISABLE" },
 	{ 0x100, "ECC_SINGLE_ERROR_CNT" },
 	{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
 	{ 0x114, "SERR_INJ" },
@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+	{ 0x0, "DISABLE" },
 	{ 0xc0, "RXREQ_MSS" },
 	{ 0xc1, "RXDAT_MSS" },
 	{ 0xc2, "TXRSP_MSS" },
@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+	{ 0x0, "DISABLE" },
 	{ 0x45, "HNF_REQUESTS" },
 	{ 0x46, "HNF_REJECTS" },
 	{ 0x47, "ALL_BUSY" },
@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
 };
 
 static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+	{ 0x0, "DISABLE" },
 	{ 0x12, "CDN_REQ" },
 	{ 0x13, "DDN_REQ" },
 	{ 0x14, "NDN_REQ" },
@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
 				uint64_t *result)
 {
 	uint32_t perfcfg_offset, perfval_offset;
-	uint64_t perfmon_cfg, perfevt, perfctl;
+	uint64_t perfmon_cfg, perfevt;
 
 	if (cnt_num >= pmc->block[blk_num].counters)
 		return -EINVAL;
@@ -904,25 +910,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
 	perfval_offset = perfcfg_offset +
 			 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
 
-	/* Set counter in "read" mode */
-	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
-				 MLXBF_PMC_PERFCTL);
-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
-	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
-			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
-		return -EFAULT;
-
-	/* Check if the counter is enabled */
-
-	if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
-			   MLXBF_PMC_READ_REG_64, &perfctl))
-		return -EFAULT;
-
-	if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
-		return -EINVAL;
-
 	/* Set counter in "read" mode */
 	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
 				 MLXBF_PMC_PERFEVT);
@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
 	} else
 		return -EINVAL;
 
-	return sprintf(buf, "0x%llx\n", value);
+	return sysfs_emit(buf, "0x%llx\n", value);
 }
 
 /* Store function for "counter" sysfs files */
@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
 	err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
 	if (err)
-		return sprintf(buf, "No event being monitored\n");
+		return sysfs_emit(buf, "No event being monitored\n");
 
 	evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
 	if (!evt_name)
 		return -EINVAL;
 
-	return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+	return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
 }
 
 /* Store function for "event" sysfs files */
@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
 		return -EINVAL;
 
 	for (i = 0, buf[0] = '\0'; i < size; ++i) {
-		len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
-			       events[i].evt_name);
-		if (len > PAGE_SIZE)
+		len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+				events[i].evt_num, events[i].evt_name);
+		if (len >= PAGE_SIZE)
 			break;
 		strcat(buf, e_info);
 		ret = len;
@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
 
 	value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
 
-	return sprintf(buf, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 /* Store function for "enable" sysfs files - only for l3cache */


@ -56,6 +56,7 @@ struct mlxbf_tmfifo;
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
* @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@ -72,6 +73,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@ -83,6 +85,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};
/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
typeof(_r) (r) = (_r); \
(r->desc_head == &r->drop_desc ? true : false); })
/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -195,7 +205,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
 static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";

 /* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)

 /* Supported virtio-net features. */
 #define MLXBF_TMFIFO_NET_FEATURES \
@@ -243,6 +253,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
 		vring->align = SMP_CACHE_BYTES;
 		vring->index = i;
 		vring->vdev_id = tm_vdev->vdev.id.device;
+		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
 		dev = &tm_vdev->vdev.dev;

 		size = vring_size(vring->num, vring->align);
@@ -348,7 +359,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
 	return len;
 }

-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
 {
 	struct vring_desc *desc_head;
 	u32 len = 0;
@@ -577,19 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
 	if (vring->cur_len + sizeof(u64) <= len) {
 		/* The whole word. */
-		if (is_rx)
-			memcpy(addr + vring->cur_len, &data, sizeof(u64));
-		else
-			memcpy(&data, addr + vring->cur_len, sizeof(u64));
+		if (!IS_VRING_DROP(vring)) {
+			if (is_rx)
+				memcpy(addr + vring->cur_len, &data,
+				       sizeof(u64));
+			else
+				memcpy(&data, addr + vring->cur_len,
+				       sizeof(u64));
+		}
 		vring->cur_len += sizeof(u64);
 	} else {
 		/* Leftover bytes. */
-		if (is_rx)
-			memcpy(addr + vring->cur_len, &data,
-			       len - vring->cur_len);
-		else
-			memcpy(&data, addr + vring->cur_len,
-			       len - vring->cur_len);
+		if (!IS_VRING_DROP(vring)) {
+			if (is_rx)
+				memcpy(addr + vring->cur_len, &data,
+				       len - vring->cur_len);
+			else
+				memcpy(&data, addr + vring->cur_len,
+				       len - vring->cur_len);
+		}
 		vring->cur_len = len;
 	}
@@ -606,13 +623,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
  * flag is set.
  */
 static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
-				     struct vring_desc *desc,
+				     struct vring_desc **desc,
 				     bool is_rx, bool *vring_change)
 {
 	struct mlxbf_tmfifo *fifo = vring->fifo;
 	struct virtio_net_config *config;
 	struct mlxbf_tmfifo_msg_hdr hdr;
 	int vdev_id, hdr_len;
+	bool drop_rx = false;

 	/* Read/Write packet header. */
 	if (is_rx) {
@@ -632,8 +650,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
 			if (ntohs(hdr.len) >
 			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
 					      config->mtu) +
-			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
-				return;
+			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
+				drop_rx = true;
 	} else {
 		vdev_id = VIRTIO_ID_CONSOLE;
 		hdr_len = 0;
@@ -648,16 +666,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
 			if (!tm_dev2)
 				return;
-			vring->desc = desc;
+			vring->desc = *desc;
 			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
 			*vring_change = true;
 		}
+
+		if (drop_rx && !IS_VRING_DROP(vring)) {
+			if (vring->desc_head)
+				mlxbf_tmfifo_release_pkt(vring);
+			*desc = &vring->drop_desc;
+			vring->desc_head = *desc;
+			vring->desc = *desc;
+		}
+
 		vring->pkt_len = ntohs(hdr.len) + hdr_len;
 	} else {
 		/* Network virtio has an extra header. */
 		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
 			   sizeof(struct virtio_net_hdr) : 0;
-		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
 		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
 			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
 		hdr.len = htons(vring->pkt_len - hdr_len);
@@ -690,15 +717,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
 	/* Get the descriptor of the next packet. */
 	if (!vring->desc) {
 		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
-		if (!desc)
-			return false;
+		if (!desc) {
+			/* Drop next Rx packet to avoid stuck. */
+			if (is_rx) {
+				desc = &vring->drop_desc;
+				vring->desc_head = desc;
+				vring->desc = desc;
+			} else {
+				return false;
+			}
+		}
 	} else {
 		desc = vring->desc;
 	}

 	/* Beginning of a packet. Start to Rx/Tx packet header. */
 	if (vring->pkt_len == 0) {
-		mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
 		(*avail)--;

 		/* Return if new packet is for another ring. */
@@ -724,17 +759,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
 		vring->rem_len -= len;

 		/* Get the next desc on the chain. */
-		if (vring->rem_len > 0 &&
+		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
 		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
 			idx = virtio16_to_cpu(vdev, desc->next);
 			desc = &vr->desc[idx];
 			goto mlxbf_tmfifo_desc_done;
 		}

-		/* Done and release the pending packet. */
-		mlxbf_tmfifo_release_pending_pkt(vring);
+		/* Done and release the packet. */
 		desc = NULL;
 		fifo->vring[is_rx] = NULL;
+		if (!IS_VRING_DROP(vring)) {
+			mlxbf_tmfifo_release_pkt(vring);
+		} else {
+			vring->pkt_len = 0;
+			vring->desc_head = NULL;
+			vring->desc = NULL;
+			return false;
+		}

 		/*
 		 * Make sure the load/store are in order before
@@ -914,7 +956,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
 		/* Release the pending packet. */
 		if (vring->desc)
-			mlxbf_tmfifo_release_pending_pkt(vring);
+			mlxbf_tmfifo_release_pkt(vring);

 		vq = vring->vq;
 		if (vq) {
 			vring->vq = NULL;
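
Aside (not part of the diff): IS_VRING_DROP() above uses a GCC statement expression so its argument is evaluated only once, and marks "drop mode" purely by pointer identity: desc_head is pointed at the embedded dummy drop_desc instead of a real descriptor. A standalone sketch of the idiom with a hypothetical struct:

#include <stdio.h>
#include <stdbool.h>

struct ring {
	int *desc_head;
	int drop_desc;	/* dummy slot; head pointing here means "drop" */
};

/* Same shape as IS_VRING_DROP(): bind the argument once, then compare
 * pointer identity against the embedded dummy descriptor. */
#define IS_RING_DROP(_r) ({ \
	typeof(_r) r = (_r); \
	(r->desc_head == &r->drop_desc ? true : false); })

int main(void)
{
	struct ring ring = { .desc_head = NULL };

	printf("%d\n", IS_RING_DROP(&ring));	/* 0: normal mode */
	ring.desc_head = &ring.drop_desc;	/* enter drop mode */
	printf("%d\n", IS_RING_DROP(&ring));	/* 1: drop mode */
	return 0;
}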

diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c

@@ -422,13 +422,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
 	struct atmel_tcb_pwm_chip *tcbpwm;
 	const struct atmel_tcb_config *config;
 	struct device_node *np = pdev->dev.of_node;
-	struct regmap *regmap;
-	struct clk *clk, *gclk = NULL;
-	struct clk *slow_clk;
 	char clk_name[] = "t0_clk";
 	int err;
 	int channel;

+	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+	if (tcbpwm == NULL)
+		return -ENOMEM;
+
 	err = of_property_read_u32(np, "reg", &channel);
 	if (err < 0) {
 		dev_err(&pdev->dev,
@@ -437,49 +438,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
 		return err;
 	}

-	regmap = syscon_node_to_regmap(np->parent);
-	if (IS_ERR(regmap))
-		return PTR_ERR(regmap);
+	tcbpwm->regmap = syscon_node_to_regmap(np->parent);
+	if (IS_ERR(tcbpwm->regmap))
+		return PTR_ERR(tcbpwm->regmap);

-	slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
-	if (IS_ERR(slow_clk))
-		return PTR_ERR(slow_clk);
+	tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+	if (IS_ERR(tcbpwm->slow_clk))
+		return PTR_ERR(tcbpwm->slow_clk);

 	clk_name[1] += channel;
-	clk = of_clk_get_by_name(np->parent, clk_name);
-	if (IS_ERR(clk))
-		clk = of_clk_get_by_name(np->parent, "t0_clk");
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
+	tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
+	if (IS_ERR(tcbpwm->clk))
+		tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
+	if (IS_ERR(tcbpwm->clk)) {
+		err = PTR_ERR(tcbpwm->clk);
+		goto err_slow_clk;
+	}

 	match = of_match_node(atmel_tcb_of_match, np->parent);
 	config = match->data;

 	if (config->has_gclk) {
-		gclk = of_clk_get_by_name(np->parent, "gclk");
-		if (IS_ERR(gclk))
-			return PTR_ERR(gclk);
-	}
-
-	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
-	if (tcbpwm == NULL) {
-		err = -ENOMEM;
-		goto err_slow_clk;
+		tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
+		if (IS_ERR(tcbpwm->gclk)) {
+			err = PTR_ERR(tcbpwm->gclk);
+			goto err_clk;
+		}
 	}

 	tcbpwm->chip.dev = &pdev->dev;
 	tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
 	tcbpwm->chip.npwm = NPWM;
 	tcbpwm->channel = channel;
-	tcbpwm->regmap = regmap;
-	tcbpwm->clk = clk;
-	tcbpwm->gclk = gclk;
-	tcbpwm->slow_clk = slow_clk;
 	tcbpwm->width = config->counter_width;

-	err = clk_prepare_enable(slow_clk);
+	err = clk_prepare_enable(tcbpwm->slow_clk);
 	if (err)
-		goto err_slow_clk;
+		goto err_gclk;

 	spin_lock_init(&tcbpwm->lock);
@@ -494,23 +489,28 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
 err_disable_clk:
 	clk_disable_unprepare(tcbpwm->slow_clk);

+err_gclk:
+	clk_put(tcbpwm->gclk);
+
+err_clk:
+	clk_put(tcbpwm->clk);
+
 err_slow_clk:
-	clk_put(slow_clk);
+	clk_put(tcbpwm->slow_clk);

 	return err;
 }

-static int atmel_tcb_pwm_remove(struct platform_device *pdev)
+static void atmel_tcb_pwm_remove(struct platform_device *pdev)
 {
 	struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);

 	pwmchip_remove(&tcbpwm->chip);

 	clk_disable_unprepare(tcbpwm->slow_clk);
-	clk_put(tcbpwm->slow_clk);
 	clk_put(tcbpwm->gclk);
 	clk_put(tcbpwm->clk);
-
-	return 0;
+	clk_put(tcbpwm->slow_clk);
 }
static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
@@ -564,7 +564,7 @@ static struct platform_driver atmel_tcb_pwm_driver = {
 		.pm = &atmel_tcb_pwm_pm_ops,
 	},
 	.probe = atmel_tcb_pwm_probe,
-	.remove = atmel_tcb_pwm_remove,
+	.remove_new = atmel_tcb_pwm_remove,
 };
module_platform_driver(atmel_tcb_pwm_driver);
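
Aside (not part of the diff): the probe() rework above allocates tcbpwm first and stores each clock directly in it, so every later failure can unwind exactly what has been acquired, in reverse order, through the err_gclk/err_clk/err_slow_clk labels. A generic, self-contained sketch of that goto-unwind pattern with hypothetical resources:

#include <stdlib.h>

static int probe(void)
{
	void *slow_clk, *clk, *gclk;
	int err = -1;

	slow_clk = malloc(1);		/* first resource */
	if (!slow_clk)
		return err;

	clk = malloc(1);		/* second resource */
	if (!clk)
		goto err_slow_clk;

	gclk = malloc(1);		/* third resource */
	if (!gclk)
		goto err_clk;

	/* Success: resources stay held until the remove() counterpart. */
	return 0;

err_clk:
	free(clk);			/* undo in reverse order */
err_slow_clk:
	free(slow_clk);
	return err;
}

int main(void)
{
	return probe();
}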

diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c

@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	if (duty_cycles > 255)
 		duty_cycles = 255;

-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+	val = readl(lpc32xx->base);
 	val &= ~0xFFFF;
 	val |= (period_cycles << 8) | duty_cycles;
-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+	writel(val, lpc32xx->base);

 	return 0;
 }
@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 	if (ret)
 		return ret;

-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+	val = readl(lpc32xx->base);
 	val |= PWM_ENABLE;
-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+	writel(val, lpc32xx->base);

 	return 0;
 }
@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 	struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
 	u32 val;

-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+	val = readl(lpc32xx->base);
 	val &= ~PWM_ENABLE;
-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+	writel(val, lpc32xx->base);

 	clk_disable_unprepare(lpc32xx->clk);
 }
@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
 	lpc32xx->chip.npwm = 1;

 	/* If PWM is disabled, configure the output to the default value */
-	val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+	val = readl(lpc32xx->base);
 	val &= ~PWM_PIN_LEVEL;
-	writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+	writel(val, lpc32xx->base);

 	ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
 	if (ret < 0) {
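
Aside (not part of the diff): the lpc32xx change drops the pwm->hwpwm << 2 offset because this controller exposes a single PWM register (chip.npwm = 1), so indexing by channel addressed registers that do not exist. The read-modify-write itself packs the period reload into bits 15:8 and the duty value into bits 7:0; a small sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x80000000;	/* pretend other control bits are set */
	uint32_t period_cycles = 100;	/* 0x64 */
	uint32_t duty_cycles = 25;	/* 0x19 */

	val &= ~0xFFFFu;			/* clear both 8-bit fields */
	val |= (period_cycles << 8) | duty_cycles;

	printf("reg = 0x%08x\n", val);		/* prints reg = 0x80006419 */
	return 0;
}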

diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c

@@ -441,6 +441,7 @@ static int zcdn_create(const char *name)
 			 ZCRYPT_NAME "_%d", (int)MINOR(devt));
 	nodename[sizeof(nodename) - 1] = '\0';
 	if (dev_set_name(&zcdndev->device, nodename)) {
+		kfree(zcdndev);
 		rc = -EINVAL;
 		goto unlockout;
 	}
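
Aside (not part of the diff): the added kfree() closes a leak in zcdn_create(): zcdndev is allocated earlier in the function, and the dev_set_name() failure path previously jumped to the unlock label without releasing it. A generic sketch of the shape of the bug, with hypothetical names:

#include <stdlib.h>

struct zcdn { int id; };

/* Hypothetical stand-in for the failing setup step. */
static int set_name(struct zcdn *z)
{
	(void)z;
	return -1;	/* simulate failure */
}

static int create(void)
{
	struct zcdn *z = malloc(sizeof(*z));
	int rc = 0;

	if (!z)
		return -1;
	if (set_name(z)) {
		free(z);	/* the fix: release z on this error path */
		rc = -1;
		goto unlockout;	/* previously jumped here still owning z */
	}
	/* ... would go on to register z ... */
	free(z);
unlockout:
	return rc;
}

int main(void)
{
	return create();
}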

diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c

@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 		vha->flags.difdix_supported = 1;
 		ql_dbg(ql_dbg_user, vha, 0x7082,
 		    "Registered for DIF/DIX type 1 and 3 protection.\n");
-		if (ql2xenabledif == 1)
-			prot = SHOST_DIX_TYPE0_PROTECTION;
 		scsi_host_set_prot(vha->host,
 		    prot | SHOST_DIF_TYPE1_PROTECTION
 		    | SHOST_DIF_TYPE2_PROTECTION

diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c

@@ -18,7 +18,7 @@
  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
- * |                              |                    | 0x302d,0x3033  |
+ * |                              |                    | 0x302e,0x3033  |
  * |                              |                    | 0x3036,0x3038  |
  * |                              |                    | 0x303a         |
  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |

(Some files were not shown because too many files have changed in this diff.)