This is the 5.10.9 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmAHFpcACgkQONu9yGCS
 aT4Vhw/+JLscHnfK//hbS6Nx95MY95VMzy+p2ccADXRy3O/5nr0HwGKnXTKB4Bg+
 05S3Hv9ZU/XSszLWvgFQ0Z0peU241ASPz1uLTgtpziBT5plXa5eJULBZ+WknWMef
 dNKpvKPphpEbQ0yz6o/4sbNAdiI9BzyGCOicQ2dl9nY7R/JA9YHquUD7iHMnvbs+
 yxwwawNHVwszUT/fJT3iFzOAehHGAttHdf3z/bGPS1ogy2S7J5IluJgTAibd3P7G
 5o7OUUA5ujEtjBLIkA61fqeL2Qaci83Ff/8KEPEfF1JeLBbMHYcLHnz3RAwBaLZh
 nlM4smyTeekcnHIzyRGw16OmpoYwY3MQAt+UFLCzKhlnscB0UqCNkA9zQA9k/taw
 cy7/fe5hWFU9DRv4uTUT2H1tkP+pNQ5eIaejPHMtld5JlYXoDN4RyQq7sAyMQgBj
 CXADStYSR/f5sWWgRbRs1F7E0lrePsVpjOcqHXxbsS+52yN2CZSKazlOIJ9xArfM
 cTzzLUuYbMZoHjIDdMMkjA41VMmyJ+BKrqEgzu3LsJQs57o/ckjnQx4VV5YiHhci
 v35OL8oa9IZi8WQikB9bx2WZRWUChOGKwMNeeUwEFD4Zmye1OtyyHuzYQf9QSjRv
 zbf1owwsg3xnfkvLcfru8mNMgJkgG8RpuNNVPO8boWZ4pgPu2tk=
 =5K55
 -----END PGP SIGNATURE-----

Merge 5.10.9 into android12-5.10

Changes in 5.10.9
	btrfs: reloc: fix wrong file extent type check to avoid false ENOENT
	btrfs: prevent NULL pointer dereference in extent_io_tree_panic
	ALSA: hda/realtek: fix right sounds and mute/micmute LEDs for HP machines
	ALSA: doc: Fix reference to mixart.rst
	ASoC: AMD Renoir - add DMI entry for Lenovo ThinkPad X395
	ASoC: dapm: remove widget from dirty list on free
	x86/hyperv: check cpu mask after interrupt has been disabled
	drm/amdgpu: add green_sardine device id (v2)
	drm/amdgpu: fix DRM_INFO flood if display core is not supported (bug 210921)
	Revert "drm/amd/display: Fixed Intermittent blue screen on OLED panel"
	drm/amdgpu: add new device id for Renior
	drm/i915: Allow the sysadmin to override security mitigations
	drm/i915/gt: Limit VFE threads based on GT
	drm/i915/backlight: fix CPU mode backlight takeover on LPT
	drm/bridge: sii902x: Refactor init code into separate function
	dt-bindings: display: sii902x: Add supply bindings
	drm/bridge: sii902x: Enable I/O and core VCC supplies if present
	tracing/kprobes: Do the notrace functions check without kprobes on ftrace
	tools/bootconfig: Add tracing_on support to helper scripts
	ext4: use IS_ERR instead of IS_ERR_OR_NULL and set inode null when IS_ERR
	ext4: fix wrong list_splice in ext4_fc_cleanup
	ext4: fix bug for rename with RENAME_WHITEOUT
	cifs: check pointer before freeing
	cifs: fix interrupted close commands
	riscv: Drop a duplicated PAGE_KERNEL_EXEC
	riscv: return -ENOSYS for syscall -1
	riscv: Fixup CONFIG_GENERIC_TIME_VSYSCALL
	riscv: Fix KASAN memory mapping.
	mips: fix Section mismatch in reference
	mips: lib: uncached: fix non-standard usage of variable 'sp'
	MIPS: boot: Fix unaligned access with CONFIG_MIPS_RAW_APPENDED_DTB
	MIPS: Fix malformed NT_FILE and NT_SIGINFO in 32bit coredumps
	MIPS: relocatable: fix possible boot hangup with KASLR enabled
	RDMA/ocrdma: Fix use after free in ocrdma_dealloc_ucontext_pd()
	ACPI: scan: Harden acpi_device_add() against device ID overflows
	xen/privcmd: allow fetching resource sizes
	compiler.h: Raise minimum version of GCC to 5.1 for arm64
	mm/vmalloc.c: fix potential memory leak
	mm/hugetlb: fix potential missing huge page size info
	mm/process_vm_access.c: include compat.h
	dm raid: fix discard limits for raid1
	dm snapshot: flush merged data before committing metadata
	dm integrity: fix flush with external metadata device
	dm integrity: fix the maximum number of arguments
	dm crypt: use GFP_ATOMIC when allocating crypto requests from softirq
	dm crypt: do not wait for backlogged crypto request completion in softirq
	dm crypt: do not call bio_endio() from the dm-crypt tasklet
	dm crypt: defer decryption to a tasklet if interrupts disabled
	stmmac: intel: change all EHL/TGL to auto detect phy addr
	r8152: Add Lenovo Powered USB-C Travel Hub
	btrfs: tree-checker: check if chunk item end overflows
	ext4: don't leak old mountpoint samples
	io_uring: don't take files/mm for a dead task
	io_uring: drop mm and files after task_work_run
	ARC: build: remove non-existing bootpImage from KBUILD_IMAGE
	ARC: build: add uImage.lzma to the top-level target
	ARC: build: add boot_targets to PHONY
	ARC: build: move symlink creation to arch/arc/Makefile to avoid race
	ARM: omap2: pmic-cpcap: fix maximum voltage to be consistent with defaults on xt875
	ath11k: fix crash caused by NULL rx_channel
	netfilter: ipset: fixes possible oops in mtype_resize
	ath11k: qmi: try to allocate a big block of DMA memory first
	btrfs: fix async discard stall
	btrfs: merge critical sections of discard lock in workfn
	btrfs: fix transaction leak and crash after RO remount caused by qgroup rescan
	regulator: bd718x7: Add enable times
	ethernet: ucc_geth: fix definition and size of ucc_geth_tx_global_pram
	ARM: dts: ux500/golden: Set display max brightness
	habanalabs: adjust pci controller init to new firmware
	habanalabs/gaudi: retry loading TPC f/w on -EINTR
	habanalabs: register to pci shutdown callback
	staging: spmi: hisi-spmi-controller: Fix some error handling paths
	spi: altera: fix return value for altera_spi_txrx()
	habanalabs: Fix memleak in hl_device_reset
	hwmon: (pwm-fan) Ensure that calculation doesn't discard big period values
	lib/raid6: Let $(UNROLL) rules work with macOS userland
	kconfig: remove 'kvmconfig' and 'xenconfig' shorthands
	spi: fix the divide by 0 error when calculating xfer waiting time
	io_uring: drop file refs after task cancel
	bfq: Fix computation of shallow depth
	arch/arc: add copy_user_page() to <asm/page.h> to fix build error on ARC
	misdn: dsp: select CONFIG_BITREVERSE
	net: ethernet: fs_enet: Add missing MODULE_LICENSE
	selftests: fix the return value for UDP GRO test
	nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
	nvme: avoid possible double fetch in handling CQE
	nvmet-rdma: Fix list_del corruption on queue establishment failure
	drm/amd/display: fix sysfs amdgpu_current_backlight_pwm NULL pointer issue
	drm/amdgpu: fix a GPU hang issue when remove device
	drm/amd/pm: fix the failure when change power profile for renoir
	drm/amdgpu: fix potential memory leak during navi12 deinitialization
	usb: typec: Fix copy paste error for NVIDIA alt-mode description
	iommu/vt-d: Fix lockdep splat in sva bind()/unbind()
	ACPI: scan: add stub acpi_create_platform_device() for !CONFIG_ACPI
	drm/msm: Call msm_init_vram before binding the gpu
	ARM: picoxcell: fix missing interrupt-parent properties
	poll: fix performance regression due to out-of-line __put_user()
	rcu-tasks: Move RCU-tasks initialization to before early_initcall()
	bpf: Simplify task_file_seq_get_next()
	bpf: Save correct stopping point in file seq iteration
	x86/sev-es: Fix SEV-ES OUT/IN immediate opcode vc handling
	cfg80211: select CONFIG_CRC32
	nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context
	iommu/vt-d: Update domain geometry in iommu_ops.at(de)tach_dev
	net/mlx5e: CT: Use per flow counter when CT flow accounting is enabled
	net/mlx5: Fix passing zero to 'PTR_ERR'
	net/mlx5: E-Switch, fix changing vf VLANID
	blk-mq-debugfs: Add decode for BLK_MQ_F_TAG_HCTX_SHARED
	mm: fix clear_refs_write locking
	mm: don't play games with pinned pages in clear_page_refs
	mm: don't put pinned pages into the swap cache
	perf intel-pt: Fix 'CPU too large' error
	dump_common_audit_data(): fix racy accesses to ->d_name
	ASoC: meson: axg-tdm-interface: fix loopback
	ASoC: meson: axg-tdmin: fix axg skew offset
	ASoC: Intel: fix error code cnl_set_dsp_D0()
	nvmet-rdma: Fix NULL deref when setting pi_enable and traddr INADDR_ANY
	nvme: don't intialize hwmon for discovery controllers
	nvme-tcp: fix possible data corruption with bio merges
	nvme-tcp: Fix warning with CONFIG_DEBUG_PREEMPT
	NFS4: Fix use-after-free in trace_event_raw_event_nfs4_set_lock
	pNFS: We want return-on-close to complete when evicting the inode
	pNFS: Mark layout for return if return-on-close was not sent
	pNFS: Stricter ordering of layoutget and layoutreturn
	NFS: Adjust fs_context error logging
	NFS/pNFS: Don't call pnfs_free_bucket_lseg() before removing the request
	NFS/pNFS: Don't leak DS commits in pnfs_generic_retry_commit()
	NFS/pNFS: Fix a leak of the layout 'plh_outstanding' counter
	NFS: nfs_delegation_find_inode_server must first reference the superblock
	NFS: nfs_igrab_and_active must first reference the superblock
	scsi: ufs: Fix possible power drain during system suspend
	ext4: fix superblock checksum failure when setting password salt
	RDMA/restrack: Don't treat as an error allocation ID wrapping
	RDMA/usnic: Fix memleak in find_free_vf_and_create_qp_grp
	bnxt_en: Improve stats context resource accounting with RDMA driver loaded.
	RDMA/mlx5: Fix wrong free of blue flame register on error
	IB/mlx5: Fix error unwinding when set_has_smi_cap fails
	umount(2): move the flag validity checks first
	dm zoned: select CONFIG_CRC32
	drm/i915/dsi: Use unconditional msleep for the panel_on_delay when there is no reset-deassert MIPI-sequence
	drm/i915/icl: Fix initing the DSI DSC power refcount during HW readout
	drm/i915/gt: Restore clear-residual mitigations for Ivybridge, Baytrail
	mm, slub: consider rest of partial list if acquire_slab() fails
	riscv: Trace irq on only interrupt is enabled
	iommu/vt-d: Fix unaligned addresses for intel_flush_svm_range_dev()
	net: sunrpc: interpret the return value of kstrtou32 correctly
	selftests: netfilter: Pass family parameter "-f" to conntrack tool
	dm: eliminate potential source of excessive kernel log noise
	ALSA: fireface: Fix integer overflow in transmit_midi_msg()
	ALSA: firewire-tascam: Fix integer overflow in midi_port_work()
	netfilter: conntrack: fix reading nf_conntrack_buckets
	netfilter: nf_nat: Fix memleak in nf_nat_init
	Linux 5.10.9

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I609e501511889081e03d2d18ee7e1be95406f396
commit 88e2d5fd10 (Greg Kroah-Hartman, 2021-01-19 18:49:54 +01:00)
151 changed files with 1537 additions and 627 deletions

@@ -8,6 +8,8 @@ Optional properties:
- interrupts: describe the interrupt line used to inform the host
about hotplug events.
- reset-gpios: OF device-tree gpio specification for RST_N pin.
- iovcc-supply: I/O Supply Voltage (1.8V or 3.3V)
- cvcc12-supply: Digital Core Supply Voltage (1.2V)
HDMI audio properties:
- #sound-dai-cells: <0> or <1>. <0> if only i2s or spdif pin
@@ -54,6 +56,8 @@ Example:
compatible = "sil,sii9022";
reg = <0x39>;
reset-gpios = <&pioA 1 0>;
iovcc-supply = <&v3v3_hdmi>;
cvcc12-supply = <&v1v2_hdmi>;
#sound-dai-cells = <0>;
sil,i2s-data-lanes = < 0 1 2 >;

@@ -1501,7 +1501,7 @@ Module for Digigram miXart8 sound cards.
This module supports multiple cards.
Note: One miXart8 board will be represented as 4 alsa cards.
See MIXART.txt for details.
See Documentation/sound/cards/mixart.rst for details.
When the driver is compiled as a module and the hotplug firmware
is supported, the firmware data is loaded via hotplug automatically.

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 8
SUBLEVEL = 9
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@@ -102,16 +102,22 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
#default target for make without any arguments.
KBUILD_IMAGE := $(boot)/bootpImage
all: bootpImage
bootpImage: vmlinux
boot_targets += uImage uImage.bin uImage.gz
boot_targets := uImage.bin uImage.gz uImage.lzma
PHONY += $(boot_targets)
$(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
uimage-default-y := uImage.bin
uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz
uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma
PHONY += uImage
uImage: $(uimage-default-y)
@ln -sf $< $(boot)/uImage
@$(kecho) ' Image $(boot)/uImage is ready'
CLEAN_FILES += $(boot)/uImage
archclean:
$(Q)$(MAKE) $(clean)=$(boot)

@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
targets := vmlinux.bin vmlinux.bin.gz uImage
targets := vmlinux.bin vmlinux.bin.gz
# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
@@ -13,11 +13,6 @@ LINUX_START_TEXT = $$(readelf -h vmlinux | \
UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_LZMA) := lzma
targets += uImage
targets += uImage.bin
targets += uImage.gz
targets += uImage.lzma
@@ -42,7 +37,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'

@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
struct vm_area_struct;

@@ -45,18 +45,21 @@ paxi {
emac: gem@30000 {
compatible = "cadence,gem";
reg = <0x30000 0x10000>;
interrupt-parent = <&vic0>;
interrupts = <31>;
};
dmac1: dmac@40000 {
compatible = "snps,dw-dmac";
reg = <0x40000 0x10000>;
interrupt-parent = <&vic0>;
interrupts = <25>;
};
dmac2: dmac@50000 {
compatible = "snps,dw-dmac";
reg = <0x50000 0x10000>;
interrupt-parent = <&vic0>;
interrupts = <26>;
};
@@ -233,6 +236,7 @@ ebi@50000000 {
axi2pico@c0000000 {
compatible = "picochip,axi2pico-pc3x2";
reg = <0xc0000000 0x10000>;
interrupt-parent = <&vic0>;
interrupts = <13 14 15 16 17 18 19 20 21>;
};
};

@@ -326,6 +326,7 @@ dsi-controller@a0351000 {
panel@0 {
compatible = "samsung,s6e63m0";
reg = <0>;
max-brightness = <15>;
vdd3-supply = <&panel_reg_3v0>;
vci-supply = <&panel_reg_1v8>;
reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;

@@ -71,7 +71,7 @@ static struct omap_voltdm_pmic omap_cpcap_iva = {
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
.vddmin = 900000,
.vddmax = 1350000,
.vddmax = 1375000,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = 0x44,
.volt_reg_addr = 0x0,

@@ -13,6 +13,7 @@
#include <linux/libfdt.h>
#include <asm/addrspace.h>
#include <asm/unaligned.h>
/*
* These two variables specify the free mem region
@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
dtb_size = fdt_totalsize((void *)&__appended_dtb);
/* last four bytes is always image size in little endian */
image_size = le32_to_cpup((void *)&__image_end - 4);
image_size = get_unaligned_le32((void *)&__image_end - 4);
/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,

@@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* Some data types as stored in coredump.
*/
#define user_long_t compat_long_t
#define user_siginfo_t compat_siginfo_t
#define copy_siginfo_to_external copy_siginfo_to_external32
#include "../../../fs/binfmt_elf.c"

@@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* Some data types as stored in coredump.
*/
#define user_long_t compat_long_t
#define user_siginfo_t compat_siginfo_t
#define copy_siginfo_to_external copy_siginfo_to_external32
#include "../../../fs/binfmt_elf.c"

@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
size_t i;
unsigned long *ptr = (unsigned long *)area;
const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
size_t diff, i;
diff = (void *)ptr - area;
if (unlikely(size < diff + sizeof(hash)))
return hash;
size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */

@@ -37,10 +37,12 @@
*/
unsigned long run_uncached(void *func)
{
register long sp __asm__("$sp");
register long ret __asm__("$2");
long lfunc = (long)func, ufunc;
long usp;
long sp;
__asm__("move %0, $sp" : "=r" (sp));
if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
usp = CKSEG1ADDR(sp);

@@ -1609,7 +1609,7 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
static void __init loongson3_sc_init(void)
static void loongson3_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;

@@ -146,7 +146,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
return 1;
}
static int __init mips_sc_probe_cm3(void)
static int mips_sc_probe_cm3(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned long cfg = read_gcr_l2_config();
@@ -180,7 +180,7 @@ static int __init mips_sc_probe_cm3(void)
return 0;
}
static inline int __init mips_sc_probe(void)
static inline int mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config1, config2;

@@ -99,7 +99,6 @@
| _PAGE_DIRTY)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \

@@ -10,7 +10,7 @@
#include <linux/types.h>
#ifndef GENERIC_TIME_VSYSCALL
#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif

@@ -124,15 +124,15 @@ skip_context_tracking:
REG_L a1, (a1)
jr a1
1:
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
/*
* Exceptions run with interrupts enabled or disabled depending on the
* state of SR_PIE in m/sstatus.
*/
andi t0, s1, SR_PIE
beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
csrs CSR_STATUS, SR_IE
1:
@@ -186,14 +186,7 @@ check_syscall_nr:
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
bge a7, t0, 1f
/*
* Check if syscall is rejected by tracer, i.e., a7 == -1.
* If yes, we pretend it was executed.
*/
li t1, -1
beq a7, t1, ret_from_syscall_rejected
blt a7, t1, 1f
bgeu a7, t0, 1f
/* Call syscall */
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR
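
Why the single unsigned compare now also covers the rejected-syscall case (an illustrative walk-through, not part of the patch; per the comment above, t0 holds the allowed syscall count):

	# sketch: a7 = -1 after a tracer rejected the syscall
	bgeu a7, t0, 1f    # unsigned compare: (u64)-1 >= number of syscalls,
	                   # so the branch is taken and the out-of-range path
	                   # redirects to ni_syscall, which returns -ENOSYS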

@@ -12,7 +12,7 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#ifdef GENERIC_TIME_VSYSCALL
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>

@@ -93,8 +93,8 @@ void __init kasan_init(void)
VMALLOC_END));
for_each_mem_range(i, &_start, &_end) {
void *start = (void *)_start;
void *end = (void *)_end;
void *start = (void *)__va(_start);
void *end = (void *)__va(_end);
if (start >= end)
break;

@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (!hv_hypercall_pg)
goto do_native;
if (cpumask_empty(cpus))
return;
local_irq_save(flags);
/*
* Only check the mask _after_ interrupt has been disabled to avoid the
* mask changing under our feet.
*/
if (cpumask_empty(cpus)) {
local_irq_restore(flags);
return;
}
flush_pcpu = (struct hv_tlb_flush **)
this_cpu_ptr(hyperv_pcpu_input_arg);

@@ -305,14 +305,14 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
*exitinfo |= (u64)insn->immediate.value << 16;
*exitinfo |= (u8)insn->immediate.value << 16;
break;
/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
*exitinfo |= (u64)insn->immediate.value << 16;
*exitinfo |= (u8)insn->immediate.value << 16;
break;
/* IN register opcodes */

@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
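
For a concrete sense of the new fractions (an illustrative calculation, assuming a tag bitmap depth bt->sb.depth of 64):

	word_depths[0][0] = max(64 >> 1, 1)       = 32   /* ~50% */
	word_depths[0][1] = max((64 * 3) >> 2, 1) = 48   /* ~75% */
	word_depths[1][0] = max((64 * 3) >> 4, 1) = 12   /* ~18% */
	word_depths[1][1] = max((64 * 6) >> 4, 1) = 24   /* ~37% */

The old 1 << bt->sb.shift expression is the size of a single sbitmap word, so whenever the tag map spans multiple words the old limits were computed from only a fraction of the real depth.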

@@ -246,6 +246,7 @@ static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(BLOCKING),
HCTX_FLAG_NAME(NO_SCHED),
HCTX_FLAG_NAME(STACKING),
HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
char bus_id[15];
const char *bus_id;
unsigned int instance_no;
struct list_head node;
};

@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -674,7 +675,14 @@ int acpi_device_add(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
acpi_device_bus_id->bus_id =
kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
if (!acpi_device_bus_id->bus_id) {
pr_err(PREFIX "Memory allocation error for bus id\n");
result = -ENOMEM;
goto err_free_new_bus_id;
}
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@@ -709,6 +717,11 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
err_free_new_bus_id:
if (!found)
kfree(new_bus_id);
mutex_unlock(&acpi_device_lock);
err_detach:

@@ -2524,11 +2524,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
amdgpu_amdkfd_device_fini(adev);
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -3008,7 +3008,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
DRM_INFO("Display Core has been requested via kernel parameter "
DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}

@@ -1076,6 +1076,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},

@@ -1283,8 +1283,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->hdcp_context.hdcp_initialized)
if (!psp->hdcp_context.hdcp_initialized) {
if (psp->hdcp_context.hdcp_shared_buf)
goto out;
else
return 0;
}
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1292,6 +1296,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1430,8 +1435,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->dtm_context.dtm_initialized)
if (!psp->dtm_context.dtm_initialized) {
if (psp->dtm_context.dtm_shared_buf)
goto out;
else
return 0;
}
ret = psp_dtm_unload(psp);
if (ret)
@@ -1439,6 +1448,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,

@@ -1242,7 +1242,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->pdev->device == 0x1636)
if ((adev->pdev->device == 0x1636) ||
(adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

@@ -2471,9 +2471,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
struct dc *dc = link->ctx->dc;
struct dc *dc = NULL;
struct abm *abm = NULL;
if (!link || !link->ctx)
return NULL;
dc = link->ctx->dc;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;

@@ -2635,15 +2635,16 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&

@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;

@@ -222,6 +222,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;

@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
@@ -168,6 +169,7 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
struct regulator_bulk_data supplies[2];
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@@ -954,13 +956,73 @@ static const struct drm_bridge_timings default_sii902x_timings = {
| DRM_BUS_FLAG_DE_HIGH,
};
static int sii902x_init(struct sii902x *sii902x)
{
struct device *dev = &sii902x->i2c->dev;
unsigned int status = 0;
u8 chipid[4];
int ret;
sii902x_reset(sii902x);
ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
if (ret)
return ret;
ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
&chipid, 4);
if (ret) {
dev_err(dev, "regmap_read failed %d\n", ret);
return ret;
}
if (chipid[0] != 0xb0) {
dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
chipid[0]);
return -EINVAL;
}
/* Clear all pending interrupts */
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
if (sii902x->i2c->irq > 0) {
regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
SII902X_HOTPLUG_EVENT);
ret = devm_request_threaded_irq(dev, sii902x->i2c->irq, NULL,
sii902x_interrupt,
IRQF_ONESHOT, dev_name(dev),
sii902x);
if (ret)
return ret;
}
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
i2c_set_clientdata(sii902x->i2c, sii902x);
sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
1, 0, I2C_MUX_GATE,
sii902x_i2c_bypass_select,
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
return -ENOMEM;
sii902x->i2cmux->priv = sii902x;
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
unsigned int status = 0;
struct sii902x *sii902x;
u8 chipid[4];
int ret;
ret = i2c_check_functionality(client->adapter,
@@ -989,61 +1051,29 @@ static int sii902x_probe(struct i2c_client *client,
mutex_init(&sii902x->mutex);
sii902x_reset(sii902x);
ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
if (ret)
sii902x->supplies[0].supply = "iovcc";
sii902x->supplies[1].supply = "cvcc12";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
if (ret < 0)
return ret;
ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
&chipid, 4);
if (ret) {
dev_err(dev, "regmap_read failed %d\n", ret);
ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to enable supplies");
return ret;
}
if (chipid[0] != 0xb0) {
dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
chipid[0]);
return -EINVAL;
ret = sii902x_init(sii902x);
if (ret < 0) {
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
}
/* Clear all pending interrupts */
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
if (client->irq > 0) {
regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
SII902X_HOTPLUG_EVENT);
ret = devm_request_threaded_irq(dev, client->irq, NULL,
sii902x_interrupt,
IRQF_ONESHOT, dev_name(dev),
sii902x);
if (ret)
return ret;
}
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
i2c_set_clientdata(client, sii902x);
sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
1, 0, I2C_MUX_GATE,
sii902x_i2c_bypass_select,
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
return -ENOMEM;
sii902x->i2cmux->priv = sii902x;
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_remove(struct i2c_client *client)
{
@@ -1051,6 +1081,8 @@ static int sii902x_remove(struct i2c_client *client)
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
return 0;
}

@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \

@@ -1585,10 +1585,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
if (crtc_state->dsc.compression_enable)
intel_display_power_get(i915,
intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,

@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
if (cpu_mode) {
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
return 0;
}

@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
/* Deassert reset */
/*
* Give the panel time to power-on and then deassert its reset.
* Depending on the VBT MIPI sequences version the deassert-seq
* may contain the necessary delay, intel_dsi_msleep() will skip
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
msleep(intel_dsi->panel_on_delay);
}
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);

@@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
#define MAX_URB_ENTRIES 64
#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
u32 max_primitives;
u32 max_urb_entries;
u32 cmd_size;
u32 state_size;
u32 max_threads;
u32 state_start;
u32 batch_size;
u32 surface_start;
u32 surface_height;
u32 surface_width;
u32 scratch_size;
u32 max_size;
u32 size;
};
static inline int num_primitives(const struct batch_vals *bv)
{
/*
* We need to saturate the GPU with work in order to dispatch
* a shader on every HW thread, and clear the thread-local registers.
* In short, we have to dispatch work faster than the shaders can
* run in order to fill the EU and occupy each HW thread.
*/
return bv->max_threads;
}
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
bv->max_primitives = 280;
bv->max_urb_entries = MAX_URB_ENTRIES;
switch (INTEL_INFO(i915)->gt) {
default:
case 1:
bv->max_threads = 70;
break;
case 2:
bv->max_threads = 140;
break;
case 3:
bv->max_threads = 280;
break;
}
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
bv->max_primitives = 128;
bv->max_urb_entries = MAX_URB_ENTRIES / 2;
switch (INTEL_INFO(i915)->gt) {
default:
case 1: /* including vlv */
bv->max_threads = 36;
break;
case 2:
bv->max_threads = 128;
break;
}
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
bv->cmd_size = bv->max_primitives * 4096;
bv->state_size = STATE_SIZE;
bv->state_start = bv->cmd_size;
bv->batch_size = bv->cmd_size + bv->state_size;
bv->scratch_size = bv->surface_height * bv->surface_width;
bv->max_size = bv->batch_size + bv->scratch_size;
bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
bv->surface_start = bv->state_start + SZ_4K;
bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
u32 surface_start =
gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@@ -214,9 +234,9 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
u32 *cs = batch_alloc_items(batch, 0, 12);
u32 *cs = batch_alloc_items(batch, 0, 10);
*cs++ = STATE_BASE_ADDRESS | (12 - 2);
*cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
u32 urb_entries = bv->max_urb_entries;
u32 threads = bv->max_primitives - 1;
u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
*cs++ = threads << 16 | urb_entries << 8 | mode << 2;
*cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
unsigned int inline_data_size;
unsigned int media_batch_size;
unsigned int i;
unsigned int pkt = 6 + 3;
u32 *cs;
inline_data_size = 112 * 8;
media_batch_size = inline_data_size + 6;
cs = batch_alloc_items(batch, 8, pkt);
cs = batch_alloc_items(batch, 8, media_batch_size);
*cs++ = MEDIA_OBJECT | (media_batch_size - 2);
*cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
*cs++ = (y_offset << 16) | (x_offset);
*cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
for (i = 3; i < inline_data_size; i++)
*cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 5);
u32 *cs = batch_alloc_items(batch, 0, 4);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 8);
/* ivb: Stall before STATE_CACHE_INVALIDATE */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int desc_count = 64;
const u32 urb_size = 112;
const unsigned int desc_count = 1;
const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
u32 interface_descriptor;
u32 descriptors;
unsigned int i;
batch_init(&cmds, vma, start, 0, bv->cmd_size);
batch_init(&state, vma, start, bv->state_start, bv->state_size);
batch_init(&cmds, vma, start, 0, bv->state_start);
batch_init(&state, vma, start, bv->state_start, SZ_4K);
interface_descriptor =
gen7_fill_interface_descriptor(&state, bv,
descriptors = gen7_fill_interface_descriptor(&state, bv,
IS_HASWELL(i915) ?
&cb_kernel_hsw :
&cb_kernel_ivb,
desc_count);
gen7_emit_pipeline_flush(&cmds);
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
gen7_emit_state_base_address(&cmds, interface_descriptor);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
gen7_emit_interface_descriptor_load(&cmds,
interface_descriptor,
desc_count);
for (i = 0; i < bv->max_primitives; i++)
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
return bv.max_size;
return bv.size;
GEM_BUG_ON(vma->obj->base.size < bv.max_size);
GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);

@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -885,7 +886,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce) {
if (engine->wa_ctx.vma->private != ce &&
i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@@ -1289,7 +1291,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;

@@ -0,0 +1,146 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "i915_drv.h"
#include "i915_mitigations.h"
static unsigned long mitigations __read_mostly = ~0UL;
enum {
CLEAR_RESIDUALS = 0,
};
static const char * const names[] = {
[CLEAR_RESIDUALS] = "residuals",
};
bool i915_mitigate_clear_residuals(void)
{
return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
}
static int mitigations_set(const char *val, const struct kernel_param *kp)
{
unsigned long new = ~0UL;
char *str, *sep, *tok;
bool first = true;
int err = 0;
BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
str = kstrdup(val, GFP_KERNEL);
if (!str)
return -ENOMEM;
for (sep = str; (tok = strsep(&sep, ","));) {
bool enable = true;
int i;
/* Be tolerant of leading/trailing whitespace */
tok = strim(tok);
if (first) {
first = false;
if (!strcmp(tok, "auto"))
continue;
new = 0;
if (!strcmp(tok, "off"))
continue;
}
if (*tok == '!') {
enable = !enable;
tok++;
}
if (!strncmp(tok, "no", 2)) {
enable = !enable;
tok += 2;
}
if (*tok == '\0')
continue;
for (i = 0; i < ARRAY_SIZE(names); i++) {
if (!strcmp(tok, names[i])) {
if (enable)
new |= BIT(i);
else
new &= ~BIT(i);
break;
}
}
if (i == ARRAY_SIZE(names)) {
pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
DRIVER_NAME, val, tok);
err = -EINVAL;
break;
}
}
kfree(str);
if (err)
return err;
WRITE_ONCE(mitigations, new);
return 0;
}
static int mitigations_get(char *buffer, const struct kernel_param *kp)
{
unsigned long local = READ_ONCE(mitigations);
int count, i;
bool enable;
if (!local)
return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
if (local & BIT(BITS_PER_LONG - 1)) {
count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
enable = false;
} else {
enable = true;
count = 0;
}
for (i = 0; i < ARRAY_SIZE(names); i++) {
if ((local & BIT(i)) != enable)
continue;
count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s%s,", enable ? "" : "!", names[i]);
}
buffer[count - 1] = '\n';
return count;
}
static const struct kernel_param_ops ops = {
.set = mitigations_set,
.get = mitigations_get,
};
module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
MODULE_PARM_DESC(mitigations,
"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
"\n"
" auto -- enables all mitigations required for the platform [default]\n"
" off -- disables all mitigations\n"
"\n"
"Individual mitigations can be enabled by passing a comma-separated string,\n"
"e.g. mitigations=residuals to enable only clearing residuals or\n"
"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
"disabling it.\n"
"\n"
"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
" residuals -- clear all thread-local registers between contexts"
);
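
For illustration, the syntax described above maps onto kernel command-line usage like this (a sketch based purely on the parameter description; the option values shown are the documented ones):

	i915.mitigations=off                # disable all mitigations
	i915.mitigations=auto,noresiduals   # default set minus residual clearing
	i915.mitigations=residuals          # enable only residual clearing

Because the parameter is registered with module_param_cb_unsafe(..., 0600), root can also rewrite it at runtime through /sys/module/i915/parameters/mitigations.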

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_MITIGATIONS_H__
#define __I915_MITIGATIONS_H__
#include <linux/types.h>
bool i915_mitigate_clear_residuals(void);
#endif /* __I915_MITIGATIONS_H__ */

@@ -444,15 +444,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_init(ddev);
ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
goto err_msm_uninit;
dma_set_max_seg_size(dev, UINT_MAX);
msm_gem_shrinker_init(ddev);

@@ -324,8 +324,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
/* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
/*
* __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
* long. Check this here to prevent the fan from running at too low a
* frequency.
*/
if (state.period > ULONG_MAX / MAX_PWM + 1) {
dev_err(dev, "Configured period too big\n");
return -EINVAL;
}
/* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
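
To see what the new guard rejects (an illustrative calculation, assuming MAX_PWM is 255 as defined elsewhere in this driver and a 32-bit unsigned long):

	ULONG_MAX / MAX_PWM + 1 = 4294967295 / 255 + 1 = 16843010

so a configured period (in nanoseconds) above roughly 16.8 ms, i.e. below about 59 Hz, now fails the probe instead of letting MAX_PWM * (period - 1) wrap around in __set_pwm().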

@@ -244,6 +244,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
} else {
ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
&rt->next_id, GFP_KERNEL);
ret = (ret < 0) ? ret : 0;
}
if (!ret)

@@ -3950,7 +3950,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
err = set_has_smi_cap(dev);
if (err)
return err;
goto err_mp;
if (!mlx5_core_mp_enabled(mdev)) {
for (i = 1; i <= dev->num_ports; i++) {
@@ -4362,7 +4362,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}

@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
kfree(uctx->cntxt_pd);
uctx->cntxt_pd = NULL;
_ocrdma_dealloc_pd(dev, pd);
kfree(pd);
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)

@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
if (usnic_ib_share_vf)
usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;

@@ -67,8 +67,8 @@
#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
to match. That way, we can use 'unsigned long' for PFNs with impunity. */
@@ -739,6 +739,18 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
*/
if (domain->nid == NUMA_NO_NODE)
domain->nid = domain_update_device_node(domain);
/*
* First-level translation restricts the input-address to a
* canonical address (i.e., address bits 63:N have the same
* value as address bit [N-1], where N is 48-bits with 4-level
* paging and 57-bits with 5-level paging). Hence, skip bit
* [N-1].
*/
if (domain_use_first_level(domain))
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
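
Worked through for the common case (an illustrative calculation): with 4-level paging the domain address width gaw is 48, so first-level translation now reports an aperture end of __DOMAIN_MAX_ADDR(47) = 2^47 - 1, leaving bit 47 clear so that addresses stay canonical, while second-level translation keeps the full __DOMAIN_MAX_ADDR(48) = 2^48 - 1.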

@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
unsigned long address, unsigned long pages, int ih)
static void __flush_svm_range_dev(struct intel_svm *svm,
struct intel_svm_dev *sdev,
unsigned long address,
unsigned long pages, int ih)
{
struct qi_desc desc;
@@ -170,6 +172,22 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,
struct intel_svm_dev *sdev,
unsigned long address,
unsigned long pages, int ih)
{
unsigned long shift = ilog2(__roundup_pow_of_two(pages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
unsigned long start = ALIGN_DOWN(address, align);
unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
while (start < end) {
__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
start += align;
}
}
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
unsigned long pages, int ih)
{
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -382,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
spin_lock(&iommu->lock);
spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -487,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
unsigned long iflags;
int pasid_max;
int ret;
@@ -606,14 +626,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
spin_lock(&iommu->lock);
spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -633,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
spin_lock(&iommu->lock);
spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;

@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
select BITREVERSE
help
Enable support for digital audio processing capability.

@@ -634,6 +634,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block

@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;

@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
static void crypt_alloc_req_skcipher(struct crypt_config *cc,
static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
if (!ctx->r.req)
ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
return -ENOMEM;
}
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
return 0;
}
static void crypt_alloc_req_aead(struct crypt_config *cc,
static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
if (!ctx->r.req_aead)
ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
if (!ctx->r.req)
return -ENOMEM;
}
aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
return 0;
}
static void crypt_alloc_req(struct crypt_config *cc,
static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
crypt_alloc_req_aead(cc, ctx);
return crypt_alloc_req_aead(cc, ctx);
else
crypt_alloc_req_skcipher(cc, ctx);
return crypt_alloc_req_skcipher(cc, ctx);
}
static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
struct convert_context *ctx, bool atomic)
struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;
/*
* if reset_pending is set we are dealing with the bio for the first time,
* else we're continuing to work on the previous bio, so don't mess with
* the cc_pending counter
*/
if (reset_pending)
atomic_set(&ctx->cc_pending, 1);
while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
r = crypt_alloc_req(cc, ctx);
if (r) {
complete(&ctx->restart);
return BLK_STS_DEV_RESOURCE;
}
atomic_inc(&ctx->cc_pending);
if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
if (in_interrupt()) {
if (try_wait_for_completion(&ctx->restart)) {
/*
* we don't have to block to wait for completion,
* so proceed
*/
} else {
/*
* we can't wait for completion without blocking
* exit and continue processing in a workqueue
*/
ctx->r.req = NULL;
ctx->cc_sector += sector_step;
tag_offset++;
return BLK_STS_DEV_RESOURCE;
}
} else {
wait_for_completion(&ctx->restart);
}
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
static void kcryptd_io_bio_endio(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
bio_endio(io->base_bio);
}
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);
base_bio->bi_status = error;
/*
* If we are running this function from our tasklet,
* we can't call bio_endio() here, because it will call
* clone_endio() from dm.c, which in turn will
* free the current struct dm_crypt_io structure with
* our tasklet. In this case we need to delay bio_endio()
* execution to after the tasklet is done and dequeued.
*/
if (tasklet_trylock(&io->tasklet)) {
tasklet_unlock(&io->tasklet);
bio_endio(base_bio);
return;
}
INIT_WORK(&io->work, kcryptd_io_bio_endio);
queue_work(cc->io_queue, &io->work);
}
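A closer look at the tasklet_trylock() test above: the trylock only fails while the tasklet is currently executing, so a failed attempt is used purely as an "am I running inside my own tasklet?" probe. A minimal sketch of the pattern, with hypothetical names rather than the dm-crypt structures:

	#include <linux/bio.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_io {
		struct bio *bio;
		struct tasklet_struct tasklet;
		struct work_struct work;
		struct workqueue_struct *wq;
	};

	static void my_endio_work(struct work_struct *work)
	{
		struct my_io *io = container_of(work, struct my_io, work);

		/* safe now: the tasklet that owned 'io' has been dequeued */
		bio_endio(io->bio);
	}

	static void my_complete(struct my_io *io)
	{
		if (tasklet_trylock(&io->tasklet)) {
			/* not running inside our own tasklet; complete inline */
			tasklet_unlock(&io->tasklet);
			bio_endio(io->bio);
			return;
		}
		/* inside the tasklet: bio_endio() could free 'io' under us */
		INIT_WORK(&io->work, my_endio_work);
		queue_work(io->wq, &io->work);
	}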
/*
@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}
static void kcryptd_crypt_write_continue(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
struct crypt_config *cc = io->cc;
struct convert_context *ctx = &io->ctx;
int crypt_finished;
sector_t sector = io->sector;
blk_status_t r;
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
r = crypt_convert(cc, &io->ctx, true, false);
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
/* Wait for completion signaled by kcryptd_async_done() */
wait_for_completion(&ctx->restart);
crypt_finished = 1;
}
/* Encryption was already finished, submit io now */
if (crypt_finished) {
kcryptd_crypt_write_io_submit(io, 0);
io->sector = sector;
}
crypt_dec_pending(io);
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
* (TODO: is it actually possible to be in softirq in the write path?)
*/
if (r == BLK_STS_DEV_RESOURCE) {
INIT_WORK(&io->work, kcryptd_crypt_write_continue);
queue_work(cc->crypt_queue, &io->work);
return;
}
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
static void kcryptd_crypt_read_continue(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
struct crypt_config *cc = io->cc;
blk_status_t r;
wait_for_completion(&io->ctx.restart);
reinit_completion(&io->ctx.restart);
r = crypt_convert(cc, &io->ctx, true, false);
if (r)
io->error = r;
if (atomic_dec_and_test(&io->ctx.cc_pending))
kcryptd_crypt_read_done(io);
crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
*/
if (r == BLK_STS_DEV_RESOURCE) {
INIT_WORK(&io->work, kcryptd_crypt_read_continue);
queue_work(cc->crypt_queue, &io->work);
return;
}
if (r)
io->error = r;
@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
if (in_irq()) {
/* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
/*
* in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;

View File

@ -1379,12 +1379,52 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
#undef MAY_BE_HASH
}
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
struct flush_request {
struct dm_io_request io_req;
struct dm_io_region io_reg;
struct dm_integrity_c *ic;
struct completion comp;
};
static void flush_notify(unsigned long error, void *fr_)
{
struct flush_request *fr = fr_;
if (unlikely(error != 0))
dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
complete(&fr->comp);
}
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
struct flush_request fr;
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
fr.io_req.bi_op = REQ_OP_WRITE,
fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
fr.io_req.mem.type = DM_IO_KMEM,
fr.io_req.mem.ptr.addr = NULL,
fr.io_req.notify.fn = flush_notify,
fr.io_req.notify.context = &fr;
fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
fr.io_reg.bdev = ic->dev->bdev,
fr.io_reg.sector = 0,
fr.io_reg.count = 0,
fr.ic = ic;
init_completion(&fr.comp);
r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
BUG_ON(r);
}
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
if (flush_data)
wait_for_completion(&fr.comp);
}
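Note how the data flush and the tag write-out are deliberately overlapped: dm_io() is submitted asynchronously with flush_notify() as the callback, dm_bufio_write_dirty_buffers() runs while the flush is in flight, and only then does the function block on the completion. A condensed sketch of that overlap pattern, with hypothetical helpers standing in for the dm_io machinery:

	#include <linux/completion.h>

	/* hypothetical async primitives standing in for dm_io() and dm-bufio */
	extern void start_async_flush(void (*done)(unsigned long error, void *ctx),
				      void *ctx);
	extern void write_dirty_buffers(void);

	struct overlap_ctx {
		struct completion comp;
	};

	static void overlap_done(unsigned long error, void *ctx_)
	{
		struct overlap_ctx *ctx = ctx_;

		complete(&ctx->comp);		/* called from the async I/O path */
	}

	static void overlap_example(struct overlap_ctx *ctx)
	{
		init_completion(&ctx->comp);
		start_async_flush(overlap_done, ctx);	/* flush goes out... */
		write_dirty_buffers();			/* ...while tags are written */
		wait_for_completion(&ctx->comp);	/* join both before returning */
	}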
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@ -2110,7 +2150,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
integrity_metadata(&dio->work);
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, false);
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
@ -2195,7 +2235,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@ -2409,7 +2449,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@ -2451,7 +2491,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@ -2654,7 +2694,7 @@ static void bitmap_flush_work(struct work_struct *work)
unsigned long limit;
struct bio *bio;
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, false);
range.logical_sector = 0;
range.n_sectors = ic->provided_data_sectors;
@ -2663,9 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
dm_integrity_flush_buffers(ic);
if (ic->meta_dev)
blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
dm_integrity_flush_buffers(ic, true);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@ -2934,11 +2972,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, true);
}
if (ic->mode == 'B') {
dm_integrity_flush_buffers(ic);
dm_integrity_flush_buffers(ic, true);
#if 1
/* set to 0 to test bitmap replay code */
init_journal(ic, 0, ic->journal_sections, 0);
@ -3754,7 +3792,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 9, "Invalid number of feature args"},
{0, 15, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;

View File

@ -3730,10 +3730,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
* RAID0 and RAID10 personalities require bio splitting,
* RAID1/4/5/6 don't and process large discard bios properly.
*/
if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
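Worked numbers for the corrected branch, assuming a raid0 or raid10 set with 64 KiB chunks: chunk_size_bytes is 65536, so discard_granularity becomes 64 KiB, and rs->md.chunk_sectors is 128 sectors, capping each discard at one chunk. raid1 now falls through and keeps the stacked defaults, since md raid1 passes large discard bios through without chunk-based splitting.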

View File

@ -141,6 +141,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
/*
* Flush data after merge.
*/
struct bio flush_bio;
};
/*
@ -1121,6 +1126,17 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
static void error_bios(struct bio *bio);
static int flush_data(struct dm_snapshot *s)
{
struct bio *flush_bio = &s->flush_bio;
bio_reset(flush_bio);
bio_set_dev(flush_bio, s->origin->bdev);
flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(flush_bio);
}
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
if (flush_data(s) < 0) {
DMERR("Flush after merge failed: shutting down merge");
goto shut;
}
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
bio_uninit(&s->flush_bio);
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);

View File

@ -563,7 +563,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
DMWARN_LIMIT(
DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;

View File

@ -1027,6 +1027,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
hl_mmu_fini(hdev);
goto out_err;
}
@ -1038,6 +1039,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
hl_mmu_fini(hdev);
goto out_err;
}
}

View File

@ -528,6 +528,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
.shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};

View File

@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
if (val & PCI_CONFIG_ELBI_STS_ERR) {
dev_err(hdev->dev, "Error writing to ELBI\n");
if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
}
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
/* Return the DBI window to the default location */
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
/* Return the DBI window to the default location
* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
/* Return the DBI window to the default location */
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
/* Return the DBI window to the default location
* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}

View File

@ -754,11 +754,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
int rc;
int rc, count = 5;
again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
if (rc == -EINTR && count-- > 0) {
msleep(50);
goto again;
}
if (rc) {
dev_err(hdev->dev, "Firmware file %s is not found!\n",
dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
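The retry loop above is tightly bounded: count starts at 5 and each attempt sleeps 50 ms, so a request_firmware() call that keeps returning -EINTR is retried for at most roughly 250 ms before the failure is reported.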
@ -2893,7 +2899,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
RREG32(mmHW_STATE);
/* Set the access through PCI bars (Linux driver only) as
* secured
@ -2996,7 +3002,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
RREG32(mmHW_STATE);
return 0;

View File

@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#include <linux/coresight.h>
@ -876,7 +877,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
RREG32(mmHW_STATE);
return rc;
}

View File

@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_en_dev *edev = bp->edev;
if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}

View File

@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
MODULE_LICENSE("GPL");

View File

@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
MODULE_LICENSE("GPL");

View File

@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
u8 res2[0x80 - 0x74];
u8 res2[0x78 - 0x74];
u64 snums_en;
u32 l2l3baseptr; /* top byte consists of a few other bit fields */
u16 mtu[8];
u8 res3[0xa8 - 0x94];
u32 wrrtablebase; /* top byte is reserved */
u8 res4[0xc0 - 0xac];
} __packed;
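The reserved-array sizes encode absolute byte offsets (res2 now ends at 0x78, res3 at 0xa8, res4 at 0xc0), so the new fields land exactly where the PRAM layout expects them. A hypothetical compile-time check, not part of the patch, that would pin those offsets down:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* ucc_geth_tx_global_pram is the struct defined above (ucc_geth.h) */
	static_assert(offsetof(struct ucc_geth_tx_global_pram, snums_en) == 0x78);
	static_assert(offsetof(struct ucc_geth_tx_global_pram, l2l3baseptr) == 0x80);
	static_assert(offsetof(struct ucc_geth_tx_global_pram, mtu) == 0x84);
	static_assert(offsetof(struct ucc_geth_tx_global_pram, wrrtablebase) == 0xa8);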
/* structure representing Extended Filtering Global Parameters in PRAM */

View File

@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
struct mlx5_ct_shared_counter {
struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
struct mlx5_ct_shared_counter *shared_counter;
struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
if (!refcount_dec_and_test(&entry->shared_counter->refcount))
if (entry->counter->is_shared &&
!refcount_dec_and_test(&entry->counter->refcount))
return;
mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
kfree(entry->shared_counter);
mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
kfree(entry->counter);
}
static void
@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
attr->counter = entry->shared_counter->counter;
attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@ -732,13 +734,34 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
static struct mlx5_ct_shared_counter *
static struct mlx5_ct_counter *
mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
{
struct mlx5_ct_counter *counter;
int ret;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
counter->counter = mlx5_fc_create(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
kfree(counter);
return ERR_PTR(ret);
}
return counter;
}
static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
struct mlx5_ct_shared_counter *shared_counter;
struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
return rev_entry->shared_counter;
return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
if (!shared_counter)
return ERR_PTR(-ENOMEM);
shared_counter->counter = mlx5_fc_create(dev, true);
if (IS_ERR(shared_counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(shared_counter->counter);
kfree(shared_counter);
shared_counter = mlx5_tc_ct_counter_create(ct_priv);
if (IS_ERR(shared_counter)) {
ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
if (IS_ERR(entry->shared_counter)) {
err = PTR_ERR(entry->shared_counter);
ct_dbg("Failed to create counter for ct entry");
if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
entry->counter = mlx5_tc_ct_counter_create(ct_priv);
else
entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
if (IS_ERR(entry->counter)) {
err = PTR_ERR(entry->counter);
return err;
}
@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_shared_counter_put(ct_priv, entry);
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
mlx5_tc_ct_shared_counter_put(ct_priv, entry);
mlx5_tc_ct_counter_put(ct_priv, entry);
}
@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);

View File

@ -95,13 +95,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
if (!IS_ERR_OR_NULL(vport->egress.acl))
return 0;
if (!vport->egress.acl) {
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR_OR_NULL(vport->egress.acl)) {
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
goto out;
@ -110,6 +108,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
err = esw_acl_egress_lgcy_groups_create(esw, vport);
if (err)
goto out;
}
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",

View File

@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++;
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR_OR_NULL(vport->egress.acl)) {
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL;
return err;

View File

@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;

View File

@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;

View File

@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
int ret;
int i;
plat->phy_addr = -1;
plat->clk_csr = 5;
plat->has_gmac = 0;
plat->has_gmac4 = 1;
@ -345,7 +346,6 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
@ -362,7 +362,6 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
return ehl_common_data(pdev, plat);
@ -376,7 +375,6 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@ -408,7 +406,6 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@ -450,7 +447,6 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;

View File

@ -793,6 +793,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,

View File

@ -6893,6 +6893,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},

View File

@ -2272,6 +2272,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@ -2292,9 +2293,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
rx_status->band = ar->rx_channel->band;
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));

View File

@ -1654,6 +1654,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@ -1666,11 +1667,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@ -1702,6 +1705,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
/* the error response is expected when
* target_mem_delayed is true.
*/
if (delayed && resp.resp.error == 0)
goto out;
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@ -1736,6 +1745,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
ab->qmi.target_mem_delayed = false;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@ -1743,6 +1754,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
if (ab->qmi.mem_seg_count <= 2) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi dma allocation failed (%d B type %u), will try later with small size\n",
chunk->size,
chunk->type);
ath11k_qmi_free_target_mem_chunk(ab);
ab->qmi.target_mem_delayed = true;
return 0;
}
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@ -2467,7 +2487,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
} else if (msg->mem_seg_len > 2) {
} else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",

View File

@ -121,6 +121,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;

View File

@ -2802,6 +2802,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
return ctrl->opts && ctrl->opts->discovery_nqn;
}
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@ -2821,7 +2826,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
(ctrl->opts && ctrl->opts->discovery_nqn))
nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@ -3090,7 +3095,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@ -3130,7 +3135,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
if (!ctrl->identified) {
if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;

View File

@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
}
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ioerr_work);
nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
@ -2046,7 +2056,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
check_error:
if (terminate_assoc)
nvme_fc_error_recovery(ctrl, "transport detected io error");
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
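Taken together, the ioerr_work pieces move error recovery off the I/O completion path: nvme_fc_fcpio_done() can be invoked from interrupt context, while aborting the outstanding I/Os during recovery may sleep, so the work is queued on nvme_reset_wq instead (this reading of the motivation is inferred from the patch, not stated in it). The paired cancel_work_sync() calls in the delete and init-failure paths ensure the work item is flushed before the controller can go away.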

View File

@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
__u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
cqe->command_id, le16_to_cpu(cqe->sq_id));
command_id, le16_to_cpu(cqe->sq_id));
return;
}
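The CQE lives in memory the controller updates via DMA, so cqe->command_id can change between uses; snapshotting it once with READ_ONCE() makes the AEN check, the tag lookup, and the warning all observe the same value. A minimal sketch of the idiom, with a hypothetical descriptor type:

	#include <linux/compiler.h>
	#include <linux/types.h>

	struct shared_desc {
		u16 command_id;		/* device-writable via DMA */
	};

	static u16 snapshot_id(struct shared_desc *d)
	{
		/*
		 * Without READ_ONCE() the compiler may load d->command_id once
		 * per use; a concurrent device write between those loads would
		 * let two consumers of the "same" id see different values.
		 */
		return READ_ONCE(d->command_id);
	}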
@ -3201,7 +3202,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */

View File

@ -201,7 +201,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
@ -286,7 +286,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* directly, otherwise queue io_work. Also, only do that if we
* are on the same cpu, so we don't introduce contention.
*/
if (queue->io_cpu == smp_processor_id() &&
if (queue->io_cpu == __smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
nvme_tcp_send_all(queue);

View File

@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
IB_DEVICE_INTEGRITY_HANDOVER)) {
pr_warn("T10-PI is not supported by device %s. Disabling it\n",
cm_id->device->name);
nport->pi_enable = false;
}
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
while (!list_empty(&queue->rsp_wait_list)) {
struct nvmet_rdma_rsp *rsp;
rsp = list_first_entry(&queue->rsp_wait_list,
struct nvmet_rdma_rsp,
wait_list);
list_del(&rsp->wait_list);
nvmet_rdma_put_rsp(rsp);
}
fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
if (port->nport->pi_enable &&
!(cm_id->device->attrs.device_cap_flags &
IB_DEVICE_INTEGRITY_HANDOVER)) {
pr_err("T10-PI is not supported for %pISpcs\n", addr);
ret = -EINVAL;
goto out_destroy_id;
}
port->cm_id = cm_id;
return 0;

View File

@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Typical regulator startup times as per data sheet in uS */
#define BD71847_BUCK1_STARTUP_TIME 144
#define BD71847_BUCK2_STARTUP_TIME 162
#define BD71847_BUCK3_STARTUP_TIME 162
#define BD71847_BUCK4_STARTUP_TIME 240
#define BD71847_BUCK5_STARTUP_TIME 270
#define BD71847_BUCK6_STARTUP_TIME 200
#define BD71847_LDO1_STARTUP_TIME 440
#define BD71847_LDO2_STARTUP_TIME 370
#define BD71847_LDO3_STARTUP_TIME 310
#define BD71847_LDO4_STARTUP_TIME 400
#define BD71847_LDO5_STARTUP_TIME 530
#define BD71847_LDO6_STARTUP_TIME 400
#define BD71837_BUCK1_STARTUP_TIME 160
#define BD71837_BUCK2_STARTUP_TIME 180
#define BD71837_BUCK3_STARTUP_TIME 180
#define BD71837_BUCK4_STARTUP_TIME 180
#define BD71837_BUCK5_STARTUP_TIME 160
#define BD71837_BUCK6_STARTUP_TIME 240
#define BD71837_BUCK7_STARTUP_TIME 220
#define BD71837_BUCK8_STARTUP_TIME 200
#define BD71837_LDO1_STARTUP_TIME 440
#define BD71837_LDO2_STARTUP_TIME 370
#define BD71837_LDO3_STARTUP_TIME 310
#define BD71837_LDO4_STARTUP_TIME 400
#define BD71837_LDO5_STARTUP_TIME 310
#define BD71837_LDO6_STARTUP_TIME 400
#define BD71837_LDO7_STARTUP_TIME 530
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {

View File

@ -8896,7 +8896,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
hba->uic_link_state))
hba->uic_link_state) &&
!hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {

View File

@ -189,7 +189,10 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
} else {
return 1;
}
while (hw->count < hw->len) {
altera_spi_tx_word(hw);
@ -204,9 +207,8 @@ static int altera_spi_txrx(struct spi_master *master,
altera_spi_rx_word(hw);
}
spi_finalize_current_transfer(master);
}
return t->len;
return 0;
}
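The corrected return values follow the SPI core's transfer_one() contract: a positive return tells the core the transfer is still in flight and spi_finalize_current_transfer() will be called later (here from the IRQ handler), while 0 means it already finished; the old code returned t->len, a positive value the core would take as "still in progress" even on the polled path. A skeletal sketch of the contract, with hypothetical driver names:

	#include <linux/interrupt.h>
	#include <linux/spi/spi.h>

	/* hypothetical driver state and helpers */
	struct my_hw {
		struct spi_master *master;
		bool use_irq;
	};
	extern void start_hw_transfer(struct my_hw *hw, struct spi_transfer *t);
	extern void do_polled_transfer(struct my_hw *hw, struct spi_transfer *t);

	static int my_transfer_one(struct spi_master *master,
				   struct spi_device *spi, struct spi_transfer *t)
	{
		struct my_hw *hw = spi_master_get_devdata(master);

		if (hw->use_irq) {
			start_hw_transfer(hw, t);
			return 1;	/* in flight: the IRQ handler finalizes */
		}

		do_polled_transfer(hw, t);
		spi_finalize_current_transfer(master);
		return 0;		/* finished synchronously */
	}

	static irqreturn_t my_irq(int irq, void *dev_id)
	{
		struct my_hw *hw = dev_id;

		/* ... drain the FIFO ... */
		spi_finalize_current_transfer(hw->master);
		return IRQ_HANDLED;
	}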
static irqreturn_t altera_spi_irq(int irq, void *dev)

View File

@ -1100,6 +1100,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@ -1108,8 +1109,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
if (!speed_hz)
speed_hz = 100000;
ms = 8LL * 1000LL * xfer->len;
do_div(ms, xfer->speed_hz);
do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)

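Worked numbers for the new fallback, assuming a 1024-byte transfer with xfer->speed_hz left at 0: speed_hz is forced to 100 kHz, so ms = 8 * 1000 * 1024 / 100000 = 81 ms after the do_div(), and the tolerance line (ms += ms + 200) brings the bound to 362 ms, instead of the division by zero the old code risked.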
View File

@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
return -EINVAL;
ret = -EINVAL;
goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
return -EADDRNOTAVAIL;
ret = -EADDRNOTAVAIL;
goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
return -ENODEV;
ret = -ENODEV;
goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
if (ret)
dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
if (ret) {
dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
goto err_put_controller;
}
return 0;
err_put_controller:
spmi_controller_put(ctrl);
return ret;
}
@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
kfree(ctrl);
spmi_controller_put(ctrl);
return 0;
}

View File

@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
module will be called typec_displayport.
module will be called typec_nvidia.
endmenu

View File

@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
static long privcmd_ioctl_mmap_resource(struct file *file,
struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
struct xen_mem_acquire_resource xdata;
struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
/* Both fields must be set or unset */
if (!!kdata.addr != !!kdata.num)
return -EINVAL;
xdata.domid = kdata.dom;
xdata.type = kdata.type;
xdata.id = kdata.id;
if (!kdata.addr && !kdata.num) {
/* Query the size of the resource. */
rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
if (rc)
return rc;
return __put_user(xdata.nr_frames, &udata->num);
}
mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
memset(&xdata, 0, sizeof(xdata));
xdata.domid = kdata.dom;
xdata.type = kdata.type;
xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
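With this change, userspace can size a resource before mapping it: leave addr and num both zero and the kernel writes the frame count back into num. A hypothetical userspace sketch (struct privcmd_mmap_resource and IOCTL_PRIVCMD_MMAP_RESOURCE come from the privcmd uapi header; the include path may vary by distro):

	#include <linux/types.h>
	#include <sys/ioctl.h>
	#include <xen/privcmd.h>

	static long query_resource_frames(int privcmd_fd,
					  struct privcmd_mmap_resource *res)
	{
		res->addr = 0;	/* both zero: ask for the size only */
		res->num = 0;

		if (ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAP_RESOURCE, res) < 0)
			return -1;

		return (long)res->num;	/* kernel wrote the frame count back */
	}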

View File

@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
static struct btrfs_block_group *peek_discard_list(
struct btrfs_discard_ctl *discard_ctl,
enum btrfs_discard_state *discard_state,
int *discard_index)
int *discard_index, u64 now)
{
struct btrfs_block_group *block_group;
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
again:
block_group = find_next_block_group(discard_ctl, now);
if (block_group && now > block_group->discard_eligible_time) {
if (block_group && now >= block_group->discard_eligible_time) {
if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
block_group->used != 0) {
if (btrfs_is_block_group_data_only(block_group))
@ -222,12 +221,11 @@ static struct btrfs_block_group *peek_discard_list(
block_group->discard_state = BTRFS_DISCARD_EXTENTS;
}
discard_ctl->block_group = block_group;
}
if (block_group) {
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
} else {
block_group = NULL;
}
spin_unlock(&discard_ctl->lock);
return block_group;
@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
btrfs_discard_schedule_work(discard_ctl, false);
}
/**
* btrfs_discard_schedule_work - responsible for scheduling the discard work
* @discard_ctl: discard control
* @override: override the current timer
*
* Discards are issued by a delayed workqueue item. @override is used to
* update the current delay as the baseline delay interval is reevaluated on
* transaction commit. This is also maxed with any other rate limit.
*/
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool override)
static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
u64 now, bool override)
{
struct btrfs_block_group *block_group;
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
if (!btrfs_run_discard_work(discard_ctl))
goto out;
return;
if (!override && delayed_work_pending(&discard_ctl->work))
goto out;
return;
block_group = find_next_block_group(discard_ctl, now);
if (block_group) {
@ -384,7 +369,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
mod_delayed_work(discard_ctl->discard_workers,
&discard_ctl->work, delay);
}
out:
}
/*
* btrfs_discard_schedule_work - responsible for scheduling the discard work
* @discard_ctl: discard control
* @override: override the current timer
*
* Discards are issued by a delayed workqueue item. @override is used to
* update the current delay as the baseline delay interval is reevaluated on
* transaction commit. This is also maxed with any other rate limit.
*/
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool override)
{
const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
__btrfs_discard_schedule_work(discard_ctl, now, override);
spin_unlock(&discard_ctl->lock);
}
@ -429,13 +431,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
int discard_index = 0;
u64 trimmed = 0;
u64 minlen = 0;
u64 now = ktime_get_ns();
discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
block_group = peek_discard_list(discard_ctl, &discard_state,
&discard_index);
&discard_index, now);
if (!block_group || !btrfs_run_discard_work(discard_ctl))
return;
if (now < block_group->discard_eligible_time) {
btrfs_discard_schedule_work(discard_ctl, false);
return;
}
/* Perform discarding */
minlen = discard_minlen[discard_index];
@ -484,9 +491,8 @@ static void btrfs_discard_workfn(struct work_struct *work)
spin_lock(&discard_ctl->lock);
discard_ctl->block_group = NULL;
__btrfs_discard_schedule_work(discard_ctl, now, false);
spin_unlock(&discard_ctl->lock);
btrfs_discard_schedule_work(discard_ctl, false);
}
/**

View File

@ -676,9 +676,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
struct inode *inode = tree->private_data;
btrfs_panic(btrfs_sb(inode->i_sb), err,
btrfs_panic(tree->fs_info, err,
"locking error: extent tree was modified by another thread while locked");
}

View File

@ -3224,6 +3224,12 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
return ret;
}
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
return btrfs_fs_closing(fs_info) ||
test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@ -3232,6 +3238,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
struct btrfs_trans_handle *trans = NULL;
int err = -ENOMEM;
int ret = 0;
bool stopped = false;
path = btrfs_alloc_path();
if (!path)
@ -3244,7 +3251,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
path->skip_locking = 1;
err = 0;
while (!err && !btrfs_fs_closing(fs_info)) {
while (!err && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
@ -3287,7 +3294,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
}
mutex_lock(&fs_info->qgroup_rescan_lock);
if (!btrfs_fs_closing(fs_info))
if (!stopped)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
ret = update_qgroup_status_item(trans);
@ -3306,7 +3313,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
if (stopped) {
btrfs_info(fs_info, "qgroup scan paused");
} else if (err >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",

View File

@ -3027,11 +3027,16 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
return 0;
for (i = 0; i < btrfs_header_nritems(leaf); i++) {
u8 type;
btrfs_item_key_to_cpu(leaf, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
continue;
ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
type = btrfs_file_extent_type(leaf, ei);
if ((type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) &&
btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
found = true;
space_cache_ino = key.objectid;

View File

@ -1895,6 +1895,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
/*
* Pause the qgroup rescan worker if it is running. We don't want
* it to be still running after we are in RO mode, as after that,
* by the time we unmount, it might have left a transaction open,
* so we would leak the transaction and/or crash.
*/
btrfs_qgroup_wait_for_completion(fs_info, false);
ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;

View File

@ -754,6 +754,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
u64 length;
u64 chunk_end;
u64 stripe_len;
u16 num_stripes;
u16 sub_stripes;
@ -808,6 +809,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
chunk_err(leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
logical, length);
return -EUCLEAN;
}
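check_add_overflow(), from linux/overflow.h, stores the sum in its third argument and returns true if the addition wrapped, so a crafted chunk whose logical start plus length would pass 2^64 is rejected before a bogus chunk_end can be used. A tiny illustration with made-up values:

	#include <linux/overflow.h>
	#include <linux/types.h>

	static bool chunk_range_wraps(u64 logical, u64 length)
	{
		u64 end;

		/* e.g. logical = U64_MAX - 4095, length = 8192 wraps past 2^64 */
		return check_add_overflow(logical, length, &end);
	}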
if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
chunk_err(leaf, chunk, logical,
"invalid chunk stripe length: %llu",

View File

@ -1317,6 +1317,7 @@ void dfs_cache_del_vol(const char *fullpath)
vi = find_vol(fullpath);
spin_unlock(&vol_list_lock);
if (!IS_ERR(vi))
kref_put(&vi->refcnt, vol_release);
}

View File

@ -3248,7 +3248,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
free_rsp_buf(resp_buftype, rsp);
/* retry close in a worker thread if this one is interrupted */
if (rc == -EINTR) {
if (is_interrupt_error(rc)) {
int tmp_rc;
tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,

Some files were not shown because too many files have changed in this diff.