This is the 5.4.47 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7qK2gACgkQONu9yGCS
 aT4kaw/8Cknc9G64S1SWchxFj4LnSYzrpeFGXOZBlHBtcCyD12JNGjA6SG6DoGLY
 /s/1fjBwJWrWpkI36CGNZowNPYtzRMOhyBGj7QHTpEj0vjMTKYXhVieMquQSHwRP
 zcrYl8f/xWJKo6XNjR1YLh5PWeD6B9dWD1RcvBZ0jCez84jyVT1EyWMWGiodkLMx
 gmJITO2DBaekelU0yyZJIFePO5DJpcXspf5lrPgkPG9u/U4hZf94AAGMYbXJZ5Rn
 BDcqGEGQe+mtcG6lq6DGRDH5VVMG4k13MrZJBfrVAhkGU+g6nKQbOXCcdcRjoM6N
 9NL8RbcpL0NhphNmrKwjNcYd4kZxYgTQb87FZi+qDxwGQIWIxcET6gL5t/vqAev1
 v/uKFLlt5x/2tNtpC8aY8EwdyGcXfeBXEl9AjP7HUWC/KzB8I9vLnokcMvOMYDIg
 3wNIsKMYLcLzuLL8oJ7FvDkBO+H/RjSvF4UvQyLOPOJtWSV5uKbLfKIU9sw90G3i
 t8qo3lNC/J4saJ+jx+O7XoHjFw6biJFATvs0+HtpCkwi0aJm2SGW+OvXuZCGZPnz
 TW2YsGFCCwL/RPtceJVkGfV3kr7SUB5AGXjueXdC+4QWfmi3POWojjgheQrleS+3
 OLxRsUVbQ6hOqEgLAaV6HhzvykkTjDj2Gq8P3I+1Y/eiRHjlpdU=
 =WYnW
 -----END PGP SIGNATURE-----

Merge 5.4.47 into android-5.4-stable

Changes in 5.4.47
	ipv6: fix IPV6_ADDRFORM operation logic
	mlxsw: core: Use different get_trend() callbacks for different thermal zones
	net_failover: fixed rollback in net_failover_open()
	tun: correct header offsets in napi frags mode
	bridge: Avoid infinite loop when suppressing NS messages with invalid options
	vxlan: Avoid infinite loop when suppressing NS messages with invalid options
	bpf: Support llvm-objcopy for vmlinux BTF
	elfnote: mark all .note sections SHF_ALLOC
	Input: mms114 - fix handling of mms345l
	ARM: 8977/1: ptrace: Fix mask for thumb breakpoint hook
	sched/fair: Don't NUMA balance for kthreads
	Input: synaptics - add a second working PNP_ID for Lenovo T470s
	csky: Fixup abiv2 syscall_trace break a4 & a5
	gfs2: Even more gfs2_find_jhead fixes
	drivers/net/ibmvnic: Update VNIC protocol version reporting
	powerpc/xive: Clear the page tables for the ESB IO mapping
	spi: dw: Fix native CS being unset
	ath9k_htc: Silence undersized packet warnings
	smack: avoid unused 'sip' variable warning
	RDMA/uverbs: Make the event_queue fds return POLLERR when disassociated
	padata: add separate cpuhp node for CPUHP_PADATA_DEAD
	s390/pci: Log new handle in clp_disable_fh()
	x86/cpu/amd: Make erratum #1054 a legacy erratum
	KVM: x86: only do L1TF workaround on affected processors
	PCI/PM: Adjust pcie_wait_for_link_delay() for caller delay
	perf probe: Accept the instance number of kretprobe event
	mm: add kvfree_sensitive() for freeing sensitive data objects
	selftests: fix flower parent qdisc
	fanotify: fix ignore mask logic for events on child and on dir
	aio: fix async fsync creds
	ipv4: fix a RCU-list lock in fib_triestat_seq_show
	iwlwifi: mvm: fix NVM check for 3168 devices
	sctp: fix possibly using a bad saddr with a given dst
	sctp: fix refcount bug in sctp_wfree
	x86_64: Fix jiffies ODR violation
	x86/PCI: Mark Intel C620 MROMs as having non-compliant BARs
	x86/speculation: Prevent rogue cross-process SSBD shutdown
	x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
	x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
	x86/reboot/quirks: Add MacBook6,1 reboot quirk
	perf/x86/intel: Add more available bits for OFFCORE_RESPONSE of Intel Tremont
	KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated
	KVM: x86: respect singlestep when emulating instruction
	KVM: x86: Fix APIC page invalidation race
	powerpc/ptdump: Properly handle non standard page size
	ASoC: max9867: fix volume controls
	io_uring: use kvfree() in io_sqe_buffer_register()
	efi/efivars: Add missing kobject_put() in sysfs entry creation error path
	smb3: fix incorrect number of credits when ioctl MaxOutputResponse > 64K
	smb3: add indatalen that can be a non-zero value to calculation of credit charge in smb2 ioctl
	watchdog: imx_sc_wdt: Fix reboot on crash
	ALSA: es1688: Add the missed snd_card_free()
	ALSA: fireface: fix configuration error for nominal sampling transfer frequency
	ALSA: hda/realtek - add a pintbl quirk for several Lenovo machines
	ALSA: pcm: disallow linking stream to itself
	ALSA: pcm: fix snd_pcm_link() lockdep splat
	ALSA: usb-audio: Fix inconsistent card PM state after resume
	ALSA: usb-audio: Add vendor, product and profile name for HP Thunderbolt Dock
	ACPI: sysfs: Fix reference count leak in acpi_sysfs_add_hotplug_profile()
	ACPI: CPPC: Fix reference count leak in acpi_cppc_processor_probe()
	ACPI: GED: add support for _Exx / _Lxx handler methods
	ACPI: PM: Avoid using power resources if there are none for D0
	arm64: acpi: fix UBSAN warning
	lib/lzo: fix ambiguous encoding bug in lzo-rle
	nilfs2: fix null pointer dereference at nilfs_segctor_do_construct()
	spi: dw: Fix controller unregister order
	spi: Fix controller unregister order
	spi: pxa2xx: Fix controller unregister order
	spi: pxa2xx: Fix runtime PM ref imbalance on probe error
	spi: bcm2835: Fix controller unregister order
	spi: bcm2835aux: Fix controller unregister order
	spi: bcm-qspi: Handle clock probe deferral
	spi: bcm-qspi: when tx/rx buffer is NULL set to 0
	PM: runtime: clk: Fix clk_pm_runtime_get() error path
	gup: document and work around "COW can break either way" issue
	crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated
	crypto: algapi - Avoid spurious modprobe on LOADED
	crypto: drbg - fix error return code in drbg_alloc_state()
	x86/{mce,mm}: Unmap the entire page if the whole page is affected and poisoned
	firmware: imx: warn on unexpected RX
	firmware: imx-scu: Support one TX and one RX
	firmware: imx: scu: Fix corruption of header
	crypto: virtio: Fix use-after-free in virtio_crypto_skcipher_finalize_req()
	crypto: virtio: Fix src/dst scatterlist calculation in __virtio_crypto_skcipher_do_req()
	crypto: virtio: Fix dest length calculation in __virtio_crypto_skcipher_do_req()
	dccp: Fix possible memleak in dccp_init and dccp_fini
	selftests/net: in rxtimestamp getopt_long needs terminating null entry
	net/mlx5: drain health workqueue in case of driver load error
	net/mlx5: Fix fatal error handling during device load
	net/mlx5e: Fix repeated XSK usage on one channel
	ovl: initialize error in ovl_copy_xattr
	proc: Use new_inode not new_inode_pseudo
	remoteproc: Fall back to using parent memory pool if no dedicated available
	remoteproc: Fix and restore the parenting hierarchy for vdev
	cpufreq: Fix up cpufreq_boost_set_sw()
	EDAC/skx: Use the mcmtr register to retrieve close_pg/bank_xor_enable
	video: vt8500lcdfb: fix fallthrough warning
	video: fbdev: w100fb: Fix a potential double free.
	KVM: nVMX: Skip IBPB when switching between vmcs01 and vmcs02
	KVM: nSVM: fix condition for filtering async PF
	KVM: nSVM: leave ASID aside in copy_vmcb_control_area
	KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
	KVM: MIPS: Define KVM_ENTRYHI_ASID to cpu_asid_mask(&boot_cpu_data)
	KVM: MIPS: Fix VPN2_MASK definition for variable cpu_vmbits
	KVM: arm64: Stop writing aarch32's CSSELR into ACTLR
	KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
	scsi: megaraid_sas: TM command refire leads to controller firmware crash
	scsi: lpfc: Fix negation of else clause in lpfc_prep_node_fc4type
	selftests/ftrace: Return unsupported if no error_log file
	ath9k: Fix use-after-free Read in htc_connect_service
	ath9k: Fix use-after-free Read in ath9k_wmi_ctrl_rx
	ath9k: Fix use-after-free Write in ath9k_htc_rx_msg
	ath9x: Fix stack-out-of-bounds Write in ath9k_hif_usb_rx_cb
	ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb
	Smack: slab-out-of-bounds in vsscanf
	drm/vkms: Hold gem object while still in-use
	mm/slub: fix a memory leak in sysfs_slab_add()
	fat: don't allow to mount if the FAT length == 0
	perf: Add cond_resched() to task_function_call()
	agp/intel: Reinforce the barrier after GTT updates
	mmc: sdhci-msm: Clear tuning done flag while hs400 tuning
	mmc: mmci_sdmmc: fix DMA API warning overlapping mappings
	mmc: tmio: Further fixup runtime PM management at remove
	mmc: uniphier-sd: call devm_request_irq() after tmio_mmc_host_probe()
	ARM: dts: at91: sama5d2_ptc_ek: fix sdmmc0 node description
	mmc: sdio: Fix potential NULL pointer error in mmc_sdio_init_card()
	mmc: sdio: Fix several potential memory leaks in mmc_sdio_init_card()
	block/floppy: fix contended case in floppy_queue_rq()
	xen/pvcalls-back: test for errors when calling backend_connect()
	KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
	KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
	Linux 5.4.47

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I3fb3216abdbc080b4ac7b827b35ff6a813e28eb8

@ -159,11 +159,15 @@ Byte sequences
distance = 16384 + (H << 14) + D
state = S (copy S literals after this block)
End of stream is reached if distance == 16384
In version 1 only, to prevent ambiguity with the RLE case when
((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the
compressor must not emit block copies where distance and length
meet these conditions.
In version 1 only, this instruction is also used to encode a run of
zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
In this case, it is followed by a fourth byte, X.
run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4
0 0 1 L L L L L (32..63)
Copy of small block within 16kB distance (preferably less than 34B)
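
A minimal illustrative sketch (not part of the patch) of the run-length
formula above, assuming insn is the instruction byte carrying the three
L bits and x is the fourth byte X:

	/* run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 */
	static unsigned int lzo_rle_run_length(unsigned char insn, unsigned char x)
	{
		return (((unsigned int)x << 3) | (insn & 0x07)) + 4;
	}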


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 46
SUBLEVEL = 47
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -125,8 +125,6 @@
bus-width = <8>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
non-removable;
mmc-ddr-1_8v;
status = "okay";
};


@ -363,6 +363,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
}
}
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
#endif /* __ARM_KVM_EMULATE_H__ */


@ -421,4 +421,6 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
return true;
}
#define kvm_arm_vcpu_loaded(vcpu) (false)
#endif /* __ARM_KVM_HOST_H__ */


@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xde01,
.instr_mask = 0xffffffff,
.instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
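
A concrete example of what the widened mask fixes: 0xde01 is the 16-bit
Thumb UDF #1 encoding used for this breakpoint, but a 32-bit Thumb-2
instruction may also carry 0xde01 in one halfword. With instr_mask =
0xffff only those 16 bits were compared, so such unrelated instructions
could trap as breakpoints; matching all 32 bits against 0x0000de01
restricts the hook to the genuine 16-bit encoding.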


@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
#include <linux/stddef.h>
#include <asm/cputype.h>
#include <asm/io.h>
@ -31,14 +32,14 @@
* is therefore used to delimit the MADT GICC structure minimum length
* appropriately.
*/
#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \
struct acpi_madt_generic_interrupt, efficiency_class)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
(unsigned long)(entry) + (entry)->header.length > (end))
#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
spe_interrupt) + sizeof(u16))
/* Basic configuration for ACPI */


@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_disable(vcpu);
}
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;


@ -392,8 +392,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
@ -677,4 +679,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
#endif /* __ARM64_KVM_HOST_H__ */
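
A hedged sketch of why the XOR works (assuming the usual layout, where
the 32-bit copro array aliases the 64-bit sys_regs array in the vcpu
context):

	/* Illustration only: one 64-bit system register viewed as two
	 * 32-bit coprocessor words. On little endian, copro[0] is the
	 * low word; on big endian it is the high word, so index r must
	 * become r ^ 1, which is exactly what CPx_BIAS provides. */
	union sysreg_alias {
		u64 sys_reg;
		u32 copro[2];
	};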


@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
#define __ptrauth_save_key(regs, key) \
({ \
regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
})
/*
* Handle the guest trying to use a ptrauth instruction, or trying to access a
* ptrauth register.
*/
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *ctxt;
if (vcpu_has_ptrauth(vcpu)) {
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_enable(vcpu);
ctxt = vcpu->arch.host_cpu_context;
__ptrauth_save_key(ctxt->sys_regs, APIA);
__ptrauth_save_key(ctxt->sys_regs, APIB);
__ptrauth_save_key(ctxt->sys_regs, APDA);
__ptrauth_save_key(ctxt->sys_regs, APDB);
__ptrauth_save_key(ctxt->sys_regs, APGA);
} else {
else
kvm_inject_undefined(vcpu);
}
}
/*


@ -1280,10 +1280,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
int reg = r->reg;
/* See the 32bit mapping in kvm_host.h */
if (p->is_aarch32)
reg = r->reg / 2;
if (p->is_write)
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
vcpu_write_sys_reg(vcpu, p->regval, reg);
else
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
p->regval = vcpu_read_sys_reg(vcpu, reg);
return true;
}


@ -13,6 +13,8 @@
#define LSAVE_A1 28
#define LSAVE_A2 32
#define LSAVE_A3 36
#define LSAVE_A4 40
#define LSAVE_A5 44
#define KSPTOUSP
#define USPTOKSP


@ -170,8 +170,10 @@ csky_syscall_trace:
ldw a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
subi sp, 8
stw r5, (sp, 0x4)
stw r4, (sp, 0x0)
ldw r9, (sp, LSAVE_A4)
stw r9, (sp, 0x0)
ldw r9, (sp, LSAVE_A5)
stw r9, (sp, 0x4)
#else
ldw r6, (sp, LSAVE_A4)
ldw r7, (sp, LSAVE_A5)


@ -274,8 +274,12 @@ enum emulation_result {
#define MIPS3_PG_SHIFT 6
#define MIPS3_PG_FRAME 0x3fffffc0
#if defined(CONFIG_64BIT)
#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK 0xffffe000
#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
#endif
#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)


@ -326,12 +326,6 @@ SECTIONS
*(.branch_lt)
}
#ifdef CONFIG_DEBUG_INFO_BTF
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
*(.BTF)
}
#endif
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))


@ -58,6 +58,7 @@ struct pg_state {
unsigned long start_address;
unsigned long start_pa;
unsigned long last_pa;
unsigned long page_size;
unsigned int level;
u64 current_flags;
bool check_wx;
@ -155,9 +156,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
delta = PAGE_SIZE >> 10;
delta = st->page_size >> 10;
} else {
pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
delta = (addr - st->start_address) >> 10;
@ -188,7 +189,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
}
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val)
unsigned int level, u64 val, unsigned long page_size)
{
u64 flag = val & pg_level[level].mask;
u64 pa = val & PTE_RPN_MASK;
@ -200,6 +201,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
st->page_size = page_size;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
/*
* Dump the section of virtual memory when:
@ -211,7 +213,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
} else if (flag != st->current_flags || level != st->level ||
addr >= st->marker[1].start_address ||
(pa != st->last_pa + PAGE_SIZE &&
(pa != st->last_pa + st->page_size &&
(pa != st->start_pa || st->start_pa != st->last_pa))) {
/* Check the PTE flags */
@ -239,6 +241,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
st->page_size = page_size;
st->current_flags = flag;
st->level = level;
} else {
@ -254,7 +257,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
note_page(st, addr, 4, pte_val(*pte));
note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE);
}
}
@ -271,7 +274,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
/* pmd exists */
walk_pte(st, pmd, addr);
else
note_page(st, addr, 3, pmd_val(*pmd));
note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
}
}
@ -287,7 +290,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
/* pud exists */
walk_pmd(st, pud, addr);
else
note_page(st, addr, 2, pud_val(*pud));
note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
}
}
@ -306,7 +309,7 @@ static void walk_pagetables(struct pg_state *st)
/* pgd exists */
walk_pud(st, pgd, addr);
else
note_page(st, addr, 1, pgd_val(*pgd));
note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE);
}
}
@ -361,7 +364,7 @@ static int ptdump_show(struct seq_file *m, void *v)
/* Traverse kernel page tables */
walk_pagetables(&st);
note_page(&st, 0, 0, 0);
note_page(&st, 0, 0, 0, 0);
return 0;
}


@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
@ -1013,12 +1014,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
if (xd->eoi_mmio) {
unmap_kernel_range((unsigned long)xd->eoi_mmio,
1u << xd->esb_shift);
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
unmap_kernel_range((unsigned long)xd->trig_mmio,
1u << xd->esb_shift);
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}


@ -309,14 +309,13 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
int clp_disable_fh(struct zpci_dev *zdev)
{
u32 fh = zdev->fh;
int rc;
if (!zdev_enabled(zdev))
return 0;
rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}


@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs
static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
EVENT_EXTRA_END
};


@ -85,28 +85,35 @@ void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#ifdef CONFIG_X86_64
static inline int set_mce_nospec(unsigned long pfn)
/*
* Prevent speculative access to the page by either unmapping
* it (if we do not require access to any part of the page) or
* marking it uncacheable (if we want to try to retrieve data
* from non-poisoned lines in the page).
*/
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
unsigned long decoy_addr;
int rc;
/*
* Mark the linear address as UC to make sure we don't log more
* errors because of speculative access to the page.
* We would like to just call:
* set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
* set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
* speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
* that looks just like the one we want, but has bit 63 flipped.
* This relies on set_memory_uc() properly sanitizing any __pa()
* This relies on set_memory_XX() properly sanitizing any __pa()
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
*/
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
rc = set_memory_uc(decoy_addr, 1);
if (unmap)
rc = set_memory_np(decoy_addr, 1);
else
rc = set_memory_uc(decoy_addr, 1);
if (rc)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;


@ -1117,8 +1117,7 @@ static const int amd_erratum_383[] =
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{


@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@ -727,15 +729,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
break;
}
/*
* At this point, an STIBP mode other than "off" has been set.
* If STIBP support is not being forced, check if STIBP always-on
* is preferred.
*/
if (mode != SPECTRE_V2_USER_STRICT &&
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@ -758,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
spectre_v2_user_ibpb = mode;
}
/* If enhanced IBRS is enabled no STIBP required */
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
/*
* If enhanced IBRS is enabled or SMT impossible, STIBP is not
* required.
*/
if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
* If SMT is not possible or STIBP is not available clear the STIBP
* mode.
* At this point, an STIBP mode other than "off" has been set.
* If STIBP support is not being forced, check if STIBP always-on
* is preferred.
*/
if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
if (mode != SPECTRE_V2_USER_STRICT &&
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
/*
* If STIBP is not available, clear the STIBP mode.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
spectre_v2_user_stibp = mode;
set_mode:
spectre_v2_user = mode;
/* Only print the STIBP mode when SMT possible */
if (smt_possible)
pr_info("%s\n", spectre_v2_user_strings[mode]);
pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@ -1007,7 +1013,7 @@ void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
@ -1250,14 +1256,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
* mode.
* mode. It can neither be enabled if it was force-disabled
* by a previous prctl call.
*/
if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@ -1268,10 +1279,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@ -1302,7 +1315,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@ -1333,22 +1347,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
switch (spectre_v2_user) {
case SPECTRE_V2_USER_NONE:
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
case SPECTRE_V2_USER_STRICT:
case SPECTRE_V2_USER_STRICT_PREFERRED:
return PR_SPEC_DISABLE;
default:
} else
return PR_SPEC_NOT_AFFECTED;
}
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@ -1587,7 +1603,7 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
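
A userspace sketch (illustrative; constants come from linux/prctl.h,
pulled in by sys/prctl.h) of the interface whose force-disable
semantics the patch enforces: after PR_SPEC_FORCE_DISABLE, a later
attempt to re-enable indirect branch speculation must fail with EPERM.

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_FORCE_DISABLE, 0, 0))
			perror("force-disable");
		/* With the fix, this fails with EPERM instead of succeeding. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_ENABLE, 0, 0))
			perror("re-enable (expected to fail)");
		return 0;
	}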


@ -533,6 +533,13 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
static bool whole_page(struct mce *m)
{
if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
return true;
return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}
bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
@ -601,7 +608,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
if (!memory_failure(pfn, 0))
set_mce_nospec(pfn);
set_mce_nospec(pfn, whole_page(mce));
}
return NOTIFY_OK;
@ -1103,7 +1110,7 @@ static int do_memory_failure(struct mce *m)
if (ret)
pr_err("Memory error not recovered");
else
set_mce_nospec(m->addr >> PAGE_SHIFT);
set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m));
return ret;
}


@ -428,28 +428,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
/*
* If TIF_SSBD is different, select the proper mitigation
* method. Note that if SSBD mitigation is disabled or permanentely
* enabled this branch can't be taken because nothing can set
* TIF_SSBD.
*/
if (tif_diff & _TIF_SSBD) {
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
/* Handle change of TIF_SSBD depending on the mitigation method. */
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
msr |= ssbd_tif_to_spec_ctrl(tifn);
updmsr = true;
}
} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
updmsr |= !!(tif_diff & _TIF_SSBD);
msr |= ssbd_tif_to_spec_ctrl(tifn);
}
/*
* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
* otherwise avoid the MSR write.
*/
/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);


@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
{ /* Handle problems with rebooting on Apple MacBook6,1 */
.callback = set_pci_reboot,
.ident = "Apple MacBook6,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
},
},
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",


@ -343,6 +343,8 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
{
BUG_ON((u64)(unsigned)access_mask != access_mask);
BUG_ON((mmio_mask & mmio_value) != mmio_value);
WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
shadow_mmio_access_mask = access_mask;
@ -580,16 +582,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
* the most significant bits of legal physical address space.
*/
shadow_nonpresent_or_rsvd_mask = 0;
low_phys_bits = boot_cpu_data.x86_cache_bits;
if (boot_cpu_data.x86_cache_bits <
52 - shadow_nonpresent_or_rsvd_mask_len) {
low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_has_bug(X86_BUG_L1TF) &&
!WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
52 - shadow_nonpresent_or_rsvd_mask_len)) {
low_phys_bits = boot_cpu_data.x86_cache_bits
- shadow_nonpresent_or_rsvd_mask_len;
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_cache_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_cache_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
} else
WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
}
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
@ -6247,25 +6248,16 @@ static void kvm_set_mmio_spte_mask(void)
u64 mask;
/*
* Set the reserved bits and the present bit of an paging-structure
* entry to generate page fault with PFER.RSV = 1.
* Set a reserved PA bit in MMIO SPTEs to generate page faults with
* PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
* paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
* 52-bit physical addresses then there are no reserved PA bits in the
* PTEs and so the reserved PA approach must be disabled.
*/
/*
* Mask the uppermost physical address bit, which would be reserved as
* long as the supported physical address width is less than 52.
*/
mask = 1ull << 51;
/* Set the present bit. */
mask |= 1ull;
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (shadow_phys_bits == 52)
mask &= ~1ull;
if (shadow_phys_bits < 52)
mask = BIT_ULL(51) | PT_PRESENT_MASK;
else
mask = 0;
kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}


@ -3237,8 +3237,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
/* When we're shadowing, trap PFs, but not async PF */
if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
/* Trap async PF even if not shadowing */
if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
return NESTED_EXIT_HOST;
break;
default:
@ -3327,7 +3327,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
dst->asid = from->asid;
/* asid not copied, it is handled manually for svm->vmcb. */
dst->tlb_ctl = from->tlb_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;


@ -302,7 +302,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
cpu = get_cpu();
prev = vmx->loaded_vmcs;
vmx->loaded_vmcs = vmcs;
vmx_vcpu_load_vmcs(vcpu, cpu);
vmx_vcpu_load_vmcs(vcpu, cpu, prev);
vmx_sync_vmcs_host_state(vmx, prev);
put_cpu();
@ -5357,7 +5357,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
switch (exit_reason) {
switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (is_nmi(intr_info))
return false;


@ -1286,10 +1286,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
pi_set_on(pi_desc);
}
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
struct loaded_vmcs *buddy)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
struct vmcs *prev;
if (!already_loaded) {
loaded_vmcs_clear(vmx->loaded_vmcs);
@ -1308,10 +1310,18 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
local_irq_enable();
}
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
prev = per_cpu(current_vmcs, cpu);
if (prev != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
indirect_branch_prediction_barrier();
/*
* No indirect branch prediction barrier needed when switching
* the active VMCS within a guest, e.g. on nested VM-Enter.
* The L1 VMM can protect itself with retpolines, IBPB or IBRS.
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();
}
if (!already_loaded) {
@ -1356,7 +1366,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx_vcpu_load_vmcs(vcpu, cpu);
vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu);


@ -304,7 +304,8 @@ struct kvm_vmx {
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
struct loaded_vmcs *buddy);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);


@ -6833,7 +6833,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
kvm_rip_write(vcpu, ctxt->eip);
if (r && ctxt->tf)
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
__kvm_set_rflags(vcpu, ctxt->eflags);
}
@ -7978,9 +7978,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end,
bool blockable)
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
unsigned long apic_address;
@ -7991,8 +7990,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)


@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]


@ -374,7 +374,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
if (!err)
crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);
out:
crypto_larval_kill(&larval->alg);


@ -1294,8 +1294,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev)
if (!drbg->prev) {
ret = -ENOMEM;
goto fini;
}
drbg->fips_primed = false;
}


@ -865,6 +865,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
kobject_put(&cpc_ptr->kobj);
goto out_free;
}


@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
/* If _PR3 is not available, use D3hot as the target state. */
/* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {


@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
char ev_name[5];
u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
else
trigger = p->triggering;
} else {
gsi = pext->interrupts[0];
trigger = p->triggering;
}
irq = r.start;
if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
switch (gsi) {
case 0 ... 255:
sprintf(ev_name, "_%c%02hhX",
trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
break;
/* fall through */
default:
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
break;
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
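
As a worked example of the naming scheme above: an edge-triggered GSI
of 10 looks up the method "_E0A", a level-triggered GSI 10 looks up
"_L0A", and any GSI above 255 (or a missing _Exx/_Lxx method) falls
back to the _EVT handler.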


@ -919,12 +919,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
&& package->package.count) {
int err = acpi_extract_power_resources(package, 0,
&ps->resources);
if (!err)
device->power.flags.power_resources = 1;
}
&& package->package.count)
acpi_extract_power_resources(package, 0, &ps->resources);
ACPI_FREE(buffer.pointer);
}
@ -971,14 +968,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
/* Set defaults for D0 and D3hot states (always valid) */
/* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
/*
* Use power resources only if the D0 list of them is populated, because
* some platforms may provide _PR3 only to indicate D3cold support and
* in those cases the power resources list returned by it may be bogus.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
device->power.flags.power_resources = 1;
/*
* D3cold is supported if the D3hot list of power resources is
* not empty.
*/
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}


@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error)
if (error) {
kobject_put(&hotplug->kobj);
goto err_out;
}
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;


@ -2902,17 +2902,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
(unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
spin_lock_irq(&floppy_lock);
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
spin_unlock_irq(&floppy_lock);
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return BLK_STS_OK;
return BLK_STS_RESOURCE;
}
spin_lock_irq(&floppy_lock);
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
spin_unlock_irq(&floppy_lock);
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);


@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
wmb();
readl(intel_private.gtt + j - 1);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
static void i9xx_chipset_flush(void)
{
wmb();
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
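
A brief note on the idiom (our reading of the change, not text from the
patch): wmb() only orders CPU-side stores, while MMIO writes to the GTT
may still sit in posted-write buffers; reading back a just-written GTT
entry with readl() forces those writes to reach the device before the
chipset flush is issued.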


@ -116,7 +116,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
return 0;
ret = pm_runtime_get_sync(core->dev);
return ret < 0 ? ret : 0;
if (ret < 0) {
pm_runtime_put_noidle(core->dev);
return ret;
}
return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
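
The rule being applied, in isolation (illustrative kernel snippet,
assuming linux/pm_runtime.h): pm_runtime_get_sync() increments the
device usage count even when it fails, so every error path must drop
that reference:

	static int resume_device(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}
		return 0;
	}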


@ -2526,26 +2526,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
static int cpufreq_boost_set_sw(int state)
{
struct cpufreq_policy *policy;
int ret = -EINVAL;
for_each_active_policy(policy) {
int ret;
if (!policy->freq_table)
continue;
return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy,
policy->freq_table);
if (ret) {
pr_err("%s: Policy frequency update failed\n",
__func__);
break;
return ret;
}
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
break;
return ret;
}
return ret;
return 0;
}
int cpufreq_boost_trigger_state(int state)


@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
struct nitrox_device *ndev = NULL;
struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
if (!ndev)
if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
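
A generic sketch of the iterator pitfall being fixed (hypothetical item
type, standard linux/list.h semantics): after list_for_each_entry()
exhausts the list, the cursor is not NULL; it points at the container
of the list head, so a NULL check never fires. Comparing the embedded
list_head against the head, as the patch does, or returning from inside
the loop are the safe idioms:

	struct item {
		struct list_head list;
		int id;
	};

	/* Early return avoids testing the cursor after the loop at all. */
	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *it;

		list_for_each_entry(it, head, list) {
			if (it->id == id)
				return it;
		}
		return NULL;
	}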


@ -353,13 +353,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
pr_err("Invalid number of src SG.\n");
return src_nents;
}
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@ -405,6 +410,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
goto free;
}
dst_len = min_t(unsigned int, req->nbytes, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->nbytes, dst_len);
@ -445,12 +451,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
vc_sym_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
sgs[num_out++] = &req->src[i];
for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
sgs[num_out++] = sg;
/* Destination data */
for (i = 0; i < dst_nents; i++)
sgs[num_out + num_in++] = &req->dst[i];
for (sg = req->dst; sg; sg = sg_next(sg))
sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@ -580,10 +586,11 @@ static void virtio_crypto_ablkcipher_finalize_req(
scatterwalk_map_and_copy(req->info, req->dst,
req->nbytes - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {


@ -162,7 +162,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
ndimms += skx_get_dimm_info(mtr, 0, dimm,
ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
imc, i, j);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,


@ -151,27 +151,23 @@ static const struct x86_cpu_id skx_cpuids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
#define SKX_GET_MTMTR(dev, reg) \
pci_read_config_dword((dev), 0x87c, &(reg))
static bool skx_check_ecc(struct pci_dev *pdev)
static bool skx_check_ecc(u32 mcmtr)
{
u32 mtmtr;
SKX_GET_MTMTR(pdev, mtmtr);
return !!GET_BITFIELD(mtmtr, 2, 2);
return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
struct skx_pvt *pvt = mci->pvt_info;
u32 mtr, mcmtr, amap, mcddrtcfg;
struct skx_imc *imc = pvt->imc;
u32 mtr, amap, mcddrtcfg;
struct dimm_info *dimm;
int i, j;
int ndimms;
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
for (i = 0; i < SKX_NUM_CHANNELS; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
@ -182,14 +178,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j);
ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
} else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
nvdimm_count++;
}
}
if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
if (ndimms && !skx_check_ecc(mcmtr)) {
skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
return -ENODEV;
}


@ -283,7 +283,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno)
{
int banks = 16, ranks, rows, cols, npages;
@ -303,8 +303,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
imc->chan[chan].dimms[dimmno].rowbits = rows;
imc->chan[chan].dimms[dimmno].colbits = cols;


@ -126,7 +126,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,


@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
if (ret)
if (ret) {
kobject_put(&new_var->kobj);
return ret;
}
kobject_uevent(&new_var->kobj, KOBJ_ADD);
if (efivar_entry_add(new_var, &efivar_sysfs_list)) {


@ -38,6 +38,7 @@ struct imx_sc_ipc {
struct device *dev;
struct mutex lock;
struct completion done;
bool fast_ipc;
/* temporarily store the SCU msg */
u32 *msg;
@ -115,6 +116,26 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
int i;
if (!sc_ipc->msg) {
dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
sc_chan->idx, *data);
return;
}
if (sc_ipc->fast_ipc) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
sc_ipc->msg[0] = *data++;
for (i = 1; i < sc_ipc->rx_size; i++)
sc_ipc->msg[i] = *data++;
complete(&sc_ipc->done);
return;
}
if (sc_chan->idx == 0) {
hdr = msg;
@ -137,20 +158,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
struct imx_sc_rpc_msg *hdr = msg;
struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
struct imx_sc_chan *sc_chan;
u32 *data = msg;
int ret;
int size;
int i;
/* Check size */
if (hdr->size > IMX_SC_RPC_MAX_MSG)
if (hdr.size > IMX_SC_RPC_MAX_MSG)
return -EINVAL;
dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
hdr->func, hdr->size);
dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
hdr.func, hdr.size);
for (i = 0; i < hdr->size; i++) {
size = sc_ipc->fast_ipc ? 1 : hdr.size;
for (i = 0; i < size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
/*
@ -162,8 +185,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
* Wait for tx_done before every send to ensure that no
* queueing happens at the mailbox channel level.
*/
wait_for_completion(&sc_chan->tx_done);
reinit_completion(&sc_chan->tx_done);
if (!sc_ipc->fast_ipc) {
wait_for_completion(&sc_chan->tx_done);
reinit_completion(&sc_chan->tx_done);
}
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
@ -187,7 +212,8 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
mutex_lock(&sc_ipc->lock);
reinit_completion(&sc_ipc->done);
sc_ipc->msg = msg;
if (have_resp)
sc_ipc->msg = msg;
sc_ipc->count = 0;
ret = imx_scu_ipc_write(sc_ipc, msg);
if (ret < 0) {
@ -209,6 +235,7 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
}
out:
sc_ipc->msg = NULL;
mutex_unlock(&sc_ipc->lock);
dev_dbg(sc_ipc->dev, "RPC SVC done\n");
@ -224,6 +251,8 @@ static int imx_scu_probe(struct platform_device *pdev)
struct imx_sc_chan *sc_chan;
struct mbox_client *cl;
char *chan_name;
struct of_phandle_args args;
int num_channel;
int ret;
int i;
@ -231,11 +260,20 @@ static int imx_scu_probe(struct platform_device *pdev)
if (!sc_ipc)
return -ENOMEM;
for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
if (i < 4)
ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
"#mbox-cells", 0, &args);
if (ret)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
if (i < num_channel / 2)
chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
else
chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
chan_name = kasprintf(GFP_KERNEL, "rx%d",
i - num_channel / 2);
if (!chan_name)
return -ENOMEM;
@ -247,13 +285,15 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
/* Initial tx_done completion as "done" */
cl->tx_done = imx_scu_tx_done;
init_completion(&sc_chan->tx_done);
complete(&sc_chan->tx_done);
if (!sc_ipc->fast_ipc) {
/* Initial tx_done completion as "done" */
cl->tx_done = imx_scu_tx_done;
init_completion(&sc_chan->tx_done);
complete(&sc_chan->tx_done);
}
sc_chan->sc_ipc = sc_ipc;
sc_chan->idx = i % 4;
sc_chan->idx = i % (num_channel / 2);
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);


@ -619,6 +619,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
/*
* Using __get_user_pages_fast() with a read-only
* access is questionable. A read-only page may be
* COW-broken, and then this might end up giving
* the wrong side of the COW..
*
* We may or may not care.
*/
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,


@ -121,11 +121,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
/* Gem stuff */
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size);
vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,


@ -95,10 +95,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size)
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
struct drm_file *file,
u32 *handle,
u64 size)
{
struct vkms_gem_object *obj;
int ret;
@ -111,7 +111,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@ -140,6 +139,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
drm_gem_object_put_unlocked(gem_obj);
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;


@ -307,6 +307,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
else if (ev_queue->is_closed)
pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;


@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
"LEN007a", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */


@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
BUG();
/* Write register: use repeated start */
/* Write register */
xfer[0].addr = client->addr;
xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
xfer[0].flags = client->flags & I2C_M_TEN;
xfer[0].len = 1;
xfer[0].buf = &buf;
/* Read data */
xfer[1].addr = client->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
xfer[1].len = len;
xfer[1].buf = val;
@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
const void *match_data;
int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"Need i2c bus that supports protocol mangling\n");
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "Not supported I2C adapter\n");
return -ENODEV;
}
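
The driver now issues a plain write message followed by a read message instead of an I2C_M_NOSTART repeated-start trick, so any adapter with basic I2C_FUNC_I2C support works; the real fix also carries the client's ten-bit address flag, which the sketch below omits. Roughly the same two-message shape from user space via the i2c-dev interface (the bus path, chip address 0x48, and register are made-up examples):

#include <fcntl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Read `len` bytes starting at register `reg`: one write message for the
 * register address, then one read message, as in the fixed
 * __mms114_read_reg(). */
static int i2c_read_reg(int fd, uint16_t addr, uint8_t reg,
			uint8_t *val, uint16_t len)
{
	struct i2c_msg xfer[2] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = len, .buf = val },
	};
	struct i2c_rdwr_ioctl_data msgset = { .msgs = xfer, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &msgset) < 0 ? -1 : 0;
}

int main(void)
{
	uint8_t buf[4];
	int fd = open("/dev/i2c-0", O_RDWR);	/* bus number is an example */

	if (fd < 0)
		return 1;
	return i2c_read_reg(fd, 0x48, 0x10, buf, sizeof(buf)) ? 1 : 0;
}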


@ -584,7 +584,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
*/
err = mmc_send_io_op_cond(host, ocr, &rocr);
if (err)
goto err;
return err;
/*
* For SPI, enable CRC as appropriate.
@ -592,17 +592,15 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
goto err;
return err;
}
/*
* Allocate card structure.
*/
card = mmc_alloc_card(host, NULL);
if (IS_ERR(card)) {
err = PTR_ERR(card);
goto err;
}
if (IS_ERR(card))
return PTR_ERR(card);
if ((rocr & R4_MEMORY_PRESENT) &&
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
@ -610,19 +608,15 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
mmc_remove_card(card);
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
return -ENOENT;
err = -ENOENT;
goto mismatch;
}
} else {
card->type = MMC_TYPE_SDIO;
if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
mmc_remove_card(card);
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
return -ENOENT;
err = -ENOENT;
goto mismatch;
}
}
@ -677,7 +671,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_get_csd(host, card);
if (err)
return err;
goto remove;
mmc_decode_cid(card);
}
@ -704,7 +698,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
}
goto finish;
if (oldcard)
mmc_remove_card(card);
else
host->card = card;
return 0;
}
/*
@ -718,9 +717,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
/* Retry init sequence, but without R4_18V_PRESENT. */
retries = 0;
goto try_again;
} else {
goto remove;
}
return err;
}
/*
@ -731,16 +729,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto remove;
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
mmc_remove_card(card);
if (!same) {
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
return -ENOENT;
if (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device) {
mmc_remove_card(card);
card = oldcard;
} else {
err = -ENOENT;
goto mismatch;
}
card = oldcard;
}
card->ocr = ocr_card;
mmc_fixup_device(card, sdio_fixup_methods);
@ -801,16 +797,15 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
err = -EINVAL;
goto remove;
}
finish:
if (!oldcard)
host->card = card;
host->card = card;
return 0;
mismatch:
pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host));
remove:
if (!oldcard)
if (oldcard != card)
mmc_remove_card(card);
err:
return err;
}
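
The restructured mmc_sdio_init_card() funnels every failure through the shared mismatch/remove labels, so the card is logged and freed in exactly one place, and only when it is not the cached oldcard. A minimal sketch of that shared-label unwind idiom (the resources here are plain mallocs, not MMC objects):

#include <stdlib.h>

/* Every failure jumps into one chain of labels, so each resource is
 * released exactly once and the cleanup code exists in one place. */
static int setup(char **out_a, char **out_b)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto fail;

	b = malloc(16);
	if (!b)
		goto free_a;

	*out_a = a;
	*out_b = b;
	return 0;

free_a:
	free(a);
fail:
	return -1;
}

int main(void)
{
	char *a, *b;

	if (setup(&a, &b))
		return 1;
	free(b);
	free(a);
	return 0;
}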


@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
}
static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)


@ -1112,6 +1112,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
msm_host->use_cdr = true;
/*
* Clear tuning_done flag before tuning to ensure proper
* HS400 settings.
*/
msm_host->tuning_done = 0;
/*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC


@ -1285,12 +1285,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (host->native_hotplug)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);


@ -614,11 +614,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
}
}
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
dev_name(dev), host);
if (ret)
goto free_host;
if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
host->dma_ops = &uniphier_sd_internal_dma_ops;
else
@ -646,8 +641,15 @@ static int uniphier_sd_probe(struct platform_device *pdev)
if (ret)
goto free_host;
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
dev_name(dev), host);
if (ret)
goto remove_host;
return 0;
remove_host:
tmio_mmc_host_remove(host);
free_host:
tmio_mmc_host_free(host);


@ -4536,12 +4536,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
break;
}
dev_info(dev, "Partner protocol version is %d\n",
crq->version_exchange_rsp.version);
if (be16_to_cpu(crq->version_exchange_rsp.version) <
ibmvnic_version)
ibmvnic_version =
ibmvnic_version =
be16_to_cpu(crq->version_exchange_rsp.version);
dev_info(dev, "Partner protocol version is %d\n",
ibmvnic_version);
send_cap_queries(adapter);
break;
case QUERY_CAPABILITY_RSP:
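
The ibmvnic fix converts the partner's big-endian version before comparing, and logs the negotiated value (the lower of the two) rather than the raw wire field. The negotiation step boils down to this (the version numbers are invented):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Negotiate down to the lower of our version and the partner's, after
 * byte-swapping the wire value, as the fixed VERSION_EXCHG_RSP handler
 * does. */
int main(void)
{
	uint16_t ours = 5;
	uint16_t wire = htons(1);		/* partner sends big-endian */
	uint16_t theirs = ntohs(wire);
	uint16_t agreed = theirs < ours ? theirs : ours;

	printf("Partner protocol version is %d\n", agreed);
	return 0;
}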


@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
mlx5e_close_cq(&c->xskicosq.cq);
mlx5e_close_xdpsq(&c->xsksq);
mlx5e_close_cq(&c->xsksq.cq);
memset(&c->xskrq, 0, sizeof(c->xskrq));
memset(&c->xsksq, 0, sizeof(c->xsksq));
memset(&c->xskicosq, 0, sizeof(c->xskicosq));
}
void mlx5e_activate_xsk(struct mlx5e_channel *c)


@ -193,15 +193,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
bool err_detected = false;
/* Mark the device as fatal in order to abort FW commands */
if ((check_fatal_sensors(dev) || force) &&
dev->state == MLX5_DEVICE_STATE_UP) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
err_detected = true;
}
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;
if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;/* a previous error is still being handled */
if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
goto unlock;
}
if (check_fatal_sensors(dev) || force) {
if (check_fatal_sensors(dev) || force) { /* protected state setting */
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_cmd_flush(dev);
}
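
The reworked mlx5_enter_error_state() inspects and changes dev->state under intf_state_mutex, so only the caller that actually performs the transition runs the teardown; a later caller sees the error state and backs off. A pthread sketch of that check-then-set-under-lock pattern (states and names are illustrative):

#include <pthread.h>
#include <stdio.h>

enum state { STATE_UP, STATE_INTERNAL_ERROR };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static enum state dev_state = STATE_UP;

/* Returns 1 only for the caller that performed the transition, so the
 * one-shot teardown cannot run twice. */
static int enter_error_state(void)
{
	int do_teardown = 0;

	pthread_mutex_lock(&state_lock);
	if (dev_state != STATE_INTERNAL_ERROR) {
		dev_state = STATE_INTERNAL_ERROR;
		do_teardown = 1;
	}
	pthread_mutex_unlock(&state_lock);
	return do_teardown;
}

int main(void)
{
	printf("first caller tears down: %d\n", enter_error_state());
	printf("second caller skips: %d\n", enter_error_state());
	return 0;
}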


@ -794,6 +794,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
/* health work might still be active, and it needs pci bar in
* order to know the NIC state. Therefore, drain the health WQ
* before removing the pci bars
*/
mlx5_drain_health_wq(dev);
iounmap(dev->iseg);
pci_clear_master(dev->pdev);
release_bar(dev->pdev);


@ -390,8 +390,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
int trip, enum thermal_trend *trend)
{
struct mlxsw_thermal_module *tz = tzdev->devdata;
struct mlxsw_thermal *thermal = tz->parent;
struct mlxsw_thermal *thermal = tzdev->devdata;
if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
@ -592,6 +591,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
int trip, enum thermal_trend *trend)
{
struct mlxsw_thermal_module *tz = tzdev->devdata;
struct mlxsw_thermal *thermal = tz->parent;
if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
if (tzdev == thermal->tz_highest_dev)
return 1;
*trend = THERMAL_TREND_STABLE;
return 0;
}
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@ -603,7 +618,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
.get_trend = mlxsw_thermal_trend_get,
.get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@ -642,7 +657,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
.get_trend = mlxsw_thermal_trend_get,
.get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,


@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev)
return 0;
err_standby_open:
dev_close(primary_dev);
if (primary_dev)
dev_close(primary_dev);
err_primary_open:
netif_tx_disable(dev);
return err;
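
Because net_failover can run without a primary slave, the rollback label must not close a device that was never opened. A sketch of the guarded unwind (the names and the injected failure are invented):

#include <stdio.h>

static int failover_open(int have_primary, int standby_ok)
{
	if (have_primary)
		puts("open primary");

	if (!standby_ok)
		goto err_standby_open;
	puts("open standby");
	return 0;

err_standby_open:
	if (have_primary)	/* the guard the fix adds */
		puts("close primary");
	return -1;
}

int main(void)
{
	failover_open(0, 0);	/* no primary: rollback must not close it */
	return 0;
}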


@ -1908,8 +1908,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb->dev = tun->dev;
break;
case IFF_TAP:
if (!frags)
skb->protocol = eth_type_trans(skb, tun->dev);
if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
err = -ENOMEM;
goto drop;
}
skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
@ -1966,9 +1969,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (frags) {
u32 headlen;
/* Exercise flow dissector code path. */
u32 headlen = eth_get_headlen(tun->dev, skb->data,
skb_headlen(skb));
skb_push(skb, ETH_HLEN);
headlen = eth_get_headlen(tun->dev, skb->data,
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);


@ -1924,6 +1924,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
ns_olen = request->len - skb_network_offset(request) -
sizeof(struct ipv6hdr) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
if (!ns->opt[i + 1]) {
kfree_skb(reply);
return NULL;
}
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
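
The loop above advances by ns->opt[i + 1] << 3, so a zero-length option would keep it at the same offset forever; the fix drops the reply instead. A self-contained sketch of the same TLV walk with the guard (the option buffer is a made-up example):

#include <stdint.h>
#include <stdio.h>

#define ND_OPT_SOURCE_LL_ADDR 1

/* Walk type/length option pairs as vxlan_na_create() does; length is in
 * units of 8 bytes, and a length of 0 must be rejected or the loop
 * never advances. */
static const uint8_t *find_src_lladdr(const uint8_t *opt, int olen)
{
	int i;

	for (i = 0; i < olen - 1; i += opt[i + 1] << 3) {
		if (!opt[i + 1])
			return NULL;	/* invalid option: bail, don't spin */
		if (opt[i] == ND_OPT_SOURCE_LL_ADDR)
			return opt + i + 2;	/* skip the 2-byte header */
	}
	return NULL;
}

int main(void)
{
	const uint8_t opts[8] = { ND_OPT_SOURCE_LL_ADDR, 1,
				  0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	printf("lladdr found: %s\n", find_src_lladdr(opts, 8) ? "yes" : "no");
	return 0;
}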


@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: over RX MAX_PKT_NUM\n");
goto err;
}
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@ -638,9 +643,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@ -680,14 +685,15 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
return;
free:
kfree_skb(skb);
kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@ -745,6 +751,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
free:
kfree_skb(skb);
kfree(rx_buf);
urb->context = NULL;
}
@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@ -880,6 +897,8 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
if (!rx_buf) {
ret = -ENOMEM;
goto err_rxb;
}
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
rx_buf->hif_dev = hif_dev;
rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb, 1);
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@ -943,6 +972,8 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
err_skb:
usb_free_urb(urb);
err_urb:
kfree(rx_buf);
err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
@ -973,7 +1004,7 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
return -ENOMEM;
}
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
usb_set_intfdata(interface, NULL);


@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
struct rx_buf {
struct sk_buff *skb;
struct hif_device_usb *hif_dev;
};
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)
@ -133,5 +138,6 @@ struct hif_device_usb {
int ath9k_hif_usb_init(void);
void ath9k_hif_usb_exit(void);
void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
#endif /* HTC_USB_H */


@ -931,8 +931,9 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product, u32 drv_info)
{
struct ieee80211_hw *hw;
struct hif_device_usb *hif_dev;
struct ath9k_htc_priv *priv;
struct ieee80211_hw *hw;
int ret;
hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
return 0;
err_init:
ath9k_deinit_wmi(priv);
ath9k_stop_wmi(priv);
hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
ath9k_hif_usb_dealloc_urbs(hif_dev);
ath9k_destoy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_deinit_wmi(htc_handle->drv_priv);
ath9k_stop_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}


@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
ath_warn(common,
"Short RX data len, dropping (dlen: %d)\n",
rs_datalen);
ath_dbg(common, ANY,
"Short RX data len, dropping (dlen: %d)\n",
rs_datalen);
goto rx_next;
}


@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
if (epid < 0 || epid >= ENDPOINT_MAX)
return;
service_id = be16_to_cpu(svc_rspmsg->service_id);
max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
endpoint = &target->endpoint[epid];
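
Here epid arrives from the device, so it is range-checked before indexing target->endpoint[]; the other hunks in this file stop freeing the skb on timeout because the URB completion path owns and frees it, which previously caused a double free. The bounds-check pattern in isolation (22 mirrors ath9k's ENDPOINT_MAX, though the exact value is incidental):

#include <stdio.h>

#define ENDPOINT_MAX 22

struct endpoint { int in_use; };

static struct endpoint endpoints[ENDPOINT_MAX];

/* A device-supplied index must be validated before the array access. */
static struct endpoint *lookup_endpoint(int epid)
{
	if (epid < 0 || epid >= ENDPOINT_MAX)
		return NULL;
	return &endpoints[epid];
}

int main(void)
{
	printf("valid: %p\n", (void *)lookup_endpoint(3));
	printf("bogus: %p\n", (void *)lookup_endpoint(200));
	return 0;
}
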
@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC credit config timeout\n");
kfree_skb(skb);
return -ETIMEDOUT;
}
@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC start timeout\n");
kfree_skb(skb);
return -ETIMEDOUT;
}
@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target,
if (!time_left) {
dev_err(target->dev, "Service connection timeout for: %d\n",
service_connreq->service_id);
kfree_skb(skb);
return -ETIMEDOUT;
}


@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
return wmi;
}
void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi = priv->wmi;
mutex_lock(&wmi->op_mutex);
wmi->stopped = true;
mutex_unlock(&wmi->op_mutex);
}
void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
{
kfree(priv->wmi);
}
@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
return -ETIMEDOUT;
}


@ -179,7 +179,6 @@ struct wmi {
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
enum htc_endpoint_id *wmi_ctrl_epid);
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
void ath9k_wmi_event_tasklet(unsigned long data);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \


@ -281,7 +281,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
int regulatory_type;
/* Checking for required sections */
if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
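
nvm_type has three values, so testing != IWL_NVM_EXT wrongly sent IWL_NVM_SDP devices (such as the 3168) down the plain-NVM validation path; == IWL_NVM selects that path only for the type it was written for. In miniature:

#include <stdio.h>

enum nvm_type { IWL_NVM, IWL_NVM_EXT, IWL_NVM_SDP };

/* With three variants, "not EXT" is not the same as "plain NVM": the
 * SDP case must not take the plain-NVM checks. */
static const char *path_taken(enum nvm_type t)
{
	if (t == IWL_NVM)		/* the fixed check */
		return "plain NVM checks";
	return "extended/SDP checks";
}

int main(void)
{
	printf("3168 (SDP): %s\n", path_taken(IWL_NVM_SDP));
	return 0;
}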


@ -4622,10 +4622,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* Some controllers might not implement link active reporting. In this
* case, we wait for 1000 + 100 ms.
* case, we wait for 1000 ms + any delay requested by the caller.
*/
if (!pdev->link_active_reporting) {
msleep(1100);
msleep(timeout + delay);
return true;
}


@ -511,7 +511,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* Initialise vdev subdevice */
snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
rvdev->dev.parent = rproc->dev.parent;
rvdev->dev.parent = &rproc->dev;
rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
rvdev->dev.release = rproc_rvdev_release;
dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);


@ -375,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
goto out;
}
}
} else {
struct device_node *np = rproc->dev.parent->of_node;
/*
* If we don't have dedicated buffer, just attempt to re-assign
* the reserved memory from our parent. A default memory-region
* at index 0 from the parent's memory-regions is assigned for
* the rvdev dev to allocate from. Failure is non-critical and
* the allocations will fall back to global pools, so don't
* check return value either.
*/
of_reserved_mem_device_init_by_idx(dev, np, 0);
}
/* Allocate virtio device */


@ -462,7 +462,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
(fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);


@ -4227,6 +4227,7 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
bool refire_cmd = 0;
u8 result;
@ -4284,6 +4285,11 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
break;
}
scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
cmd_fusion->io_request;
if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
result = RETURN_CMD;
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@ -4481,7 +4487,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);


@ -670,7 +670,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
if (buf)
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
buf ? buf[tp.byte] : 0xff);
buf ? buf[tp.byte] : 0x0);
} else {
u16 *buf = tp.trans->rx_buf;
@ -678,7 +678,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
buf ? buf[tp.byte] : 0xffff);
buf ? buf[tp.byte / 2] : 0x0);
}
update_qspi_trans_byte_count(qspi, &tp,
@ -733,13 +733,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
while (!tstatus && slot < MSPI_NUM_CDRAM) {
if (tp.trans->bits_per_word <= 8) {
const u8 *buf = tp.trans->tx_buf;
u8 val = buf ? buf[tp.byte] : 0xff;
u8 val = buf ? buf[tp.byte] : 0x00;
write_txram_slot_u8(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
} else {
const u16 *buf = tp.trans->tx_buf;
u16 val = buf ? buf[tp.byte / 2] : 0xffff;
u16 val = buf ? buf[tp.byte / 2] : 0x0000;
write_txram_slot_u16(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
@ -1220,6 +1220,11 @@ int bcm_qspi_probe(struct platform_device *pdev,
}
qspi = spi_master_get_devdata(master);
qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(qspi->clk))
return PTR_ERR(qspi->clk);
qspi->pdev = pdev;
qspi->trans_pos.trans = NULL;
qspi->trans_pos.byte = 0;
@ -1332,13 +1337,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->soc_intc = NULL;
}
qspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(qspi->clk)) {
dev_warn(dev, "unable to get clock\n");
ret = PTR_ERR(qspi->clk);
goto qspi_probe_err;
}
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");


@ -1330,7 +1330,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
err = devm_spi_register_controller(&pdev->dev, ctlr);
err = spi_register_controller(ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
@ -1355,6 +1355,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
bcm2835_debugfs_remove(bs);
spi_unregister_controller(ctlr);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);


@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
err = devm_spi_register_master(&pdev->dev, master);
err = spi_register_master(master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_clk_disable;
@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
bcm2835aux_debugfs_remove(bs);
spi_unregister_master(master);
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */


@ -128,12 +128,20 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
bool cs_high = !!(spi->mode & SPI_CS_HIGH);
/* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
chip->cs_control(!enable);
if (!enable)
/*
* DW SPI controller demands any native CS being set in order to
* proceed with data transfer. So in order to activate the SPI
* communications we must set a corresponding bit in the Slave
* Enable register no matter whether the SPI core is configured to
* support active-high or active-low CS level.
*/
if (cs_high == enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
@ -524,7 +532,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
}
}
ret = devm_spi_register_controller(dev, master);
ret = spi_register_controller(master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_dma_exit;
@ -548,6 +556,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
spi_unregister_controller(dws->master);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);


@ -1880,7 +1880,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = devm_spi_register_controller(&pdev->dev, controller);
status = spi_register_controller(controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi controller\n");
goto out_error_pm_runtime_enabled;
@ -1889,7 +1889,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return status;
out_error_pm_runtime_enabled:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
out_error_clock_enabled:
@ -1916,6 +1915,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
spi_unregister_controller(drv_data->controller);
/* Disable the SSP at the peripheral and SOC level */
pxa2xx_spi_write(drv_data, SSCR0, 0);
clk_disable_unprepare(ssp->clk);


@ -2581,6 +2581,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
struct spi_controller *found;
int id = ctlr->bus_num;
device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
found = idr_find(&spi_master_idr, id);
@ -2593,7 +2595,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
device_for_each_child(&ctlr->dev, NULL, __unregister);
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
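
This hunk, together with the bcm2835, bcm2835aux, dw, and pxa2xx changes above, replaces devm-managed registration with an explicit spi_register_controller()/spi_unregister_controller() pair: devm unregistration would only run after .remove() had already disabled the hardware, while the core now also unregisters child devices before tearing the controller down. A stubbed sketch of the resulting ordering (the stub functions stand in for the real SPI core, clock, and hardware calls):

#include <stdio.h>

static int spi_register_controller_stub(void)
{
	puts("register controller");
	return 0;
}

static void spi_unregister_controller_stub(void)
{
	puts("unregister controller");
}

static void hw_enable(void)  { puts("enable clocks/hw"); }
static void hw_disable(void) { puts("disable clocks/hw"); }

static int driver_probe(void)
{
	hw_enable();
	/* register last, and explicitly, so the matching unregister can
	 * run at the top of remove rather than after it */
	return spi_register_controller_stub();
}

static void driver_remove(void)
{
	spi_unregister_controller_stub();	/* first: detach users */
	hw_disable();				/* then: power down */
}

int main(void)
{
	if (driver_probe())
		return 1;
	driver_remove();
	return 0;
}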


@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
for (i = 0; i < 256; i++)
vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
fallthrough;
case FB_BLANK_UNBLANK:
if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
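
The added fallthrough; tells the compiler the missing break between the blank cases is deliberate, silencing -Wimplicit-fallthrough. In the kernel it is a macro over a statement attribute; a user-space equivalent for GCC 7+/Clang (the case values here are invented):

#include <stdio.h>

/* The kernel's `fallthrough` expands to this attribute on compilers
 * that support it; it documents an intentional missing break. */
#define fallthrough __attribute__((__fallthrough__))

int main(void)
{
	int blank = 1;	/* stand-in for FB_BLANK_NORMAL */

	switch (blank) {
	case 1:
		puts("set palette to black");
		fallthrough;	/* intentional: also run the unblank work */
	case 0:
		puts("restore palette");
		break;
	}
	return 0;
}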


@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
memsize=par->mach->mem->size;
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
vfree(par->saved_extmem);
par->saved_extmem = NULL;
}
if (par->saved_intmem) {
memsize=MEM_INT_SIZE;
@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
else
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
vfree(par->saved_intmem);
par->saved_intmem = NULL;
}
}
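
w100fb_restore_vidmem() can run on every resume, so the saved-buffer pointers are cleared after vfree() to make a second pass a no-op instead of a double free. The idiom in miniature:

#include <stdlib.h>

static char *saved_extmem;

/* A function that may run twice must not leave a dangling pointer
 * behind after freeing. */
static void restore_vidmem(void)
{
	if (saved_extmem) {
		/* ... copy contents back to the framebuffer ... */
		free(saved_extmem);
		saved_extmem = NULL;	/* the added line: no double free */
	}
}

int main(void)
{
	saved_extmem = malloc(64);
	restore_vidmem();
	restore_vidmem();	/* safe now: second call is a no-op */
	return 0;
}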


@ -177,6 +177,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
wdog->timeout = DEFAULT_TIMEOUT;
watchdog_init_timeout(wdog, 0, dev);
ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout);
if (ret)
return ret;
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);


@ -1087,7 +1087,8 @@ static void set_backend_state(struct xenbus_device *dev,
case XenbusStateInitialised:
switch (state) {
case XenbusStateConnected:
backend_connect(dev);
if (backend_connect(dev))
return;
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosing:


@ -176,6 +176,7 @@ struct fsync_iocb {
struct file *file;
struct work_struct work;
bool datasync;
struct cred *creds;
};
struct poll_iocb {
@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
const struct cred *old_cred = override_creds(iocb->fsync.creds);
iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
revert_creds(old_cred);
put_cred(iocb->fsync.creds);
iocb_put(iocb);
}
@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
if (unlikely(!req->file->f_op->fsync))
return -EINVAL;
req->creds = prepare_creds();
if (!req->creds)
return -ENOMEM;
req->datasync = datasync;
INIT_WORK(&req->work, aio_fsync_work);
schedule_work(&req->work);
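
aio_fsync() now snapshots the submitter's credentials with prepare_creds(), and the worker wraps vfs_fsync() in override_creds()/revert_creds(), so the fsync runs with the submitter's identity rather than the workqueue thread's. A generic capture-context-at-submission sketch with pthreads (the creds struct here is illustrative, not kernel credentials; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct creds { int uid; };

struct fsync_job {
	struct creds creds;	/* captured at submission time */
};

static void *worker(void *arg)
{
	struct fsync_job *job = arg;

	/* override_creds() analog: act as the submitter, not the worker */
	printf("fsync running as uid %d\n", job->creds.uid);
	/* revert_creds() and put_cred() would follow in the kernel */
	return NULL;
}

int main(void)
{
	struct fsync_job job = { .creds = { .uid = 1000 } };
	pthread_t t;

	pthread_create(&t, NULL, worker, &job);	/* schedule_work() analog */
	pthread_join(t, NULL);
	return 0;
}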


@ -2747,7 +2747,9 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
* response size smaller.
*/
req->MaxOutputResponse = cpu_to_le32(max_response_size);
req->sync_hdr.CreditCharge =
cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
SMB2_MAX_BUFFER_SIZE));
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
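
The added CreditCharge makes large ioctls request one credit per started 64KiB of the larger of the input and output lengths. The arithmetic on its own (the lengths are invented; 65536 matches cifs's SMB2_MAX_BUFFER_SIZE):

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Credit charge for an ioctl, as computed in SMB2_ioctl_init(). */
int main(void)
{
	unsigned int indatalen = 1024, max_response_size = 200000;
	unsigned int charge = DIV_ROUND_UP(MAX(indatalen, max_response_size),
					   SMB2_MAX_BUFFER_SIZE);

	printf("CreditCharge = %u\n", charge);	/* 4: ~196KiB rounds up */
	return 0;
}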


@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
goto out;
}
if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
goto out;
}
error = 0;
out:


@ -504,12 +504,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int max_bio_size = 2 * 1024 * 1024;
unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
struct page *page = NULL;
bool bio_chained = false, done = false;
bool done = false;
errseq_t since;
memset(head, 0, sizeof(*head));
@ -532,10 +532,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off = 0;
}
if (!bio || (bio_chained && !off) ||
bio->bi_iter.bi_size >= max_bio_size) {
/* start new bio */
} else {
if (bio && (off || block < blocks_submitted + max_blocks)) {
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
@ -548,19 +545,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
(PAGE_SIZE - off) >> bsize_shift;
bio = gfs2_chain_bio(bio, blocks);
bio_chained = true;
goto add_block_to_new_bio;
}
}
if (bio) {
blocks_submitted = block + 1;
blocks_submitted = block;
submit_bio(bio);
}
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
bio_chained = false;
add_block_to_new_bio:
sz = bio_add_page(bio, page, bsize, off);
BUG_ON(sz != bsize);
@ -568,7 +563,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
if (blocks_submitted <= blocks_read + max_blocks) {
/* Keep at least one bio in flight */
continue;
}


@ -3498,8 +3498,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ret = 0;
if (!pages || nr_pages > got_pages) {
kfree(vmas);
kfree(pages);
kvfree(vmas);
kvfree(pages);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
vmas = kvmalloc_array(nr_pages,
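
These buffers come from kvmalloc_array(), which may return either kmalloc or vmalloc memory, so they must be released with kvfree(); plain kfree() on a vmalloc'd pointer corrupts memory. A user-space analog of why a matching free is needed: an allocator that picks malloc or mmap by size, recording which in an invented header tag (the real kvfree() keys off is_vmalloc_addr() instead):

#include <stdlib.h>
#include <sys/mman.h>

#define BIG (1024 * 1024)

struct hdr {
	int mmapped;
	size_t size;
};

static void *kv_alloc(size_t size)
{
	size_t total = sizeof(struct hdr) + size;
	struct hdr *h;

	if (total < BIG) {
		h = malloc(total);		/* kmalloc() side */
		if (!h)
			return NULL;
		h->mmapped = 0;
	} else {
		h = mmap(NULL, total, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (h == MAP_FAILED)		/* vmalloc() side */
			return NULL;
		h->mmapped = 1;
	}
	h->size = total;
	return h + 1;
}

static void kv_free(void *p)
{
	struct hdr *h;

	if (!p)
		return;
	h = (struct hdr *)p - 1;
	if (h->mmapped)
		munmap(h, h->size);	/* the "vfree" path */
	else
		free(h);		/* the "kfree" path */
}

int main(void)
{
	void *small = kv_alloc(64);
	void *big = kv_alloc(BIG);

	kv_free(small);
	kv_free(big);	/* freeing this with free() would be the bug */
	return 0;
}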


@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
if (!nilfs->ns_writer)
return -ENOMEM;
inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
err = nilfs_segctor_start_thread(nilfs->ns_writer);
if (err) {
kfree(nilfs->ns_writer);


@ -171,6 +171,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
/* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
marks_ignored_mask |= mark->ignored_mask;
/*
* If the event is on dir and this mark doesn't care about
* events on dir, don't send it!
@ -188,7 +192,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
continue;
marks_mask |= mark->mask;
marks_ignored_mask |= mark->ignored_mask;
}
test_mask = event_mask & marks_mask & ~marks_ignored_mask;
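
The ignored bits are now accumulated before the ISDIR/ON_CHILD filtering can skip a mark, so an ignore mask suppresses events even when the mark contributes nothing to marks_mask. The final mask arithmetic, runnable in isolation (the flag values mirror fanotify's FAN_ACCESS/FAN_OPEN):

#include <stdint.h>
#include <stdio.h>

#define FAN_ACCESS 0x01
#define FAN_OPEN   0x20

/* Ignored bits are collected from every mark, including ones skipped
 * for other reasons, then removed from the interesting bits. */
int main(void)
{
	uint32_t event_mask = FAN_OPEN;
	uint32_t marks_mask = FAN_OPEN | FAN_ACCESS;
	uint32_t marks_ignored_mask = FAN_OPEN;	/* from a skipped mark */
	uint32_t test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	printf("report event: %s\n", test_mask ? "yes" : "no");
	return 0;
}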

Some files were not shown because too many files have changed in this diff.