This is the 5.10.154 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmNtMXwACgkQONu9yGCS
 aT6WGQ/+JYZ1c0t82DJB9YhWhou6XbXVhjxm/9gvU4bm41Jb0+SJ9H6ytckJiYUE
 umBf9LzEXUmTmkRQ4ScNhXxrp3xIN4yw/KfLa8h8fCuQJi1LEqMKZ9F5TFE8qBid
 quYir2XgJTdJFkt3/8eyErgSrRHsPZwK1ZCLOSuhn9AdXKrgjbYZZxgYUmnLPQCb
 txchYV+7ThGOQyZL4LWjE29/iE80xSzrRSdcVNuLKLXgHwPvm+jpo18NR4abkhNb
 jNBPIlkx+TZ5lbnX3uMVS/ir+N6AqxIgSHBOZye0ANQr54NgXUPPANLf6yf0677S
 Wjmci8gd289JwPtfBmIWt4VjW3AUcNKE7RrNVKmvk/7qXoNMr7SgzNQmbAnEnzYR
 sl+hyla7IGtIsKycxSbkqIZxDGAVZZLc3WoE75vyE/tHfI+rJXF+GCZfU9jNgHrR
 jYx/LIXe/6MC7g7oxgIkWmoihu280AvIRRz90kfzohUXO14Qcdvhta9wlU1nfA6i
 l8HWKSs1Ayo2QQi6kfCjQiGCHS6vS8uJc71kPk9Qu6/YKR2mknve27mkfujVaqWD
 mmY0M5Tz1EgP+Cu3tCpjVJLHliY3+k91Qo7/dafLxfR7rSetLoIJVp74Zxb9MKkz
 S8MDUZHUW8SctXRaBZQrgEAnXeIm38PgkMEuucYUWA7Wvbnj6WE=
 =SHve
 -----END PGP SIGNATURE-----

Merge 5.10.154 into android12-5.10-lts

Changes in 5.10.154
	serial: 8250: Let drivers request full 16550A feature probing
	serial: ar933x: Deassert Transmit Enable on ->rs485_config()
	KVM: nVMX: Pull KVM L0's desired controls directly from vmcs01
	KVM: nVMX: Don't propagate vmcs12's PERF_GLOBAL_CTRL settings to vmcs02
	KVM: x86: Trace re-injected exceptions
	KVM: x86: Treat #DBs from the emulator as fault-like (code and DR7.GD=1)
	x86/topology: Set cpu_die_id only if DIE_TYPE found
	x86/topology: Fix multiple packages shown on a single-package system
	x86/topology: Fix duplicated core ID within a package
	KVM: x86: Protect the unused bits in MSR exiting flags
	KVM: x86: Copy filter arg outside kvm_vm_ioctl_set_msr_filter()
	KVM: x86: Add compat handler for KVM_X86_SET_MSR_FILTER
	RDMA/cma: Use output interface for net_dev check
	IB/hfi1: Correctly move list in sc_disable()
	NFSv4: Fix a potential state reclaim deadlock
	NFSv4.1: Handle RECLAIM_COMPLETE trunking errors
	NFSv4.1: We must always send RECLAIM_COMPLETE after a reboot
	nfs4: Fix kmemleak when allocate slot failed
	net: dsa: Fix possible memory leaks in dsa_loop_init()
	RDMA/core: Fix null-ptr-deref in ib_core_cleanup()
	RDMA/qedr: clean up work queue on failure in qedr_alloc_resources()
	nfc: fdp: drop ftrace-like debugging messages
	nfc: fdp: Fix potential memory leak in fdp_nci_send()
	NFC: nxp-nci: remove unnecessary labels
	nfc: nxp-nci: Fix potential memory leak in nxp_nci_send()
	nfc: s3fwrn5: Fix potential memory leak in s3fwrn5_nci_send()
	nfc: nfcmrvl: Fix potential memory leak in nfcmrvl_i2c_nci_send()
	net: fec: fix improper use of NETDEV_TX_BUSY
	ata: pata_legacy: fix pdc20230_set_piomode()
	net: sched: Fix use after free in red_enqueue()
	net: tun: fix bugs for oversize packet when napi frags enabled
	netfilter: nf_tables: release flow rule object from commit path
	ipvs: use explicitly signed chars
	ipvs: fix WARNING in __ip_vs_cleanup_batch()
	ipvs: fix WARNING in ip_vs_app_net_cleanup()
	rose: Fix NULL pointer dereference in rose_send_frame()
	mISDN: fix possible memory leak in mISDN_register_device()
	isdn: mISDN: netjet: fix wrong check of device registration
	btrfs: fix inode list leak during backref walking at resolve_indirect_refs()
	btrfs: fix inode list leak during backref walking at find_parent_nodes()
	btrfs: fix ulist leaks in error paths of qgroup self tests
	Bluetooth: L2CAP: Fix use-after-free caused by l2cap_reassemble_sdu
	Bluetooth: L2CAP: fix use-after-free in l2cap_conn_del()
	net: mdio: fix undefined behavior in bit shift for __mdiobus_register
	net, neigh: Fix null-ptr-deref in neigh_table_clear()
	ipv6: fix WARNING in ip6_route_net_exit_late()
	drm/msm/hdmi: Remove spurious IRQF_ONESHOT flag
	drm/msm/hdmi: fix IRQ lifetime
	mmc: sdhci-esdhc-imx: Propagate ESDHC_FLAG_HS400* only on 8bit bus
	mmc: sdhci-pci: Avoid comma separated statements
	mmc: sdhci-pci-core: Disable ES for ASUS BIOS on Jasper Lake
	video/fbdev/stifb: Implement the stifb_fillrect() function
	fbdev: stifb: Fall back to cfb_fillrect() on 32-bit HCRX cards
	mtd: parsers: bcm47xxpart: print correct offset on read error
	mtd: parsers: bcm47xxpart: Fix halfblock reads
	xhci-pci: Set runtime PM as default policy on all xHC 1.2 or later devices
	s390/boot: add secure boot trailer
	media: rkisp1: Initialize color space on resizer sink and source pads
	media: rkisp1: Zero v4l2_subdev_format fields in when validating links
	media: s5p_cec: limit msg.len to CEC_MAX_MSG_SIZE
	media: cros-ec-cec: limit msg.len to CEC_MAX_MSG_SIZE
	media: dvb-frontends/drxk: initialize err to 0
	media: meson: vdec: fix possible refcount leak in vdec_probe()
	ACPI: APEI: Fix integer overflow in ghes_estatus_pool_init()
	scsi: core: Restrict legal sdev_state transitions via sysfs
	HID: saitek: add madcatz variant of MMO7 mouse device ID
	drm/amdgpu: set vm_update_mode=0 as default for Sienna Cichlid in SRIOV case
	i2c: xiic: Add platform module alias
	efi/tpm: Pass correct address to memblock_reserve
	ARM: dts: imx6qdl-gw59{10,13}: fix user pushbutton GPIO offset
	firmware: arm_scmi: Suppress the driver's bind attributes
	firmware: arm_scmi: Make Rx chan_setup fail on memory errors
	arm64: dts: juno: Add thermal critical trip points
	i2c: piix4: Fix adapter not be removed in piix4_remove()
	Bluetooth: L2CAP: Fix accepting connection request for invalid SPSM
	Bluetooth: L2CAP: Fix attempting to access uninitialized memory
	block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
	ALSA: usb-audio: Add quirks for MacroSilicon MS2100/MS2106 devices
	fscrypt: simplify master key locking
	fscrypt: stop using keyrings subsystem for fscrypt_master_key
	fscrypt: fix keyring memory leak on mount failure
	tcp/udp: Fix memory leak in ipv6_renew_options().
	mtd: rawnand: gpmi: Set WAIT_FOR_READY timeout based on program/erase times
	memcg: enable accounting of ipc resources
	binder: fix UAF of alloc->vma in race with munmap()
	coresight: cti: Fix hang in cti_disable_hw()
	btrfs: fix type of parameter generation in btrfs_get_dentry
	ftrace: Fix use-after-free for dynamic ftrace_ops
	tcp/udp: Make early_demux back namespacified.
	tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd()
	kprobe: reverse kp->flags when arm_kprobe failed
	tools/nolibc/string: Fix memcmp() implementation
	tracing/histogram: Update document for KEYS_MAX size
	capabilities: fix potential memleak on error path from vfs_getxattr_alloc()
	fuse: add file_modified() to fallocate
	efi: random: reduce seed size to 32 bytes
	efi: random: Use 'ACPI reclaim' memory for random seed
	perf/x86/intel: Fix pebs event constraints for ICL
	perf/x86/intel: Add Cooper Lake stepping to isolation_ucodes[]
	parisc: Make 8250_gsc driver dependend on CONFIG_PARISC
	parisc: Export iosapic_serial_irq() symbol for serial port driver
	parisc: Avoid printing the hardware path twice
	ext4: fix warning in 'ext4_da_release_space'
	ext4: fix BUG_ON() when directory entry has invalid rec_len
	KVM: x86: Mask off reserved bits in CPUID.80000006H
	KVM: x86: Mask off reserved bits in CPUID.8000001AH
	KVM: x86: Mask off reserved bits in CPUID.80000008H
	KVM: x86: Mask off reserved bits in CPUID.80000001H
	KVM: x86: emulator: em_sysexit should update ctxt->mode
	KVM: x86: emulator: introduce emulator_recalc_and_set_mode
	KVM: x86: emulator: update the emulation mode after CR0 write
	ext4,f2fs: fix readahead of verity data
	drm/rockchip: dsi: Force synchronous probe
	drm/i915/sdvo: Filter out invalid outputs more sensibly
	drm/i915/sdvo: Setup DDC fully before output init
	wifi: brcmfmac: Fix potential buffer overflow in brcmf_fweh_event_worker()
	ipc: remove memcg accounting for sops objects in do_semtimedop()
	Linux 5.10.154

Change-Id: I6965878bf3bad857fbdbcdeb7dd066cc280aa026
Signed-off-by: Eric Biggers <ebiggers@google.com>

@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
keys consisting of up to two fields can be specified by the 'keys'
keys consisting of up to three fields can be specified by the 'keys'
keyword. Hashing a compound key produces a unique entry in the
table for each unique combination of component keys, and can be
useful for providing more fine-grained summaries of event data.

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 153
SUBLEVEL = 154
EXTRAVERSION =
NAME = Dare mighty things

@@ -31,7 +31,7 @@ gpio-keys {
user-pb {
label = "user_pb";
gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
linux,code = <BTN_0>;
};

@@ -28,7 +28,7 @@ gpio-keys {
user-pb {
label = "user_pb";
gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
linux,code = <BTN_0>;
};

@@ -595,12 +595,26 @@ pmic {
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 0>;
trips {
pmic_crit0: trip0 {
temperature = <90000>;
hysteresis = <2000>;
type = "critical";
};
};
};
soc {
polling-delay = <1000>;
polling-delay-passive = <100>;
thermal-sensors = <&scpi_sensors0 3>;
trips {
soc_crit0: trip0 {
temperature = <80000>;
hysteresis = <2000>;
type = "critical";
};
};
};
big_cluster_thermal_zone: big-cluster {

@@ -10,12 +10,12 @@
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
const char name[80]; /* The hardware description */
};
unsigned int hw_type:8; /* HPHW_xxx */
unsigned int hversion:12;
unsigned int sversion:12;
unsigned char opt;
unsigned char name[59]; /* The hardware description */
} __packed;
struct parisc_device;

@@ -883,15 +883,13 @@ void __init walk_central_bus(void)
&root);
}
static void print_parisc_device(struct parisc_device *dev)
static __init void print_parisc_device(struct parisc_device *dev)
{
char hw_path[64];
static int count;
static int count __initdata;
print_pa_hwpath(dev, hw_path);
pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
@@ -1080,7 +1078,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
static int print_one_device(struct device * dev, void * data)
static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);

@@ -91,8 +91,17 @@ SECTIONS
_compressed_start = .;
*(.vmlinux.bin.compressed)
_compressed_end = .;
FILL(0xff);
. = ALIGN(4096);
}
#define SB_TRAILER_SIZE 32
/* Trailer needed for Secure Boot */
. += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
. = ALIGN(4096) - SB_TRAILER_SIZE;
.sb.trailer : {
QUAD(0)
QUAD(0)
QUAD(0)
QUAD(0x000000207a49504c)
}
_end = .;

@@ -4412,6 +4412,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),

@@ -855,8 +855,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

@@ -96,6 +96,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
unsigned int pkg_mask_width;
bool die_level_present = false;
int leaf;
leaf = detect_extended_topology_leaf(c);
@@ -110,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
sub_index = 1;
do {
while (true) {
cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
/*
@@ -126,23 +128,33 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
die_level_present = true;
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
sub_index++;
} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
else
break;
core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
sub_index++;
}
core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
die_select_mask = (~(-1 << die_plus_mask_width)) >>
core_plus_mask_width;
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
ht_mask_width) & core_select_mask;
c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
core_plus_mask_width) & die_select_mask;
if (die_level_present) {
c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
core_plus_mask_width) & die_select_mask;
}
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
pkg_mask_width);
/*
* Reinit the apicid, now that we have extended initial_apicid.
*/

@@ -813,11 +813,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = min(entry->eax, 0x8000001f);
break;
case 0x80000001:
entry->ebx &= ~GENMASK(27, 16);
cpuid_entry_override(entry, CPUID_8000_0001_EDX);
cpuid_entry_override(entry, CPUID_8000_0001_ECX);
break;
case 0x80000006:
/* L2 cache and TLB: pass through host info. */
/* Drop reserved bits, pass host L2 cache and TLB info. */
entry->edx &= ~GENMASK(17, 16);
break;
case 0x80000007: /* Advanced power management */
/* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -840,6 +842,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0008_EBX);
break;
@@ -859,6 +862,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ecx = entry->edx = 0;
break;
case 0x8000001a:
entry->eax &= GENMASK(2, 0);
entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x8000001e:
break;
/* Support memory encryption cpuid if host supports it */

@@ -796,8 +796,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
ulong linear;
int rc;
@@ -807,41 +806,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
return assign_eip(ctxt, dst, ctxt->mode);
u64 efer;
struct desc_struct cs;
u16 selector;
u32 base3;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
/* Real mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_REAL;
return X86EMUL_CONTINUE;
}
if (ctxt->eflags & X86_EFLAGS_VM) {
/* Protected/VM86 mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_VM86;
return X86EMUL_CONTINUE;
}
if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
return X86EMUL_UNHANDLEABLE;
if (efer & EFER_LMA) {
if (cs.l) {
/* Proper long mode */
ctxt->mode = X86EMUL_MODE_PROT64;
} else if (cs.d) {
/* 32 bit compatibility mode*/
ctxt->mode = X86EMUL_MODE_PROT32;
} else {
ctxt->mode = X86EMUL_MODE_PROT16;
}
} else {
/* Legacy 32 bit / 16 bit mode */
ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
}
return X86EMUL_CONTINUE;
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
return assign_eip(ctxt, dst);
}
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
int rc = emulator_recalc_and_set_mode(ctxt);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip(ctxt, dst);
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -2256,7 +2285,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2337,7 +2366,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
rc = assign_eip_far(ctxt, eip);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2957,6 +2986,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
ctxt->mode = usermode;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
@@ -3553,7 +3583,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
goto fail;
@@ -3695,11 +3725,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
int cr_num = ctxt->modrm_reg;
int r;
if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
if (cr_num == 0) {
/*
* CR0 write might have updated CR0.PE and/or CR0.PG
* which can affect the cpu's execution mode.
*/
r = emulator_recalc_and_set_mode(ctxt);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}

@@ -304,25 +304,29 @@ TRACE_EVENT(kvm_inj_virq,
* Tracepoint for kvm interrupt injection:
*/
TRACE_EVENT(kvm_inj_exception,
TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
TP_ARGS(exception, has_error, error_code),
TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
bool reinjected),
TP_ARGS(exception, has_error, error_code, reinjected),
TP_STRUCT__entry(
__field( u8, exception )
__field( u8, has_error )
__field( u32, error_code )
__field( bool, reinjected )
),
TP_fast_assign(
__entry->exception = exception;
__entry->has_error = has_error;
__entry->error_code = error_code;
__entry->reinjected = reinjected;
),
TP_printk("%s (0x%x)",
TP_printk("%s (0x%x)%s",
__print_symbolic(__entry->exception, kvm_trace_sym_exc),
/* FIXME: don't print error_code if not present */
__entry->has_error ? __entry->error_code : 0)
__entry->has_error ? __entry->error_code : 0,
__entry->reinjected ? " [reinjected]" : "")
);
/*

@@ -2232,7 +2232,8 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
}
}
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
struct vmcs12 *vmcs12)
{
u32 exec_control, vmcs12_exec_ctrl;
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
@@ -2243,7 +2244,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
/*
* PIN CONTROLS
*/
exec_control = vmx_pin_based_exec_ctrl(vmx);
exec_control = __pin_controls_get(vmcs01);
exec_control |= (vmcs12->pin_based_vm_exec_control &
~PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -2258,7 +2259,7 @@
/*
* EXEC CONTROLS
*/
exec_control = vmx_exec_control(vmx); /* L0's desires */
exec_control = __exec_controls_get(vmcs01); /* L0's desires */
exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
exec_control &= ~CPU_BASED_TPR_SHADOW;
@@ -2295,17 +2296,20 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* SECONDARY EXEC CONTROLS
*/
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
exec_control = __secondary_exec_controls_get(vmcs01);
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_ENABLE_RDTSCP |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_ENABLE_VMFUNC);
SECONDARY_EXEC_ENABLE_VMFUNC |
SECONDARY_EXEC_DESC);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
@@ -2341,9 +2345,15 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
* on the related bits (if supported by the CPU) in the hope that
* we can avoid VMWrites during vmx_set_efer().
*
* Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
* loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
* do the same for L2.
*/
exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
exec_control = __vm_entry_controls_get(vmcs01);
exec_control |= (vmcs12->vm_entry_controls &
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
if (cpu_has_load_ia32_efer()) {
if (guest_efer & EFER_LMA)
exec_control |= VM_ENTRY_IA32E_MODE;
@@ -2359,9 +2369,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
* bits may be modified by vmx_set_efer() in prepare_vmcs02().
*/
exec_control = vmx_vmexit_ctrl();
exec_control = __vm_exit_controls_get(vmcs01);
if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
exec_control |= VM_EXIT_LOAD_IA32_EFER;
else
exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
vm_exit_controls_set(vmx, exec_control);
/*
@@ -3370,7 +3382,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
prepare_vmcs02_early(vmx, vmcs12);
prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
if (from_vmentry) {
if (unlikely(!nested_get_vmcs12_pages(vcpu))) {

@@ -386,9 +386,13 @@ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
vmx->loaded_vmcs->controls_shadow.lname = val; \
} \
} \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
return vmcs->controls_shadow.lname; \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
return vmx->loaded_vmcs->controls_shadow.lname; \
return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \

@@ -459,6 +459,7 @@ static int exception_class(int vector)
#define EXCPT_TRAP 1
#define EXCPT_ABORT 2
#define EXCPT_INTERRUPT 3
#define EXCPT_DB 4
static int exception_type(int vector)
{
@@ -469,8 +470,14 @@ static int exception_type(int vector)
mask = 1 << vector;
/* #DB is trap, as instruction watchpoints are handled elsewhere */
if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
/*
* #DBs can be trap-like or fault-like, the caller must check other CPU
* state, e.g. DR6, to determine whether a #DB is a trap or fault.
*/
if (mask & (1 << DB_VECTOR))
return EXCPT_DB;
if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
return EXCPT_TRAP;
if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
@@ -5353,6 +5360,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
break;
case KVM_CAP_X86_USER_SPACE_MSR:
r = -EINVAL;
if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
KVM_MSR_EXIT_REASON_UNKNOWN |
KVM_MSR_EXIT_REASON_FILTER))
break;
kvm->arch.user_space_msr_mask = cap->args[0];
r = 0;
break;
@@ -5434,23 +5446,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
return r;
}
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
struct kvm_msr_filter *filter)
{
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_x86_msr_filter *new_filter, *old_filter;
struct kvm_msr_filter filter;
bool default_allow;
bool empty = true;
int r = 0;
u32 i;
if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
return -EFAULT;
if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
empty &= !filter.ranges[i].nmsrs;
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
empty &= !filter->ranges[i].nmsrs;
default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
if (empty && !default_allow)
return -EINVAL;
@@ -5458,8 +5469,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
if (!new_filter)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
if (r) {
kvm_free_msr_filter(new_filter);
return r;
@@ -5482,6 +5493,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
return 0;
}
#ifdef CONFIG_KVM_COMPAT
/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range_compat {
__u32 flags;
__u32 nmsrs;
__u32 base;
__u32 bitmap;
};
struct kvm_msr_filter_compat {
__u32 flags;
struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
};
#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct kvm *kvm = filp->private_data;
long r = -ENOTTY;
switch (ioctl) {
case KVM_X86_SET_MSR_FILTER_COMPAT: {
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_msr_filter_compat filter_compat;
struct kvm_msr_filter filter;
int i;
if (copy_from_user(&filter_compat, user_msr_filter,
sizeof(filter_compat)))
return -EFAULT;
filter.flags = filter_compat.flags;
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
struct kvm_msr_filter_range_compat *cr;
cr = &filter_compat.ranges[i];
filter.ranges[i] = (struct kvm_msr_filter_range) {
.flags = cr->flags,
.nmsrs = cr->nmsrs,
.base = cr->base,
.bitmap = (__u8 *)(ulong)cr->bitmap,
};
}
r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
break;
}
}
return r;
}
#endif
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -5788,9 +5855,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_SET_PMU_EVENT_FILTER:
r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
break;
case KVM_X86_SET_MSR_FILTER:
r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
case KVM_X86_SET_MSR_FILTER: {
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_msr_filter filter;
if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
return -EFAULT;
r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
break;
}
default:
r = -ENOTTY;
}
@@ -7560,6 +7634,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
/*
* Note, EXCPT_DB is assumed to be fault-like as the emulator
* only supports code breakpoints and general detect #DB, both
* of which are fault-like.
*/
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
kvm_rip_write(vcpu, ctxt->eip);
@@ -8347,6 +8427,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
trace_kvm_inj_exception(vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
vcpu->arch.exception.injected);
if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
vcpu->arch.exception.error_code = false;
kvm_x86_ops.queue_exception(vcpu);
@@ -8404,13 +8489,16 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
/* try to inject new event if pending */
if (vcpu->arch.exception.pending) {
trace_kvm_inj_exception(vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code);
vcpu->arch.exception.pending = false;
vcpu->arch.exception.injected = true;
/*
* Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
* value pushed on the stack. Trap-like exception and all #DBs
* leave RF as-is (KVM follows Intel's behavior in this regard;
* AMD states that code breakpoint #DBs excplitly clear RF=0).
*
* Note, most versions of Intel's SDM and AMD's APM incorrectly
* describe the behavior of General Detect #DBs, which are
* fault-like. They do _not_ set RF, a la code breakpoints.
*/
if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
@@ -8424,6 +8512,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
}
kvm_inject_exception(vcpu);
vcpu->arch.exception.pending = false;
vcpu->arch.exception.injected = true;
can_inject = false;
}

@@ -421,6 +421,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
*/
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
lockdep_assert_held(&bfqd->lock);
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
blk_mq_run_hw_queues(bfqd->queue, true);
@@ -6264,8 +6266,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_bfqq_expire(bfqd, bfqq, true, reason);
schedule_dispatch:
spin_unlock_irqrestore(&bfqd->lock, flags);
bfq_schedule_dispatch(bfqd);
spin_unlock_irqrestore(&bfqd->lock, flags);
}
/*

@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
clear_fixmap(fixmap_idx);
}
int ghes_estatus_pool_init(int num_ghes)
int ghes_estatus_pool_init(unsigned int num_ghes)
{
unsigned long addr, len;
int rc;

@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
mmap_read_lock(mm);
mmap_write_lock(mm);
vma = alloc->vma;
}
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return 0;
@@ -304,7 +304,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
err_no_vma:
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;

@@ -278,9 +278,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
rt &= 0x07 << (3 * adev->devno);
rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
rt |= (1 + 3 * pio) << (3 * adev->devno);
rt |= (1 + 3 * pio) << (3 * !adev->devno);
outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);

@@ -1126,8 +1126,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
int ret = scmi_chan_setup(info, dev, prot_id, true);
if (!ret) /* Rx is optional, hence no error check */
scmi_chan_setup(info, dev, prot_id, false);
if (!ret) {
/* Rx is optional, report only memory errors */
ret = scmi_chan_setup(info, dev, prot_id, false);
if (ret && ret != -ENOMEM)
ret = 0;
}
return ret;
}
@@ -1608,6 +1612,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match);
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.suppress_bind_attrs = true,
.of_match_table = scmi_of_match,
.dev_groups = versions_groups,
},

@@ -590,7 +590,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
seed = early_memremap(efi_rng_seed, sizeof(*seed));
if (seed != NULL) {
size = READ_ONCE(seed->size);
size = min(seed->size, EFI_RANDOM_SEED_SIZE);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");

@@ -75,7 +75,12 @@ efi_status_t efi_random_get_seed(void)
if (status != EFI_SUCCESS)
return status;
status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
/*
* Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
* allocation will survive a kexec reboot (although we refresh the seed
* beforehand)
*/
status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
(void **)&seed);
if (status != EFI_SUCCESS)

@@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void)
goto out_calc;
}
memblock_reserve((unsigned long)final_tbl,
memblock_reserve(efi.tpm_final_log,
tbl_size + sizeof(*final_tbl));
efi_tpm_final_log_size = tbl_size;

@@ -670,6 +670,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
/* VF MMIO access (except mailbox range) from CPU
* will be blocked during sriov runtime
*/
adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {

@@ -31,6 +31,7 @@
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
@@ -241,6 +242,9 @@ struct amdgpu_virt {
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
#define amdgpu_sriov_vf_mmio_access_protection(adev) \
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86

@@ -3200,7 +3200,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
/* For asic with VF MMIO access protection
* avoid using CPU for VM table updates
*/
if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!amdgpu_sriov_vf_mmio_access_protection(adev))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else

@@ -2760,13 +2760,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_connector)
return false;
if (device == 0) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
} else if (device == 1) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2821,7 +2818,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2862,13 +2858,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
if (device == 0) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
} else if (device == 1) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2898,13 +2891,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
if (device == 0) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
} else if (device == 1) {
intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2937,16 +2927,39 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return false;
}
static u16 intel_sdvo_filter_output_flags(u16 flags)
{
flags &= SDVO_OUTPUT_MASK;
/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
if (!(flags & SDVO_OUTPUT_TMDS0))
flags &= ~SDVO_OUTPUT_TMDS1;
if (!(flags & SDVO_OUTPUT_RGB0))
flags &= ~SDVO_OUTPUT_RGB1;
if (!(flags & SDVO_OUTPUT_LVDS0))
flags &= ~SDVO_OUTPUT_LVDS1;
return flags;
}
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
{
/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
flags = intel_sdvo_filter_output_flags(flags);
intel_sdvo->controlled_output = flags;
intel_sdvo_select_ddc_bus(i915, intel_sdvo);
if (flags & SDVO_OUTPUT_TMDS0)
if (!intel_sdvo_dvi_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
if (flags & SDVO_OUTPUT_TMDS1)
if (!intel_sdvo_dvi_init(intel_sdvo, 1))
return false;
@@ -2967,7 +2980,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
if (flags & SDVO_OUTPUT_RGB1)
if (!intel_sdvo_analog_init(intel_sdvo, 1))
return false;
@@ -2975,14 +2988,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
if (!intel_sdvo_lvds_init(intel_sdvo, 0))
return false;
if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
if (flags & SDVO_OUTPUT_LVDS1)
if (!intel_sdvo_lvds_init(intel_sdvo, 1))
return false;
if ((flags & SDVO_OUTPUT_MASK) == 0) {
if (flags == 0) {
unsigned char bytes[2];
intel_sdvo->controlled_output = 0;
memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(intel_sdvo),
@@ -3394,8 +3406,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
*/
intel_sdvo->base.cloneable = 0;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
goto err_output;

@@ -326,8 +326,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
ret = devm_request_irq(&pdev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
ret = devm_request_irq(dev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",

@@ -1286,5 +1286,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
.pm = &dw_mipi_dsi_rockchip_pm_ops,
.name = "dw-mipi-dsi-rockchip",
/*
* For dual-DSI display, one DSI pokes at the other DSI's
* drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
* safe for asynchronous probe.
*/
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};

@@ -827,6 +827,7 @@
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
#define USB_DEVICE_ID_MADCATZ_RAT5 0x1705
#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076

@@ -609,6 +609,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
#endif
#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },

@@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ }
};

@@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
static int cti_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
unsigned long flags;
int rc = 0;
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
/* no need to do anything if enabled or unpowered*/
@@ -119,7 +117,6 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
/* cannot enable due to error */
cti_err_not_enabled:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
return rc;
}
@@ -153,7 +150,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
struct coresight_device *csdev = drvdata->csdev;
spin_lock(&drvdata->spinlock);
@@ -175,7 +171,6 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
pm_runtime_put(dev->parent);
return 0;
/* not disabled this call */

@@ -961,6 +961,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
"", &piix4_main_adapters[0]);
if (retval < 0)
return retval;
piix4_adapter_count = 1;
}
/* Check for auxiliary SMBus on some AMD chipsets */

@@ -935,6 +935,7 @@ static struct platform_driver xiic_i2c_driver = {
module_platform_driver(xiic_i2c_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");

@@ -1437,7 +1437,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
return false;
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_iif = net_dev->ifindex;
fl4.flowi4_oif = net_dev->ifindex;
fl4.daddr = daddr;
fl4.saddr = saddr;

@@ -2759,10 +2759,18 @@ static int __init ib_core_init(void)
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
roce_gid_mgmt_init();
ret = roce_gid_mgmt_init();
if (ret) {
pr_warn("Couldn't init RoCE GID management\n");
goto err_parent;
}
return 0;
err_parent:
rdma_nl_unregister(RDMA_NL_LS);
nldev_exit();
unregister_pernet_device(&rdma_dev_net_ops);
err_compat:
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:

@@ -2181,7 +2181,7 @@ void __init nldev_init(void)
rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
void __exit nldev_exit(void)
void nldev_exit(void)
{
rdma_nl_unregister(RDMA_NL_NLDEV);
}

@@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
if (!list_empty(&sc->piowait))
list_move(&sc->piowait, &wake_list);
list_splice_init(&sc->piowait, &wake_list);
write_sequnlock(&sc->waitlock);
while (!list_empty(&wake_list)) {
struct iowait *wait;

@@ -374,6 +374,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (IS_IWARP(dev)) {
xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
if (!dev->iwarp_wq) {
rc = -ENOMEM;
goto err1;
}
}
/* Allocate Status blocks for CNQ */
@@ -381,7 +385,7 @@
GFP_KERNEL);
if (!dev->sb_array) {
rc = -ENOMEM;
goto err1;
goto err_destroy_wq;
}
dev->cnq_array = kcalloc(dev->num_cnq,
@@ -432,6 +436,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
kfree(dev->cnq_array);
err2:
kfree(dev->sb_array);
err_destroy_wq:
if (IS_IWARP(dev))
destroy_workqueue(dev->iwarp_wq);
err1:
kfree(dev->sgid_tbl);
return rc;

@@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card)
}
if (card->irq > 0)
free_irq(card->irq, card);
if (card->isac.dch.dev.dev.class)
if (device_is_registered(&card->isac.dch.dev.dev))
mISDN_unregister_device(&card->isac.dch.dev);
for (i = 0; i < 2; i++) {

@@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev,
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_register %s %d\n",
dev_name(&dev->dev), dev->id);
dev->dev.class = &mISDN_class;
err = create_stack(dev);
if (err)
goto error1;
dev->dev.class = &mISDN_class;
dev->dev.platform_data = dev;
dev->dev.parent = parent;
dev_set_drvdata(&dev->dev, dev);
@@ -249,8 +250,8 @@
error3:
delete_stack(dev);
return err;
error1:
put_device(&dev->dev);
return err;
}

@@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
uint8_t *cec_message = cros_ec->event_data.data.cec_message;
unsigned int len = cros_ec->event_size;
if (len > CEC_MAX_MSG_SIZE)
len = CEC_MAX_MSG_SIZE;
cros_ec_cec->rx_msg.len = len;
memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);

@@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
cec->rx = STATE_BUSY;
cec->msg.len = status >> 24;
if (cec->msg.len > CEC_MAX_MSG_SIZE)
cec->msg.len = CEC_MAX_MSG_SIZE;
cec->msg.rx_status = CEC_RX_STATUS_OK;
s5p_cec_get_rx_buf(cec, cec->msg.len,
cec->msg.msg);

@@ -6672,7 +6672,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct drxk_state *state = fe->demodulator_priv;
u16 err;
u16 err = 0;
dprintk(1, "\n");

@@ -1692,6 +1692,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
}
err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
if (err)
goto disable_ahb_clk;
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
@@ -1699,13 +1703,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
if (host->caps & MMC_CAP_8_BIT_DATA &&
imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
if (host->caps & MMC_CAP_8_BIT_DATA &&
imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
host->mmc_host_ops.hs400_enhanced_strobe =
esdhc_hs400_enhanced_strobe;
@@ -1727,13 +1733,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ahb_clk;
}
if (of_id)
err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
else
err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
if (err)
goto disable_ahb_clk;
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);

@@ -967,6 +967,12 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}
static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
}
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
@@ -975,9 +981,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
if (!jsl_broken_hs400es(slot)) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
}
slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}

@@ -653,8 +653,9 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
u16 busy_timeout_cycles;
unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -678,7 +679,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |

@@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read middle of the block */
err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
(uint8_t *)buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
offset, err);
offset + (blocksize / 2), err);
continue;
}

@@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = {
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
static void dsa_loop_phydevs_unregister(void)
{
unsigned int i;
for (i = 0; i < NUM_FIXED_PHYS; i++)
if (!IS_ERR(phydevs[i])) {
fixed_phy_unregister(phydevs[i]);
phy_device_free(phydevs[i]);
}
}
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -383,23 +394,23 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
unsigned int i;
unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
return mdio_driver_register(&dsa_loop_drv);
ret = mdio_driver_register(&dsa_loop_drv);
if (ret)
dsa_loop_phydevs_unregister();
return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
unsigned int i;
mdio_driver_unregister(&dsa_loop_drv);
for (i = 0; i < NUM_FIXED_PHYS; i++)
if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);

@@ -623,7 +623,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -685,7 +685,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
}

@@ -574,7 +574,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
for (i = 0; i < PHY_MAX_ADDR; i++) {
if ((bus->phy_mask & (1 << i)) == 0) {
if ((bus->phy_mask & BIT(i)) == 0) {
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);

@@ -1475,7 +1475,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int err;
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
return ERR_PTR(-EMSGSIZE);
local_bh_disable();

@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
goto event_free;
}
/* convert event message */
emsg_be = &event->emsg;

@@ -238,9 +238,6 @@ static int fdp_nci_open(struct nci_dev *ndev)
{
int r;
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
r = info->phy_ops->enable(info->phy);
@@ -249,35 +246,26 @@
static int fdp_nci_close(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
return 0;
}
static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
int ret;
if (atomic_dec_and_test(&info->data_pkt_counter))
info->data_pkt_counter_cb(ndev);
return info->phy_ops->write(info->phy, skb);
}
ret = info->phy_ops->write(info->phy, skb);
if (ret < 0) {
kfree_skb(skb);
return ret;
}
int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
return nci_recv_frame(ndev, skb);
consume_skb(skb);
return 0;
}
EXPORT_SYMBOL(fdp_nci_recv_frame);
static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
@@ -489,8 +477,6 @@ static int fdp_nci_setup(struct nci_dev *ndev)
int r;
u8 patched = 0;
dev_dbg(dev, "%s\n", __func__);
r = nci_core_init(ndev);
if (r)
goto error;
@@ -598,9 +584,7 @@ static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
info->setup_reset_ntf = 1;
wake_up(&info->setup_wq);
@@ -611,9 +595,7 @@ static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
info->setup_patch_ntf = 1;
info->setup_patch_status = skb->data[0];
wake_up(&info->setup_wq);
@@ -786,11 +768,6 @@ EXPORT_SYMBOL(fdp_nci_probe);
void fdp_nci_remove(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
dev_dbg(dev, "%s\n", __func__);
nci_unregister_device(ndev);
nci_free_device(ndev);
}


@@ -25,6 +25,5 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
 		  struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
 		  u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
 void fdp_nci_remove(struct nci_dev *ndev);
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 #endif /* __LOCAL_FDP_H_ */


@@ -49,7 +49,6 @@ static int fdp_nci_i2c_enable(void *phy_id)
 {
 	struct fdp_i2c_phy *phy = phy_id;
-	dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
 	fdp_nci_i2c_reset(phy);
 	return 0;
@@ -59,7 +58,6 @@ static void fdp_nci_i2c_disable(void *phy_id)
 {
 	struct fdp_i2c_phy *phy = phy_id;
-	dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
 	fdp_nci_i2c_reset(phy);
 }
@@ -197,7 +195,6 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
 static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
 {
 	struct fdp_i2c_phy *phy = phy_id;
-	struct i2c_client *client;
 	struct sk_buff *skb;
 	int r;
@@ -206,9 +203,6 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
 		return IRQ_NONE;
 	}
-	client = phy->i2c_dev;
-	dev_dbg(&client->dev, "%s\n", __func__);
 	r = fdp_nci_i2c_read(phy, &skb);
 	if (r == -EREMOTEIO)
@@ -217,7 +211,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
 		return IRQ_HANDLED;
 	if (skb != NULL)
-		fdp_nci_recv_frame(phy->ndev, skb);
+		nci_recv_frame(phy->ndev, skb);
 	return IRQ_HANDLED;
 }
@@ -288,8 +282,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
 	u32 clock_freq;
 	int r = 0;
-	dev_dbg(dev, "%s\n", __func__);
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		nfc_err(dev, "No I2C_FUNC_I2C support\n");
 		return -ENODEV;
@@ -351,8 +343,6 @@ static int fdp_nci_i2c_remove(struct i2c_client *client)
 {
 	struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
-	dev_dbg(&client->dev, "%s\n", __func__);
 	fdp_nci_remove(phy->ndev);
 	fdp_nci_i2c_disable(phy);


@@ -151,10 +151,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
 			ret = -EREMOTEIO;
 		} else
 			ret = 0;
-		kfree_skb(skb);
 	}
-	return ret;
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+	consume_skb(skb);
+	return 0;
 }
static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,


@@ -70,22 +70,20 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
 	struct nxp_nci_info *info = nci_get_drvdata(ndev);
 	int r;
-	if (!info->phy_ops->write) {
-		r = -ENOTSUPP;
-		goto send_exit;
-	}
+	if (!info->phy_ops->write)
+		return -EOPNOTSUPP;
-	if (info->mode != NXP_NCI_MODE_NCI) {
-		r = -EINVAL;
-		goto send_exit;
-	}
+	if (info->mode != NXP_NCI_MODE_NCI)
+		return -EINVAL;
 	r = info->phy_ops->write(info->phy_id, skb);
-	if (r < 0)
+	if (r < 0) {
 		kfree_skb(skb);
+		return r;
+	}
-send_exit:
-	return r;
+	consume_skb(skb);
+	return 0;
 }
static struct nci_ops nxp_nci_ops = {
@@ -104,10 +102,8 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
 	int r;
 	info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
-	if (!info) {
-		r = -ENOMEM;
-		goto probe_exit;
-	}
+	if (!info)
+		return -ENOMEM;
 	info->phy_id = phy_id;
 	info->pdev = pdev;
@@ -120,31 +116,25 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
 	if (info->phy_ops->set_mode) {
 		r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
 		if (r < 0)
-			goto probe_exit;
+			return r;
 	}
 	info->mode = NXP_NCI_MODE_COLD;
 	info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
 					 NXP_NCI_HDR_LEN, 0);
-	if (!info->ndev) {
-		r = -ENOMEM;
-		goto probe_exit;
-	}
+	if (!info->ndev)
+		return -ENOMEM;
 	nci_set_parent_dev(info->ndev, pdev);
 	nci_set_drvdata(info->ndev, info);
 	r = nci_register_device(info->ndev);
-	if (r < 0)
-		goto probe_exit_free_nci;
+	if (r < 0) {
+		nci_free_device(info->ndev);
+		return r;
+	}
 	*ndev = info->ndev;
-	goto probe_exit;
-probe_exit_free_nci:
-	nci_free_device(info->ndev);
-probe_exit:
-	return r;
+	return 0;
 }
EXPORT_SYMBOL(nxp_nci_probe);


@@ -97,11 +97,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
 	}
 	ret = s3fwrn5_write(info, skb);
-	if (ret < 0)
+	if (ret < 0) {
 		kfree_skb(skb);
+		mutex_unlock(&info->mutex);
+		return ret;
+	}
+	consume_skb(skb);
 	mutex_unlock(&info->mutex);
-	return ret;
+	return 0;
 }
static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
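
The fdp, nfcmrvl, nxp-nci and s3fwrn5 hunks above all enforce the same skb ownership rule: an NCI driver's ->send() owns the skb it is given, so it must kfree_skb() it when the write fails and consume_skb() it on success (consume_skb() records a normal consumption rather than a drop). Common shape, assuming a synchronous phy write (foo_phy_write() is hypothetical) that copies the data out:

	static int foo_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
	{
		int ret = foo_phy_write(ndev, skb->data, skb->len); /* hypothetical */

		if (ret < 0) {
			kfree_skb(skb);   /* error path: drop our reference */
			return ret;
		}
		consume_skb(skb);         /* success: data already copied out */
		return 0;
	}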


@@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev)
 	return vi->txn_irq;
 }
+EXPORT_SYMBOL(iosapic_serial_irq);
 #endif


@@ -815,6 +815,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 	}
 	mutex_lock(&sdev->state_mutex);
+	switch (sdev->sdev_state) {
+	case SDEV_RUNNING:
+	case SDEV_OFFLINE:
+		break;
+	default:
+		mutex_unlock(&sdev->state_mutex);
+		return -EINVAL;
+	}
 	if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
 		ret = 0;
 	} else {


@@ -1109,6 +1109,7 @@ static int vdec_probe(struct platform_device *pdev)
 err_vdev_release:
 	video_device_release(vdev);
+	v4l2_device_unregister(&core->v4l2_dev);
 	return ret;
 }
@@ -1117,6 +1118,7 @@ static int vdec_remove(struct platform_device *pdev)
 	struct amvdec_core *core = platform_get_drvdata(pdev);
 	video_unregister_device(core->vdev_dec);
+	v4l2_device_unregister(&core->v4l2_dev);
 	return 0;
 }


@@ -1258,11 +1258,12 @@ static int rkisp1_capture_link_validate(struct media_link *link)
 	struct rkisp1_capture *cap = video_get_drvdata(vdev);
 	const struct rkisp1_capture_fmt_cfg *fmt =
 		rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
-	struct v4l2_subdev_format sd_fmt;
+	struct v4l2_subdev_format sd_fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.pad = link->source->index,
+	};
 	int ret;
-	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-	sd_fmt.pad = link->source->index;
 	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
 	if (ret)
 		return ret;


@@ -500,6 +500,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
 	sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
 	sink_fmt->field = V4L2_FIELD_NONE;
 	sink_fmt->code = RKISP1_DEF_FMT;
+	sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+	sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+	sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
 	sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
 	sink_crop->width = RKISP1_DEFAULT_WIDTH;


@@ -1023,7 +1023,8 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	up->port.type = PORT_16550A;
 	up->capabilities |= UART_CAP_FIFO;
-	if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+	if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) &&
+	    !(up->port.flags & UPF_FULL_PROBE))
 		return;
 	/*


@@ -119,7 +119,7 @@ config SERIAL_8250_CONSOLE
 config SERIAL_8250_GSC
 	tristate
-	depends on SERIAL_8250 && GSC
+	depends on SERIAL_8250 && PARISC
 	default SERIAL_8250
 config SERIAL_8250_DMA


@@ -593,6 +593,11 @@ static int ar933x_config_rs485(struct uart_port *port,
 		dev_err(port->dev, "RS485 needs rts-gpio\n");
 		return 1;
 	}
+	if (rs485conf->flags & SER_RS485_ENABLED)
+		gpiod_set_value(up->rts_gpiod,
+				!!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
 	port->rs485 = *rs485conf;
 	return 0;
 }


@@ -58,24 +58,12 @@
 #define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI		0x461e
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI		0x464e
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI		0x51ed
-#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI		0xa71e
-#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI		0x7ec0
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI		0x51ed
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1		0x161a
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2		0x161b
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3		0x161d
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4		0x161e
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5		0x15d6
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6		0x15d7
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7		0x161c
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8		0x161f
 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI			0x1042
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
@@ -268,12 +256,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
+	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -342,15 +325,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	    pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
 		xhci->quirks |= XHCI_NO_SOFT_RETRY;
-	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
-	    (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
-	     pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
+	/* xHC spec requires PCI devices to support D3hot and D3cold */
+	if (xhci->hci_version >= 0x120)
 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)


@@ -1041,6 +1041,48 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 	SETUP_FB(fb);
 }
+#define ARTIST_VRAM_SIZE			0x000804
+#define ARTIST_VRAM_SRC				0x000808
+#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL	0x000a04
+#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE	0x000b00
+#define ARTIST_SRC_BM_ACCESS			0x018008
+#define ARTIST_FGCOLOR				0x018010
+#define ARTIST_BGCOLOR				0x018014
+#define ARTIST_BITMAP_OP			0x01801c
+static void
+stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct stifb_info *fb = container_of(info, struct stifb_info, info);
+	if (rect->rop != ROP_COPY ||
+	    (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32))
+		return cfb_fillrect(info, rect);
+	SETUP_HW(fb);
+	if (fb->info.var.bits_per_pixel == 32) {
+		WRITE_WORD(0xBBA0A000, fb, REG_10);
+		NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+	} else {
+		WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+		NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+	}
+	WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP);
+	WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS);
+	NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000);
+	NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color);
+	WRITE_WORD(0, fb, ARTIST_BGCOLOR);
+	NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy));
+	SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height));
+	SETUP_FB(fb);
+}
 static void __init
 stifb_init_display(struct stifb_info *fb)
 {
@@ -1105,7 +1147,7 @@ static const struct fb_ops stifb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= stifb_setcolreg,
 	.fb_blank	= stifb_blank,
-	.fb_fillrect	= cfb_fillrect,
+	.fb_fillrect	= stifb_fillrect,
 	.fb_copyarea	= stifb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
 };
@@ -1297,7 +1339,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
 		goto out_err0;
 	}
 	info->screen_size = fix->smem_len;
-	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
+	info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
 	info->pseudo_palette = &fb->pseudo_palette;
 	/* This has to be done !!! */


@@ -288,8 +288,10 @@ static void prelim_release(struct preftree *preftree)
 	struct prelim_ref *ref, *next_ref;
 	rbtree_postorder_for_each_entry_safe(ref, next_ref,
-					     &preftree->root.rb_root, rbnode)
+					     &preftree->root.rb_root, rbnode) {
+		free_inode_elem_list(ref->inode_list);
 		free_pref(ref);
+	}
 	preftree->root = RB_ROOT_CACHED;
 	preftree->count = 0;
@@ -647,6 +649,18 @@ unode_aux_to_inode_list(struct ulist_node *node)
 	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 }
+static void free_leaf_list(struct ulist *ulist)
+{
+	struct ulist_node *node;
+	struct ulist_iterator uiter;
+	ULIST_ITER_INIT(&uiter);
+	while ((node = ulist_next(ulist, &uiter)))
+		free_inode_elem_list(unode_aux_to_inode_list(node));
+	ulist_free(ulist);
+}
/*
* We maintain three separate rbtrees: one for direct refs, one for
* indirect refs which have a key, and one for indirect refs which do not
@@ -761,7 +775,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 		cond_resched();
 	}
 out:
-	ulist_free(parents);
+	/*
+	 * We may have inode lists attached to refs in the parents ulist, so we
+	 * must free them before freeing the ulist and its refs.
+	 */
+	free_leaf_list(parents);
 	return ret;
 }
@@ -1372,6 +1390,12 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			if (ret < 0)
 				goto out;
 			ref->inode_list = eie;
+			/*
+			 * We transferred the list ownership to the ref,
+			 * so set to NULL to avoid a double free in case
+			 * an error happens after this.
+			 */
+			eie = NULL;
 		}
 		ret = ulist_add_merge_ptr(refs, ref->parent,
 					  ref->inode_list,
@@ -1397,6 +1421,14 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 				eie->next = ref->inode_list;
 			}
 			eie = NULL;
+			/*
+			 * We have transferred the inode list ownership from
+			 * this ref to the ref we added to the 'refs' ulist.
+			 * So set this ref's inode list to NULL to avoid
+			 * use-after-free when our caller uses it or double
+			 * frees in case an error happens before we return.
+			 */
+			ref->inode_list = NULL;
 		}
 		cond_resched();
 	}
@@ -1413,24 +1445,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	return ret;
 }
-static void free_leaf_list(struct ulist *blocks)
-{
-	struct ulist_node *node = NULL;
-	struct extent_inode_elem *eie;
-	struct ulist_iterator uiter;
-	ULIST_ITER_INIT(&uiter);
-	while ((node = ulist_next(blocks, &uiter))) {
-		if (!node->aux)
-			continue;
-		eie = unode_aux_to_inode_list(node);
-		free_inode_elem_list(eie);
-		node->aux = 0;
-	}
-	ulist_free(blocks);
-}
/*
* Finds all leafs with a reference to the specified combination of bytenr and
* offset. key_list_head will point to a list of corresponding keys (caller must
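
All of the backref changes follow one ownership discipline: when an inode list is handed from eie to a ref, or from one ref to a node in the 'refs' ulist, the donor pointer is NULLed immediately so that no error path can free the list twice, and the final cleanup (free_leaf_list(), prelim_release()) frees whatever the surviving owners still hold. In miniature (a sketch, not the btrfs code):

	struct node { struct elem *list; };

	static void transfer(struct node *dst, struct node *src)
	{
		dst->list = src->list;
		src->list = NULL; /* donor gives up ownership: no double free */
	}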


@@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
 }
 struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
-				u64 root_objectid, u32 generation,
+				u64 root_objectid, u64 generation,
 				int check_generation)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);


@@ -19,7 +19,7 @@ struct btrfs_fid {
 } __attribute__ ((packed));
 struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
-				u64 root_objectid, u32 generation,
+				u64 root_objectid, u64 generation,
 				int check_generation);
 struct dentry *btrfs_get_parent(struct dentry *child);


@@ -237,8 +237,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 	ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
 				BTRFS_FS_TREE_OBJECTID);
-	if (ret)
+	if (ret) {
+		ulist_free(old_roots);
 		return ret;
+	}
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
 			false);
@@ -273,8 +275,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 	}
 	ret = remove_extent_item(root, nodesize, nodesize);
-	if (ret)
+	if (ret) {
+		ulist_free(old_roots);
 		return -EINVAL;
+	}
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
 			false);
@@ -338,8 +342,10 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
 				BTRFS_FS_TREE_OBJECTID);
-	if (ret)
+	if (ret) {
+		ulist_free(old_roots);
 		return ret;
+	}
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
 			false);
@@ -373,8 +379,10 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = add_tree_ref(root, nodesize, nodesize, 0,
 			BTRFS_FIRST_FREE_OBJECTID);
-	if (ret)
+	if (ret) {
+		ulist_free(old_roots);
 		return ret;
+	}
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
 			false);
@@ -414,8 +422,10 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = remove_extent_ref(root, nodesize, nodesize, 0,
 			BTRFS_FIRST_FREE_OBJECTID);
-	if (ret)
+	if (ret) {
+		ulist_free(old_roots);
 		return ret;
+	}
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
 			false);


@@ -222,7 +222,7 @@ struct fscrypt_info {
 	 * will be NULL if the master key was found in a process-subscribed
 	 * keyring rather than in the filesystem-level keyring.
 	 */
-	struct key *ci_master_key;
+	struct fscrypt_master_key *ci_master_key;
 	/*
 	 * Link in list of inodes that were unlocked with the master key.
@@ -461,6 +461,40 @@ struct fscrypt_master_key_secret {
  */
 struct fscrypt_master_key {
+	/*
+	 * Back-pointer to the super_block of the filesystem to which this
+	 * master key has been added.  Only valid if ->mk_active_refs > 0.
+	 */
+	struct super_block	*mk_sb;
+	/*
+	 * Link in ->mk_sb->s_master_keys->key_hashtable.
+	 * Only valid if ->mk_active_refs > 0.
+	 */
+	struct hlist_node	mk_node;
+	/* Semaphore that protects ->mk_secret and ->mk_users */
+	struct rw_semaphore	mk_sem;
+	/*
+	 * Active and structural reference counts.  An active ref guarantees
+	 * that the struct continues to exist, continues to be in the keyring
+	 * ->mk_sb->s_master_keys, and that any embedded subkeys (e.g.
+	 * ->mk_direct_keys) that have been prepared continue to exist.
+	 * A structural ref only guarantees that the struct continues to exist.
+	 *
+	 * There is one active ref associated with ->mk_secret being present,
+	 * and one active ref for each inode in ->mk_decrypted_inodes.
+	 *
+	 * There is one structural ref associated with the active refcount being
+	 * nonzero.  Finding a key in the keyring also takes a structural ref,
+	 * which is then held temporarily while the key is operated on.
+	 */
+	refcount_t		mk_active_refs;
+	refcount_t		mk_struct_refs;
+	struct rcu_head		mk_rcu_head;
 	/*
 	 * The secret key material.  After FS_IOC_REMOVE_ENCRYPTION_KEY is
 	 * executed, this is wiped and no new inodes can be unlocked with this
@@ -469,7 +503,10 @@ struct fscrypt_master_key {
 	 * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
 	 * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
 	 *
-	 * Locking: protected by this master key's key->sem.
+	 * While ->mk_secret is present, one ref in ->mk_active_refs is held.
+	 *
+	 * Locking: protected by ->mk_sem.  The manipulation of ->mk_active_refs
+	 * associated with this field is protected by ->mk_sem as well.
 	 */
 	struct fscrypt_master_key_secret	mk_secret;
@@ -490,22 +527,12 @@ struct fscrypt_master_key {
 	 *
 	 * This is NULL for v1 policy keys; those can only be added by root.
 	 *
-	 * Locking: in addition to this keyring's own semaphore, this is
-	 * protected by this master key's key->sem, so we can do atomic
-	 * search+insert.  It can also be searched without taking any locks, but
-	 * in that case the returned key may have already been removed.
+	 * Locking: protected by ->mk_sem.  (We don't just rely on the keyrings
+	 * subsystem semaphore ->mk_users->sem, as we need support for atomic
+	 * search+insert along with proper synchronization with ->mk_secret.)
 	 */
 	struct key		*mk_users;
-	/*
-	 * Length of ->mk_decrypted_inodes, plus one if mk_secret is present.
-	 * Once this goes to 0, the master key is removed from ->s_master_keys.
-	 * The 'struct fscrypt_master_key' will continue to live as long as the
-	 * 'struct key' whose payload it is, but we won't let this reference
-	 * count rise again.
-	 */
-	refcount_t		mk_refcount;
 	/*
 	 * List of inodes that were unlocked using this key.  This allows the
 	 * inodes to be evicted efficiently if the key is removed.
@@ -531,10 +558,10 @@ static inline bool
 is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
 {
 	/*
-	 * The READ_ONCE() is only necessary for fscrypt_drop_inode() and
-	 * fscrypt_key_describe().  These run in atomic context, so they can't
-	 * take the key semaphore and thus 'secret' can change concurrently
-	 * which would be a data race.  But they only need to know whether the
+	 * The READ_ONCE() is only necessary for fscrypt_drop_inode().
+	 * fscrypt_drop_inode() runs in atomic context, so it can't take the key
+	 * semaphore and thus 'secret' can change concurrently which would be a
+	 * data race.  But fscrypt_drop_inode() only need to know whether the
 	 * secret *was* present at the time of check, so READ_ONCE() suffices.
 	 */
 	return READ_ONCE(secret->size) != 0;
@@ -563,7 +590,11 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec)
 	return 0;
 }
-struct key *
+void fscrypt_put_master_key(struct fscrypt_master_key *mk);
+void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk);
+struct fscrypt_master_key *
 fscrypt_find_master_key(struct super_block *sb,
 			const struct fscrypt_key_specifier *mk_spec);
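
The rewritten struct trades the keyrings-subsystem 'struct key' for a hand-rolled object with two reference counts: mk_struct_refs only pins the memory (RCU lookups take it), while mk_active_refs additionally keeps the key usable and present in the hash table. A reduced sketch of the same two-level scheme (generic names, not the fscrypt code):

	struct obj {
		refcount_t active_refs; /* object is usable and findable */
		refcount_t struct_refs; /* memory stays allocated */
		struct rcu_head rcu;
	};

	static void put_struct_ref(struct obj *o)
	{
		if (refcount_dec_and_test(&o->struct_refs))
			kfree_rcu(o, rcu); /* wait out concurrent RCU readers */
	}

	static void put_active_ref(struct obj *o)
	{
		if (refcount_dec_and_test(&o->active_refs)) {
			/* unhash and wipe the payload here, then ... */
			put_struct_ref(o); /* drop the ref the active count held */
		}
	}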


@@ -5,8 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
  */
-#include <linux/key.h>
 #include "fscrypt_private.h"
/**
@@ -142,7 +140,6 @@ int fscrypt_prepare_setflags(struct inode *inode,
 			     unsigned int oldflags, unsigned int flags)
 {
 	struct fscrypt_info *ci;
-	struct key *key;
 	struct fscrypt_master_key *mk;
 	int err;
@@ -158,14 +155,13 @@ int fscrypt_prepare_setflags(struct inode *inode,
 		ci = inode->i_crypt_info;
 		if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
 			return -EINVAL;
-		key = ci->ci_master_key;
-		mk = key->payload.data[0];
-		down_read(&key->sem);
+		mk = ci->ci_master_key;
+		down_read(&mk->mk_sem);
 		if (is_master_key_secret_present(&mk->mk_secret))
 			err = fscrypt_derive_dirhash_key(ci, mk);
 		else
 			err = -ENOKEY;
-		up_read(&key->sem);
+		up_read(&mk->mk_sem);
 		return err;
 	}
 	return 0;


@@ -18,6 +18,7 @@
* information about these ioctls.
*/
#include <asm/unaligned.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/random.h>
@@ -25,6 +26,18 @@
#include "fscrypt_private.h"
/* The master encryption keys for a filesystem (->s_master_keys) */
struct fscrypt_keyring {
/*
* Lock that protects ->key_hashtable. It does *not* protect the
* fscrypt_master_key structs themselves.
*/
spinlock_t lock;
/* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */
struct hlist_head key_hashtable[128];
};
static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
{
fscrypt_destroy_hkdf(&secret->hkdf);
@@ -38,20 +51,70 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst,
memzero_explicit(src, sizeof(*src));
}
static void free_master_key(struct fscrypt_master_key *mk)
static void fscrypt_free_master_key(struct rcu_head *head)
{
struct fscrypt_master_key *mk =
container_of(head, struct fscrypt_master_key, mk_rcu_head);
/*
* The master key secret and any embedded subkeys should have already
* been wiped when the last active reference to the fscrypt_master_key
* struct was dropped; doing it here would be unnecessarily late.
* Nevertheless, use kfree_sensitive() in case anything was missed.
*/
kfree_sensitive(mk);
}
void fscrypt_put_master_key(struct fscrypt_master_key *mk)
{
if (!refcount_dec_and_test(&mk->mk_struct_refs))
return;
/*
* No structural references left, so free ->mk_users, and also free the
* fscrypt_master_key struct itself after an RCU grace period ensures
* that concurrent keyring lookups can no longer find it.
*/
WARN_ON(refcount_read(&mk->mk_active_refs) != 0);
key_put(mk->mk_users);
mk->mk_users = NULL;
call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key);
}
void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk)
{
struct super_block *sb = mk->mk_sb;
struct fscrypt_keyring *keyring = sb->s_master_keys;
size_t i;
wipe_master_key_secret(&mk->mk_secret);
if (!refcount_dec_and_test(&mk->mk_active_refs))
return;
/*
* No active references left, so complete the full removal of this
* fscrypt_master_key struct by removing it from the keyring and
* destroying any subkeys embedded in it.
*/
spin_lock(&keyring->lock);
hlist_del_rcu(&mk->mk_node);
spin_unlock(&keyring->lock);
/*
* ->mk_active_refs == 0 implies that ->mk_secret is not present and
* that ->mk_decrypted_inodes is empty.
*/
WARN_ON(is_master_key_secret_present(&mk->mk_secret));
WARN_ON(!list_empty(&mk->mk_decrypted_inodes));
for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]);
fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]);
fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]);
}
memzero_explicit(&mk->mk_ino_hash_key,
sizeof(mk->mk_ino_hash_key));
mk->mk_ino_hash_key_initialized = false;
key_put(mk->mk_users);
kfree_sensitive(mk);
/* Drop the structural ref associated with the active refs. */
fscrypt_put_master_key(mk);
}
static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
@@ -61,44 +124,6 @@ static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
return master_key_spec_len(spec) != 0;
}
static int fscrypt_key_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
key->payload.data[0] = (struct fscrypt_master_key *)prep->data;
return 0;
}
static void fscrypt_key_destroy(struct key *key)
{
free_master_key(key->payload.data[0]);
}
static void fscrypt_key_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_positive(key)) {
const struct fscrypt_master_key *mk = key->payload.data[0];
if (!is_master_key_secret_present(&mk->mk_secret))
seq_puts(m, ": secret removed");
}
}
/*
* Type of key in ->s_master_keys. Each key of this type represents a master
* key which has been added to the filesystem. Its payload is a
* 'struct fscrypt_master_key'. The "." prefix in the key type name prevents
* users from adding keys of this type via the keyrings syscalls rather than via
* the intended method of FS_IOC_ADD_ENCRYPTION_KEY.
*/
static struct key_type key_type_fscrypt = {
.name = "._fscrypt",
.instantiate = fscrypt_key_instantiate,
.destroy = fscrypt_key_destroy,
.describe = fscrypt_key_describe,
};
static int fscrypt_user_key_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
@@ -131,32 +156,6 @@ static struct key_type key_type_fscrypt_user = {
.describe = fscrypt_user_key_describe,
};
/* Search ->s_master_keys or ->mk_users */
static struct key *search_fscrypt_keyring(struct key *keyring,
struct key_type *type,
const char *description)
{
/*
* We need to mark the keyring reference as "possessed" so that we
* acquire permission to search it, via the KEY_POS_SEARCH permission.
*/
key_ref_t keyref = make_key_ref(keyring, true /* possessed */);
keyref = keyring_search(keyref, type, description, false);
if (IS_ERR(keyref)) {
if (PTR_ERR(keyref) == -EAGAIN || /* not found */
PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
keyref = ERR_PTR(-ENOKEY);
return ERR_CAST(keyref);
}
return key_ref_to_ptr(keyref);
}
#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \
(CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id))
#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
#define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \
(CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \
CONST_STRLEN("-users") + 1)
@@ -164,21 +163,6 @@ static struct key *search_fscrypt_keyring(struct key *keyring,
#define FSCRYPT_MK_USER_DESCRIPTION_SIZE \
(2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1)
static void format_fs_keyring_description(
char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE],
const struct super_block *sb)
{
sprintf(description, "fscrypt-%s", sb->s_id);
}
static void format_mk_description(
char description[FSCRYPT_MK_DESCRIPTION_SIZE],
const struct fscrypt_key_specifier *mk_spec)
{
sprintf(description, "%*phN",
master_key_spec_len(mk_spec), (u8 *)&mk_spec->u);
}
static void format_mk_users_keyring_description(
char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE],
const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
@@ -199,20 +183,15 @@ static void format_mk_user_description(
/* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */
static int allocate_filesystem_keyring(struct super_block *sb)
{
char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE];
struct key *keyring;
struct fscrypt_keyring *keyring;
if (sb->s_master_keys)
return 0;
format_fs_keyring_description(description, sb);
keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
current_cred(), KEY_POS_SEARCH |
KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW,
KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
keyring = kzalloc(sizeof(*keyring), GFP_KERNEL);
if (!keyring)
return -ENOMEM;
spin_lock_init(&keyring->lock);
/*
* Pairs with the smp_load_acquire() in fscrypt_find_master_key().
* I.e., here we publish ->s_master_keys with a RELEASE barrier so that
@@ -222,21 +201,80 @@ static int allocate_filesystem_keyring(struct super_block *sb)
return 0;
}
void fscrypt_sb_free(struct super_block *sb)
/*
* Release all encryption keys that have been added to the filesystem, along
* with the keyring that contains them.
*
* This is called at unmount time. The filesystem's underlying block device(s)
* are still available at this time; this is important because after user file
* accesses have been allowed, this function may need to evict keys from the
* keyslots of an inline crypto engine, which requires the block device(s).
*
* This is also called when the super_block is being freed. This is needed to
* avoid a memory leak if mounting fails after the "test_dummy_encryption"
* option was processed, as in that case the unmount-time call isn't made.
*/
void fscrypt_destroy_keyring(struct super_block *sb)
{
key_put(sb->s_master_keys);
struct fscrypt_keyring *keyring = sb->s_master_keys;
size_t i;
if (!keyring)
return;
for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) {
struct hlist_head *bucket = &keyring->key_hashtable[i];
struct fscrypt_master_key *mk;
struct hlist_node *tmp;
hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
/*
* Since all inodes were already evicted, every key
* remaining in the keyring should have an empty inode
* list, and should only still be in the keyring due to
* the single active ref associated with ->mk_secret.
* There should be no structural refs beyond the one
* associated with the active ref.
*/
WARN_ON(refcount_read(&mk->mk_active_refs) != 1);
WARN_ON(refcount_read(&mk->mk_struct_refs) != 1);
WARN_ON(!is_master_key_secret_present(&mk->mk_secret));
wipe_master_key_secret(&mk->mk_secret);
fscrypt_put_master_key_activeref(mk);
}
}
kfree_sensitive(keyring);
sb->s_master_keys = NULL;
}
/*
* Find the specified master key in ->s_master_keys.
* Returns ERR_PTR(-ENOKEY) if not found.
*/
struct key *fscrypt_find_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec)
static struct hlist_head *
fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring,
const struct fscrypt_key_specifier *mk_spec)
{
struct key *keyring;
char description[FSCRYPT_MK_DESCRIPTION_SIZE];
/*
* Since key specifiers should be "random" values, it is sufficient to
* use a trivial hash function that just takes the first several bits of
* the key specifier.
*/
unsigned long i = get_unaligned((unsigned long *)&mk_spec->u);
return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)];
}
/*
* Find the specified master key struct in ->s_master_keys and take a structural
* ref to it. The structural ref guarantees that the key struct continues to
* exist, but it does *not* guarantee that ->s_master_keys continues to contain
* the key struct. The structural ref needs to be dropped by
* fscrypt_put_master_key(). Returns NULL if the key struct is not found.
*/
struct fscrypt_master_key *
fscrypt_find_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec)
{
struct fscrypt_keyring *keyring;
struct hlist_head *bucket;
struct fscrypt_master_key *mk;
/*
* Pairs with the smp_store_release() in allocate_filesystem_keyring().
@@ -246,10 +284,38 @@ struct key *fscrypt_find_master_key(struct super_block *sb,
*/
keyring = smp_load_acquire(&sb->s_master_keys);
if (keyring == NULL)
return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */
return NULL; /* No keyring yet, so no keys yet. */
format_mk_description(description, mk_spec);
return search_fscrypt_keyring(keyring, &key_type_fscrypt, description);
bucket = fscrypt_mk_hash_bucket(keyring, mk_spec);
rcu_read_lock();
switch (mk_spec->type) {
case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
hlist_for_each_entry_rcu(mk, bucket, mk_node) {
if (mk->mk_spec.type ==
FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
memcmp(mk->mk_spec.u.descriptor,
mk_spec->u.descriptor,
FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 &&
refcount_inc_not_zero(&mk->mk_struct_refs))
goto out;
}
break;
case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER:
hlist_for_each_entry_rcu(mk, bucket, mk_node) {
if (mk->mk_spec.type ==
FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER &&
memcmp(mk->mk_spec.u.identifier,
mk_spec->u.identifier,
FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 &&
refcount_inc_not_zero(&mk->mk_struct_refs))
goto out;
}
break;
}
mk = NULL;
out:
rcu_read_unlock();
return mk;
}
static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
@@ -277,17 +343,30 @@ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk)
static struct key *find_master_key_user(struct fscrypt_master_key *mk)
{
char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE];
key_ref_t keyref;
format_mk_user_description(description, mk->mk_spec.u.identifier);
return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user,
description);
/*
* We need to mark the keyring reference as "possessed" so that we
* acquire permission to search it, via the KEY_POS_SEARCH permission.
*/
keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/),
&key_type_fscrypt_user, description, false);
if (IS_ERR(keyref)) {
if (PTR_ERR(keyref) == -EAGAIN || /* not found */
PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */
keyref = ERR_PTR(-ENOKEY);
return ERR_CAST(keyref);
}
return key_ref_to_ptr(keyref);
}
/*
* Give the current user a "key" in ->mk_users. This charges the user's quota
* and marks the master key as added by the current user, so that it cannot be
* removed by another user with the key. Either the master key's key->sem must
* be held for write, or the master key must be still undergoing initialization.
* removed by another user with the key. Either ->mk_sem must be held for
* write, or the master key must be still undergoing initialization.
*/
static int add_master_key_user(struct fscrypt_master_key *mk)
{
@@ -309,7 +388,7 @@ static int add_master_key_user(struct fscrypt_master_key *mk)
/*
* Remove the current user's "key" from ->mk_users.
* The master key's key->sem must be held for write.
* ->mk_sem must be held for write.
*
* Returns 0 if removed, -ENOKEY if not found, or another -errno code.
*/
@@ -327,63 +406,49 @@ static int remove_master_key_user(struct fscrypt_master_key *mk)
}
/*
* Allocate a new fscrypt_master_key which contains the given secret, set it as
* the payload of a new 'struct key' of type fscrypt, and link the 'struct key'
* into the given keyring. Synchronized by fscrypt_add_key_mutex.
* Allocate a new fscrypt_master_key, transfer the given secret over to it, and
* insert it into sb->s_master_keys.
*/
static int add_new_master_key(struct fscrypt_master_key_secret *secret,
const struct fscrypt_key_specifier *mk_spec,
struct key *keyring)
static int add_new_master_key(struct super_block *sb,
struct fscrypt_master_key_secret *secret,
const struct fscrypt_key_specifier *mk_spec)
{
struct fscrypt_keyring *keyring = sb->s_master_keys;
struct fscrypt_master_key *mk;
char description[FSCRYPT_MK_DESCRIPTION_SIZE];
struct key *key;
int err;
mk = kzalloc(sizeof(*mk), GFP_KERNEL);
if (!mk)
return -ENOMEM;
mk->mk_sb = sb;
init_rwsem(&mk->mk_sem);
refcount_set(&mk->mk_struct_refs, 1);
mk->mk_spec = *mk_spec;
move_master_key_secret(&mk->mk_secret, secret);
refcount_set(&mk->mk_refcount, 1); /* secret is present */
INIT_LIST_HEAD(&mk->mk_decrypted_inodes);
spin_lock_init(&mk->mk_decrypted_inodes_lock);
if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
err = allocate_master_key_users_keyring(mk);
if (err)
goto out_free_mk;
goto out_put;
err = add_master_key_user(mk);
if (err)
goto out_free_mk;
goto out_put;
}
/*
* Note that we don't charge this key to anyone's quota, since when
* ->mk_users is in use those keys are charged instead, and otherwise
* (when ->mk_users isn't in use) only root can add these keys.
*/
format_mk_description(description, mk_spec);
key = key_alloc(&key_type_fscrypt, description,
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW,
KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto out_free_mk;
}
err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL);
key_put(key);
if (err)
goto out_free_mk;
move_master_key_secret(&mk->mk_secret, secret);
refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */
spin_lock(&keyring->lock);
hlist_add_head_rcu(&mk->mk_node,
fscrypt_mk_hash_bucket(keyring, mk_spec));
spin_unlock(&keyring->lock);
return 0;
out_free_mk:
free_master_key(mk);
out_put:
fscrypt_put_master_key(mk);
return err;
}
@@ -392,42 +457,34 @@ static int add_new_master_key(struct fscrypt_master_key_secret *secret,
static int add_existing_master_key(struct fscrypt_master_key *mk,
struct fscrypt_master_key_secret *secret)
{
struct key *mk_user;
bool rekey;
int err;
/*
* If the current user is already in ->mk_users, then there's nothing to
* do. (Not applicable for v1 policy keys, which have NULL ->mk_users.)
* do. Otherwise, we need to add the user to ->mk_users. (Neither is
* applicable for v1 policy keys, which have NULL ->mk_users.)
*/
if (mk->mk_users) {
mk_user = find_master_key_user(mk);
struct key *mk_user = find_master_key_user(mk);
if (mk_user != ERR_PTR(-ENOKEY)) {
if (IS_ERR(mk_user))
return PTR_ERR(mk_user);
key_put(mk_user);
return 0;
}
}
/* If we'll be re-adding ->mk_secret, try to take the reference. */
rekey = !is_master_key_secret_present(&mk->mk_secret);
if (rekey && !refcount_inc_not_zero(&mk->mk_refcount))
return KEY_DEAD;
/* Add the current user to ->mk_users, if applicable. */
if (mk->mk_users) {
err = add_master_key_user(mk);
if (err) {
if (rekey && refcount_dec_and_test(&mk->mk_refcount))
return KEY_DEAD;
if (err)
return err;
}
}
/* Re-add the secret if needed. */
if (rekey)
if (!is_master_key_secret_present(&mk->mk_secret)) {
if (!refcount_inc_not_zero(&mk->mk_active_refs))
return KEY_DEAD;
move_master_key_secret(&mk->mk_secret, secret);
}
return 0;
}
@@ -436,38 +493,36 @@ static int do_add_master_key(struct super_block *sb,
const struct fscrypt_key_specifier *mk_spec)
{
static DEFINE_MUTEX(fscrypt_add_key_mutex);
struct key *key;
struct fscrypt_master_key *mk;
int err;
mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */
retry:
key = fscrypt_find_master_key(sb, mk_spec);
if (IS_ERR(key)) {
err = PTR_ERR(key);
if (err != -ENOKEY)
goto out_unlock;
mk = fscrypt_find_master_key(sb, mk_spec);
if (!mk) {
/* Didn't find the key in ->s_master_keys. Add it. */
err = allocate_filesystem_keyring(sb);
if (err)
goto out_unlock;
err = add_new_master_key(secret, mk_spec, sb->s_master_keys);
if (!err)
err = add_new_master_key(sb, secret, mk_spec);
} else {
/*
* Found the key in ->s_master_keys. Re-add the secret if
* needed, and add the user to ->mk_users if needed.
*/
down_write(&key->sem);
err = add_existing_master_key(key->payload.data[0], secret);
up_write(&key->sem);
down_write(&mk->mk_sem);
err = add_existing_master_key(mk, secret);
up_write(&mk->mk_sem);
if (err == KEY_DEAD) {
/* Key being removed or needs to be removed */
key_invalidate(key);
key_put(key);
goto retry;
/*
* We found a key struct, but it's already been fully
* removed. Ignore the old struct and add a new one.
* fscrypt_add_key_mutex means we don't need to worry
* about concurrent adds.
*/
err = add_new_master_key(sb, secret, mk_spec);
}
key_put(key);
fscrypt_put_master_key(mk);
}
out_unlock:
mutex_unlock(&fscrypt_add_key_mutex);
return err;
}
@@ -761,19 +816,19 @@ int fscrypt_verify_key_added(struct super_block *sb,
const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
struct fscrypt_key_specifier mk_spec;
struct key *key, *mk_user;
struct fscrypt_master_key *mk;
struct key *mk_user;
int err;
mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
key = fscrypt_find_master_key(sb, &mk_spec);
if (IS_ERR(key)) {
err = PTR_ERR(key);
mk = fscrypt_find_master_key(sb, &mk_spec);
if (!mk) {
err = -ENOKEY;
goto out;
}
mk = key->payload.data[0];
down_read(&mk->mk_sem);
mk_user = find_master_key_user(mk);
if (IS_ERR(mk_user)) {
err = PTR_ERR(mk_user);
@ -781,7 +836,8 @@ int fscrypt_verify_key_added(struct super_block *sb,
key_put(mk_user);
err = 0;
}
key_put(key);
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
out:
if (err == -ENOKEY && capable(CAP_FOWNER))
err = 0;
@@ -943,11 +999,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
struct super_block *sb = file_inode(filp)->i_sb;
struct fscrypt_remove_key_arg __user *uarg = _uarg;
struct fscrypt_remove_key_arg arg;
struct key *key;
struct fscrypt_master_key *mk;
u32 status_flags = 0;
int err;
bool dead;
bool inodes_remain;
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
@@ -967,12 +1022,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
return -EACCES;
/* Find the key being removed. */
key = fscrypt_find_master_key(sb, &arg.key_spec);
if (IS_ERR(key))
return PTR_ERR(key);
mk = key->payload.data[0];
down_write(&key->sem);
mk = fscrypt_find_master_key(sb, &arg.key_spec);
if (!mk)
return -ENOKEY;
down_write(&mk->mk_sem);
/* If relevant, remove current user's (or all users) claim to the key */
if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) {
@@ -981,7 +1034,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
else
err = remove_master_key_user(mk);
if (err) {
up_write(&key->sem);
up_write(&mk->mk_sem);
goto out_put_key;
}
if (mk->mk_users->keys.nr_leaves_on_tree != 0) {
@@ -993,26 +1046,22 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
status_flags |=
FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS;
err = 0;
up_write(&key->sem);
up_write(&mk->mk_sem);
goto out_put_key;
}
}
/* No user claims remaining. Go ahead and wipe the secret. */
dead = false;
err = -ENOKEY;
if (is_master_key_secret_present(&mk->mk_secret)) {
wipe_master_key_secret(&mk->mk_secret);
dead = refcount_dec_and_test(&mk->mk_refcount);
}
up_write(&key->sem);
if (dead) {
/*
* No inodes reference the key, and we wiped the secret, so the
* key object is free to be removed from the keyring.
*/
key_invalidate(key);
fscrypt_put_master_key_activeref(mk);
err = 0;
} else {
}
inodes_remain = refcount_read(&mk->mk_active_refs) > 0;
up_write(&mk->mk_sem);
if (inodes_remain) {
/* Some inodes still reference this key; try to evict them. */
err = try_to_lock_encrypted_files(sb, mk);
if (err == -EBUSY) {
@@ -1028,7 +1077,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
* has been fully removed including all files locked.
*/
out_put_key:
key_put(key);
fscrypt_put_master_key(mk);
if (err == 0)
err = put_user(status_flags, &uarg->removal_status_flags);
return err;
@@ -1075,7 +1124,6 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
{
struct super_block *sb = file_inode(filp)->i_sb;
struct fscrypt_get_key_status_arg arg;
struct key *key;
struct fscrypt_master_key *mk;
int err;
@@ -1092,19 +1140,18 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
arg.user_count = 0;
memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved));
key = fscrypt_find_master_key(sb, &arg.key_spec);
if (IS_ERR(key)) {
if (key != ERR_PTR(-ENOKEY))
return PTR_ERR(key);
mk = fscrypt_find_master_key(sb, &arg.key_spec);
if (!mk) {
arg.status = FSCRYPT_KEY_STATUS_ABSENT;
err = 0;
goto out;
}
mk = key->payload.data[0];
down_read(&key->sem);
down_read(&mk->mk_sem);
if (!is_master_key_secret_present(&mk->mk_secret)) {
arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED;
arg.status = refcount_read(&mk->mk_active_refs) > 0 ?
FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED :
FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */;
err = 0;
goto out_release_key;
}
@@ -1126,8 +1173,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
}
err = 0;
out_release_key:
up_read(&key->sem);
key_put(key);
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
out:
if (!err && copy_to_user(uarg, &arg, sizeof(arg)))
err = -EFAULT;
@@ -1139,13 +1186,9 @@ int __init fscrypt_init_keyring(void)
{
int err;
err = register_key_type(&key_type_fscrypt);
if (err)
return err;
err = register_key_type(&key_type_fscrypt_user);
if (err)
goto err_unregister_fscrypt;
return err;
err = register_key_type(&key_type_fscrypt_provisioning);
if (err)
@@ -1155,7 +1198,5 @@ int __init fscrypt_init_keyring(void)
err_unregister_fscrypt_user:
unregister_key_type(&key_type_fscrypt_user);
err_unregister_fscrypt:
unregister_key_type(&key_type_fscrypt);
return err;
}
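
The new lookup path above pairs RCU hash-bucket traversal with refcount_inc_not_zero(): a reader can race with teardown, and a failed increment means the entry is already dying and must be skipped. The idiom in isolation (matches() is a stand-in for the key-specifier comparison):

	rcu_read_lock();
	hlist_for_each_entry_rcu(mk, bucket, mk_node) {
		if (matches(mk, spec) &&
		    refcount_inc_not_zero(&mk->mk_struct_refs))
			goto found; /* ref taken: safe to use after unlock */
	}
	mk = NULL;
	found:
	rcu_read_unlock();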


@@ -9,7 +9,6 @@
  */
 #include <crypto/skcipher.h>
-#include <linux/key.h>
 #include <linux/random.h>
 #include "fscrypt_private.h"
@@ -156,6 +155,7 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
 {
 	crypto_free_skcipher(prep_key->tfm);
 	fscrypt_destroy_inline_crypt_key(prep_key);
+	memzero_explicit(prep_key, sizeof(*prep_key));
 }
/* Given a per-file encryption key, set up the file's crypto transform object */
@@ -443,20 +443,18 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
/*
* Find the master key, then set up the inode's actual encryption key.
*
* If the master key is found in the filesystem-level keyring, then the
* corresponding 'struct key' is returned in *master_key_ret with its semaphore
* read-locked. This is needed to ensure that only one task links the
* fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create
* an fscrypt_info for the same inode), and to synchronize the master key being
* removed with a new inode starting to use it.
* If the master key is found in the filesystem-level keyring, then it is
* returned in *mk_ret with its semaphore read-locked. This is needed to ensure
* that only one task links the fscrypt_info into ->mk_decrypted_inodes (as
* multiple tasks may race to create an fscrypt_info for the same inode), and to
* synchronize the master key being removed with a new inode starting to use it.
*/
static int setup_file_encryption_key(struct fscrypt_info *ci,
bool need_dirhash_key,
struct key **master_key_ret)
struct fscrypt_master_key **mk_ret)
{
struct key *key;
struct fscrypt_master_key *mk = NULL;
struct fscrypt_key_specifier mk_spec;
struct fscrypt_master_key *mk;
int err;
switch (ci->ci_policy.version) {
@@ -477,11 +475,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
return -EINVAL;
}
key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
if (IS_ERR(key)) {
if (key != ERR_PTR(-ENOKEY) ||
ci->ci_policy.version != FSCRYPT_POLICY_V1)
return PTR_ERR(key);
mk = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec);
if (!mk) {
if (ci->ci_policy.version != FSCRYPT_POLICY_V1)
return -ENOKEY;
err = fscrypt_select_encryption_impl(ci, false);
if (err)
@@ -495,9 +492,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
*/
return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci);
}
mk = key->payload.data[0];
down_read(&key->sem);
down_read(&mk->mk_sem);
/* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
if (!is_master_key_secret_present(&mk->mk_secret)) {
@@ -529,18 +524,18 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
if (err)
goto out_release_key;
*master_key_ret = key;
*mk_ret = mk;
return 0;
out_release_key:
up_read(&key->sem);
key_put(key);
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
return err;
}
static void put_crypt_info(struct fscrypt_info *ci)
{
struct key *key;
struct fscrypt_master_key *mk;
if (!ci)
return;
@@ -550,24 +545,18 @@ static void put_crypt_info(struct fscrypt_info *ci)
else if (ci->ci_owns_key)
fscrypt_destroy_prepared_key(&ci->ci_enc_key);
key = ci->ci_master_key;
if (key) {
struct fscrypt_master_key *mk = key->payload.data[0];
mk = ci->ci_master_key;
if (mk) {
/*
* Remove this inode from the list of inodes that were unlocked
* with the master key.
*
* In addition, if we're removing the last inode from a key that
* already had its secret removed, invalidate the key so that it
* gets removed from ->s_master_keys.
* with the master key. In addition, if we're removing the last
* inode from a master key struct that already had its secret
* removed, then complete the full removal of the struct.
*/
spin_lock(&mk->mk_decrypted_inodes_lock);
list_del(&ci->ci_master_key_link);
spin_unlock(&mk->mk_decrypted_inodes_lock);
if (refcount_dec_and_test(&mk->mk_refcount))
key_invalidate(key);
key_put(key);
fscrypt_put_master_key_activeref(mk);
}
memzero_explicit(ci, sizeof(*ci));
kmem_cache_free(fscrypt_info_cachep, ci);
@@ -581,7 +570,7 @@
{
struct fscrypt_info *crypt_info;
struct fscrypt_mode *mode;
struct key *master_key = NULL;
struct fscrypt_master_key *mk = NULL;
int res;
res = fscrypt_initialize(inode->i_sb->s_cop->flags);
@@ -604,8 +593,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
res = setup_file_encryption_key(crypt_info, need_dirhash_key,
&master_key);
res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)
goto out;
@@ -620,12 +608,9 @@ fscrypt_setup_encryption_info(struct inode *inode,
* We won the race and set ->i_crypt_info to our crypt_info.
* Now link it into the master key's inode list.
*/
if (master_key) {
struct fscrypt_master_key *mk =
master_key->payload.data[0];
refcount_inc(&mk->mk_refcount);
crypt_info->ci_master_key = key_get(master_key);
if (mk) {
crypt_info->ci_master_key = mk;
refcount_inc(&mk->mk_active_refs);
spin_lock(&mk->mk_decrypted_inodes_lock);
list_add(&crypt_info->ci_master_key_link,
&mk->mk_decrypted_inodes);
@@ -635,9 +620,9 @@ fscrypt_setup_encryption_info(struct inode *inode,
}
res = 0;
out:
if (master_key) {
up_read(&master_key->sem);
key_put(master_key);
if (mk) {
up_read(&mk->mk_sem);
fscrypt_put_master_key(mk);
}
put_crypt_info(crypt_info);
return res;
@@ -802,7 +787,6 @@ EXPORT_SYMBOL(fscrypt_free_inode);
int fscrypt_drop_inode(struct inode *inode)
{
const struct fscrypt_info *ci = fscrypt_get_info(inode);
const struct fscrypt_master_key *mk;
/*
* If ci is NULL, then the inode doesn't have an encryption key set up
@@ -812,7 +796,6 @@ int fscrypt_drop_inode(struct inode *inode)
*/
if (!ci || !ci->ci_master_key)
return 0;
mk = ci->ci_master_key->payload.data[0];
/*
* With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
@@ -831,6 +814,6 @@ int fscrypt_drop_inode(struct inode *inode)
* then the thread removing the key will either evict the inode itself
* or will correctly detect that it wasn't evicted due to the race.
*/
return !is_master_key_secret_present(&mk->mk_secret);
return !is_master_key_secret_present(&ci->ci_master_key->mk_secret);
}
EXPORT_SYMBOL_GPL(fscrypt_drop_inode);


@@ -692,12 +692,8 @@ int fscrypt_set_context(struct inode *inode, void *fs_data)
 	 * delayed key setup that requires the inode number.
 	 */
 	if (ci->ci_policy.version == FSCRYPT_POLICY_V2 &&
-	    (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
-		const struct fscrypt_master_key *mk =
-			ci->ci_master_key->payload.data[0];
-		fscrypt_hash_inode_number(ci, mk);
-	}
+	    (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
+		fscrypt_hash_inode_number(ci, ci->ci_master_key);
 	return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data);
 }


@@ -425,7 +425,8 @@ int ext4_ext_migrate(struct inode *inode)
 	 * already is extent-based, error out.
 	 */
 	if (!ext4_has_feature_extents(inode->i_sb) ||
-	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+	    ext4_has_inline_data(inode))
 		return -EINVAL;
 	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)


@@ -2235,8 +2235,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
 	memcpy(data2, de, len);
 	de = (struct ext4_dir_entry_2 *) data2;
 	top = data2 + len;
-	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
+	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len, block,
+					 (data2 + (blocksize - csum_size) -
+					  (char *) de))) {
+			brelse(bh2);
+			brelse(bh);
+			return -EFSCORRUPTED;
+		}
 		de = de2;
+	}
 	de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
 					   (char *) de, blocksize);


@@ -370,13 +370,14 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
 					       pgoff_t index,
 					       unsigned long num_ra_pages)
 {
-	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
 	struct page *page;
 	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
 	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
 	if (!page || !PageUptodate(page)) {
+		DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
 		if (page)
 			put_page(page);
 		else if (num_ra_pages > 1)


@@ -261,13 +261,14 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
 					      pgoff_t index,
 					      unsigned long num_ra_pages)
 {
-	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
 	struct page *page;
 	index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
 	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
 	if (!page || !PageUptodate(page)) {
+		DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);
 		if (page)
 			put_page(page);
 		else if (num_ra_pages > 1)
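
The ext4 and f2fs verity hunks fix the same subtlety: DEFINE_READAHEAD() captures the value of index at the point of the declaration, and the old placement ran before index had been offset to the Merkle-tree area, so readahead was aimed at the wrong file offset. A toy equivalent (struct ractl is a stand-in for the real readahead_control):

	pgoff_t index = 0;
	struct ractl r1 = { .index = index }; /* captures 0 */
	index += metadata_pos;                /* too late for r1 */
	struct ractl r2 = { .index = index }; /* captures the adjusted value */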


@@ -3320,6 +3320,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 		goto out;
 	}
+	err = file_modified(file);
+	if (err)
+		goto out;
 	if (!(mode & FALLOC_FL_KEEP_SIZE))
 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);


@@ -228,8 +228,7 @@ static int nfs_delegation_claim_opens(struct inode *inode,
  *
  */
 void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
-				  fmode_t type,
-				  const nfs4_stateid *stateid,
+				  fmode_t type, const nfs4_stateid *stateid,
 				  unsigned long pagemod_limit)
 {
struct nfs_delegation *delegation;
@@ -239,25 +238,24 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation != NULL) {
spin_lock(&delegation->lock);
if (nfs4_is_valid_delegation(delegation, 0)) {
nfs4_stateid_copy(&delegation->stateid, stateid);
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
oldcred = delegation->cred;
delegation->cred = get_cred(cred);
clear_bit(NFS_DELEGATION_NEED_RECLAIM,
&delegation->flags);
spin_unlock(&delegation->lock);
rcu_read_unlock();
put_cred(oldcred);
trace_nfs4_reclaim_delegation(inode, type);
return;
}
/* We appear to have raced with a delegation return. */
nfs4_stateid_copy(&delegation->stateid, stateid);
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
oldcred = delegation->cred;
delegation->cred = get_cred(cred);
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
if (test_and_clear_bit(NFS_DELEGATION_REVOKED,
&delegation->flags))
atomic_long_inc(&nfs_active_delegations);
spin_unlock(&delegation->lock);
rcu_read_unlock();
put_cred(oldcred);
trace_nfs4_reclaim_delegation(inode, type);
} else {
rcu_read_unlock();
nfs_inode_set_delegation(inode, cred, type, stateid,
pagemod_limit);
}
rcu_read_unlock();
nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit);
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)


@@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp)
 	ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
 				    "NFSv4.0 transport Slot table");
 	if (ret) {
+		nfs4_shutdown_slot_table(tbl);
 		kfree(tbl);
 		return ret;
 	}


@@ -1777,6 +1777,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
/* Mark all delegations for reclaim */
nfs_delegation_mark_reclaim(clp);
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
@@ -2642,6 +2643,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (status < 0)
goto out_error;
nfs4_state_end_reclaim_reboot(clp);
continue;
}
/* Detect expired delegations... */


@@ -293,7 +293,7 @@ static void __put_super(struct super_block *s)
 	WARN_ON(s->s_inode_lru.node);
 	WARN_ON(!list_empty(&s->s_mounts));
 	security_sb_free(s);
-	fscrypt_sb_free(s);
+	fscrypt_destroy_keyring(s);
 	put_user_ns(s->s_user_ns);
 	kfree(s->s_subtype);
 	call_rcu(&s->rcu, destroy_super_rcu);
@@ -454,6 +454,7 @@ void generic_shutdown_super(struct super_block *sb)
 		evict_inodes(sb);
 		/* only nonzero refcount inodes can have marks */
 		fsnotify_sb_delete(sb);
+		fscrypt_destroy_keyring(sb);
 
 		if (sb->s_dio_done_wq) {
 			destroy_workqueue(sb->s_dio_done_wq);
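Note: both fs/super.c hunks follow from the fscrypt keyring rework visible in the header changes below: the filesystem's keyring is now torn down in generic_shutdown_super(), after all inodes have been evicted, while the renamed call in __put_super() covers superblocks that never made it that far (for example a mount that failed partway through setup).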

@@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
 #endif
 
-int ghes_estatus_pool_init(int num_ghes);
+int ghes_estatus_pool_init(unsigned int num_ghes);
 
 /* From drivers/edac/ghes_edac.c */
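Note: num_ghes is a count and can never be negative; making it unsigned accompanies what appears to be an integer-overflow fix in the estatus pool-size calculation, where a large GHES count multiplied by a per-entry size could overflow a signed int.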

@@ -1161,7 +1161,7 @@ void efi_retrieve_tpm2_eventlog(void);
 	arch_efi_call_virt_teardown();					\
 })
 
-#define EFI_RANDOM_SEED_SIZE		64U
+#define EFI_RANDOM_SEED_SIZE		32U // BLAKE2S_HASH_SIZE
 
 struct linux_efi_random_seed {
 	u32	size;
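Note: as the new definition's comment records, 32 bytes is BLAKE2S_HASH_SIZE, the output size of the hash the kernel's RNG mixes input through, so a larger EFI seed buffer adds nothing beyond one hash worth of entropy.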

@@ -1471,7 +1471,7 @@ struct super_block {
 	const struct xattr_handler **s_xattr;
 #ifdef CONFIG_FS_ENCRYPTION
 	const struct fscrypt_operations	*s_cop;
-	struct key		*s_master_keys; /* master crypto keys in use */
+	struct fscrypt_keyring	*s_master_keys; /* master crypto keys in use */
 #endif
 #ifdef CONFIG_FS_VERITY
 	const struct fsverity_operations *s_vop;

@@ -200,7 +200,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
 }
 
 /* keyring.c */
-void fscrypt_sb_free(struct super_block *sb);
+void fscrypt_destroy_keyring(struct super_block *sb);
 int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
 int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
 int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
@@ -388,7 +388,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
 }
 
 /* keyring.c */
-static inline void fscrypt_sb_free(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
 {
 }

@@ -104,7 +104,7 @@ struct uart_icount {
 	__u32	buf_overrun;
 };
 
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
 typedef unsigned int __bitwise upstat_t;
 
 struct uart_port {
@@ -211,6 +211,7 @@ struct uart_port {
 #define UPF_FIXED_PORT		((__force upf_t) (1 << 29))
 #define UPF_DEAD		((__force upf_t) (1 << 30))
 #define UPF_IOREMAP		((__force upf_t) (1 << 31))
+#define UPF_FULL_PROBE		((__force upf_t) (1ULL << 32))
 
 #define __UPF_CHANGE_MASK	0x17fff
 #define UPF_CHANGE_MASK		((__force upf_t) __UPF_CHANGE_MASK)
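Note: UPF_IOREMAP at bit 31 used the last bit of the old 32-bit upf_t, so the new UPF_FULL_PROBE flag needs bit 32, which only exists once the typedef widens to u64; the 1ULL literal matters because shifting a plain int by 32 is undefined behavior. A runnable illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Bit 31 is the last bit a 32-bit flag word can hold. */
		uint32_t bit31 = UINT32_C(1) << 31;

		/* Bit 32 requires both a 64-bit flag word and a 64-bit
		 * shift operand: "1 << 32" shifts a 32-bit int and is
		 * undefined behavior before any cast can widen it. */
		uint64_t bit32 = UINT64_C(1) << 32;

		printf("bit31=%#x bit32=%#llx\n", bit31,
		       (unsigned long long)bit32);
		return 0;
	}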

@@ -35,8 +35,6 @@
 
 /* This is used to register protocols. */
 struct net_protocol {
-	int			(*early_demux)(struct sk_buff *skb);
-	int			(*early_demux_handler)(struct sk_buff *skb);
 	int			(*handler)(struct sk_buff *skb);
 
 	/* This returns an error if we weren't able to handle the error. */
@@ -53,8 +51,6 @@ struct net_protocol {
 
 #if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
-	void	(*early_demux)(struct sk_buff *skb);
-	void    (*early_demux_handler)(struct sk_buff *skb);
 	int	(*handler)(struct sk_buff *skb);
 
 	/* This returns an error if we weren't able to handle the error. */

@@ -945,7 +945,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
 
 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);
 
 #endif

@@ -176,6 +176,7 @@ INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 				struct udphdr *uh, struct sock *sk);
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+void udp_v6_early_demux(struct sk_buff *skb);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 				  netdev_features_t features, bool is_ipv6);
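Note: the last three networking hunks belong together: the early_demux and early_demux_handler members leave struct net_protocol and struct inet6_protocol entirely, and tcp_v6_early_demux()/udp_v6_early_demux() become ordinary functions the IPv6 input path calls directly, so flipping the early-demux sysctls can no longer race with readers of those function pointers.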

@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	key_t key = params->key;
 	int msgflg = params->flg;
 
-	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+	msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
 	if (unlikely(!msq))
 		return -ENOMEM;

@@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_t nsems)
 	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 		return NULL;
 
-	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
+	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
 	if (unlikely(!sma))
 		return NULL;
@@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 	undo_list = current->sysvsem.undo_list;
 	if (!undo_list) {
-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
 		if (undo_list == NULL)
 			return -ENOMEM;
 		spin_lock_init(&undo_list->lock);
@@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	rcu_read_unlock();
 
 	/* step 2: allocate new undo structure */
-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
 	if (!new) {
 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
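Note: all four ipc hunks make the same change: message queues, semaphore arrays, and undo structures are created on demand by unprivileged userspace, so their allocations now use GFP_KERNEL_ACCOUNT and get charged to the caller's memory cgroup. A hedged kernel-style sketch of the distinction (example_object and the helpers are invented for illustration):

	#include <linux/slab.h>

	struct example_object {
		int payload;
	};

	/* Unaccounted: internal state whose footprint does not scale
	 * with what userspace requests. */
	static struct example_object *alloc_internal(void)
	{
		return kzalloc(sizeof(struct example_object), GFP_KERNEL);
	}

	/* Accounted: userspace can create one per queue/set/undo list,
	 * so the memory cgroup should pay for it. */
	static struct example_object *alloc_user_triggered(void)
	{
		return kzalloc(sizeof(struct example_object),
			       GFP_KERNEL_ACCOUNT);
	}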

Some files were not shown because too many files have changed in this diff.