Merge tag 'ASB-2024-12-05_12-5.10' of https://android.googlesource.com/kernel/common into android13-5.10-waipio

https://source.android.com/docs/security/bulletin/2024-12-01

* tag 'ASB-2024-12-05_12-5.10' of https://android.googlesource.com/kernel/common: (649 commits)
  ANDROID: ABI: update symbol list for honor
  ANDROID: fs: add vendor hook to collect IO statistics
  ANDROID: tools/objtool: Pass CFLAGS to libsubcmd build via EXTRA_CFLAGS
  UPSTREAM: HID: core: zero-initialize the report buffer
  ANDROID: libsubcmd: Hoist iterator variable declarations in parse_options_subcommand()
  ANDROID: mm: Fix SPF-aware fast-mremap
  UPSTREAM: net/sched: stop qdisc_tree_reduce_backlog on TC_H_ROOT
  UPSTREAM: f2fs: support SEEK_DATA and SEEK_HOLE for compression files
  Revert "genetlink: hold RCU in genlmsg_mcast()"
  ANDROID: add file for recording allowed ABI breaks
  ANDROID: GKI: update symbol list for honor
  ANDROID: Allow vendor modules perform more operations on memleak detect
  UPSTREAM: drm/omap: fix misleading indentation in pixinc()
  UPSTREAM: bitfield: build kunit tests without structleak plugin
  BACKPORT: FROMGIT: binder: add delivered_freeze to debugfs output
  BACKPORT: FROMGIT: binder: fix memleak of proc->delivered_freeze
  FROMGIT: binder: allow freeze notification for dead nodes
  FROMGIT: binder: fix BINDER_WORK_CLEAR_FREEZE_NOTIFICATION debug logs
  FROMGIT: binder: fix BINDER_WORK_FROZEN_BINDER debug logs
  BACKPORT: FROMGIT: binder: fix freeze UAF in binder_release_work()
  ...

 Conflicts:
	android/abi_gki_aarch64.xml
	net/qrtr/af_qrtr.c

Change-Id: I4f416cf6c90e71fbdc0bea2c76a620842a2a2288
commit 6725684c74
Michael Bestas, 2024-12-16 00:43:42 +02:00
GPG Key ID: CC95044519BE6669 (no known key found for this signature in database)
528 changed files with 5728 additions and 3422 deletions

.gitignore

@ -125,7 +125,6 @@ GTAGS
# id-utils files
ID
*.orig
*~
\#*#


@ -4165,6 +4165,16 @@
printk.time= Show timing data prefixed to each printk message line
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
proc_mem.force_override= [KNL]
Format: {always | ptrace | never}
Traditionally /proc/pid/mem allows memory permissions to be
overridden without restrictions. This option may be set to
restrict that. Can be one of:
- 'always': traditional behavior always allows mem overrides.
- 'ptrace': only allow mem overrides for active ptracers.
- 'never': never allow mem overrides.
If not specified, default is the CONFIG_PROC_MEM_* choice.
processor.max_cstate= [HW,ACPI]
Limit processor to maximum C-state
max_cstate=9 overrides any DMI blacklist limit.
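A hypothetical sketch of the proc_mem.force_override policy described in the entry above (the constant and helper names are assumptions for illustration, not the actual fs/proc implementation):

	/* Illustrative only: how the three override modes could gate forced access. */
	static bool proc_mem_force_allowed(struct task_struct *task)
	{
		switch (proc_mem_force_override) {	/* assumed mode variable */
		case PROC_MEM_FORCE_ALWAYS:		/* traditional behaviour */
			return true;
		case PROC_MEM_FORCE_PTRACE:		/* only for an active ptracer */
			return ptrace_parent(task) == current;
		case PROC_MEM_FORCE_NEVER:
		default:
			return false;
		}
	}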


@ -112,6 +112,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
@ -140,6 +142,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |


@ -519,7 +519,7 @@ at module load time (for a module) with::
alerts_broken
The addresses are normal I2C addresses. The adapter is the string
name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
spaces, so if the name is "This is an I2C chip" you can say
adapter_name=ThisisanI2cchip. This is because it's hard to pass in


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 226
SUBLEVEL = 228
EXTRAVERSION =
NAME = Dare mighty things

File diff suppressed because it is too large


@ -0,0 +1,127 @@
# How to use this file: http://go/approve-abi-break
# ABI freeze commit: 870488eb0745645feff5bddfd44fe538660b9cf8
type 'struct xhci_hcd' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }' was added
type 'struct xhci_hcd' changed
member changed from 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide315; union { }; }' to 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }'
type changed from 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide315; union { }; }' to 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }'
member 'struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide315' was removed
member 'struct { u64 android_kabi_reserved1; }' was added
type 'struct spi_controller' changed
member changed from 'u8 unused_native_cs' to 's8 unused_native_cs'
type changed from 'u8' = '__u8' = 'unsigned char' to 's8' = '__s8' = 'signed char'
resolved type changed from 'unsigned char' to 'signed char'
member changed from 'u8 max_native_cs' to 's8 max_native_cs'
type changed from 'u8' = '__u8' = 'unsigned char' to 's8' = '__s8' = 'signed char'
resolved type changed from 'unsigned char' to 'signed char'
type 'struct xhci_hcd' changed
member changed from 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide316; union { }; }' to 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }'
type changed from 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide316; union { }; }' to 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }'
member 'struct { u64 android_kabi_reserved1; } __UNIQUE_ID_android_kabi_hide316' was removed
member 'struct { u64 android_kabi_reserved1; }' was added
type 'struct ehci_hcd' changed
member 'unsigned int is_aspeed' was added
type 'struct fscrypt_mode' changed
byte size changed from 32 to 40
member 'int security_strength' was added
3 members ('int ivsize' .. 'enum blk_crypto_mode_num blk_crypto_mode') changed
offset changed by 32
type 'struct sock' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { spinlock_t sk_peer_lock; struct { u64 android_kabi_reserved1; }; union { }; }' was added
type 'struct bpf_map' changed
member changed from 'u64 writecnt' to 'atomic64_t writecnt'
type changed from 'u64' = '__u64' = 'long long unsigned int' to 'atomic64_t' = 'struct { s64 counter; }'
resolved type changed from 'long long unsigned int' to 'struct { s64 counter; }'
type 'struct fib_rules_ops' changed
member changed from 'bool(* suppress)(struct fib_rule*, struct fib_lookup_arg*)' to 'bool(* suppress)(struct fib_rule*, int, struct fib_lookup_arg*)'
type changed from 'bool(*)(struct fib_rule*, struct fib_lookup_arg*)' to 'bool(*)(struct fib_rule*, int, struct fib_lookup_arg*)'
pointed-to type changed from 'bool(struct fib_rule*, struct fib_lookup_arg*)' to 'bool(struct fib_rule*, int, struct fib_lookup_arg*)'
parameter 2 type changed from 'struct fib_lookup_arg*' to 'int'
parameter 3 of type 'struct fib_lookup_arg*' was added
type 'struct snd_pcm_runtime' changed
byte size changed from 768 to 824
member 'struct mutex buffer_mutex' was added
member 'atomic_t buffer_accessing' was added
type 'struct gpio_irq_chip' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { bool initialized; struct { u64 android_kabi_reserved1; }; union { }; }' was added
function symbol changed from 'int hex_to_bin(char)' to 'int hex_to_bin(unsigned char)'
type changed from 'int(char)' to 'int(unsigned char)'
parameter 1 type changed from 'char' to 'unsigned char'
1 variable symbol(s) removed
'struct tracepoint __tracepoint_android_vh_record_percpu_rwsem_lock_starttime'
1 function symbol(s) removed
'int __traceiter_android_vh_record_percpu_rwsem_lock_starttime(void*, struct task_struct*, unsigned long int)'
type 'struct fscrypt_info' changed
member changed from 'struct key* ci_master_key' to 'struct fscrypt_master_key* ci_master_key'
type changed from 'struct key*' to 'struct fscrypt_master_key*'
pointed-to type changed from 'struct key' to 'struct fscrypt_master_key'
type 'struct tcp_sock' changed
member 'u32 max_packets_seq' was removed
member 'u32 cwnd_usage_seq' was added
type 'struct iphdr' changed
member '__be32 saddr' was removed
member '__be32 daddr' was removed
member 'union { struct { __be32 saddr; __be32 daddr; }; struct { __be32 saddr; __be32 daddr; } addrs; }' was added
type 'struct super_block' changed
member changed from 'struct key* s_master_keys' to 'struct fscrypt_keyring* s_master_keys'
type changed from 'struct key*' to 'struct fscrypt_keyring*'
pointed-to type changed from 'struct key' to 'struct fscrypt_keyring'
type 'struct sk_buff' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { struct { __u8 scm_io_uring; __u8 android_kabi_reserved1_padding1; __u16 android_kabi_reserved1_padding2; __u32 android_kabi_reserved1_padding3; }; struct { u64 android_kabi_reserved1; }; union { }; }' was added
type 'struct task_struct' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { void* pf_io_worker; struct { u64 android_kabi_reserved1; }; union { }; }' was added
type 'struct anon_vma' changed
byte size changed from 104 to 120
member 'unsigned long int num_children' was added
member 'unsigned long int num_active_vmas' was added
function symbol changed from 'struct irq_domain* __irq_domain_add(struct fwnode_handle*, int, irq_hw_number_t, int, const struct irq_domain_ops*, void*)' to 'struct irq_domain* __irq_domain_add(struct fwnode_handle*, unsigned int, irq_hw_number_t, int, const struct irq_domain_ops*, void*)'
type changed from 'struct irq_domain*(struct fwnode_handle*, int, irq_hw_number_t, int, const struct irq_domain_ops*, void*)' to 'struct irq_domain*(struct fwnode_handle*, unsigned int, irq_hw_number_t, int, const struct irq_domain_ops*, void*)'
parameter 2 type changed from 'int' to 'unsigned int'
function symbol changed from 'is_ashmem_file' to 'int is_ashmem_file(struct file*)'
CRC changed from 0x4bef6e5f to 0x94fc40b5
type 'struct perf_event' changed
member 'unsigned int group_generation' was added
type 'struct ipv6_devconf' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { struct { __s32 accept_ra_min_lft; u32 padding; }; struct { u64 android_kabi_reserved1; }; union { }; }' was added
type 'struct clk_core' changed
byte size changed from 248 to 264
member 'struct hlist_node rpm_node' was added
30 members ('struct device_node* of_node' .. 'struct kref ref') changed
offset changed by 128
type 'enum binder_work_type' changed
enumerator 'BINDER_WORK_FROZEN_BINDER' (9) was added
... 1 other enumerator(s) added
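To make one of the entries above concrete, the 'struct fscrypt_mode' size change is plain layout arithmetic (assuming the 64-bit GKI layout the report implies):

	old size                  = 32 bytes
	+ int security_strength   =  4 bytes  (members after it shift by 32 bits)
	= 36 bytes, rounded up to the struct's 8-byte (pointer) alignment
	new size                  = 40 bytes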


@ -1,7 +1,13 @@
[abi_symbol_list]
arch_vma_name
_vm_normal_page
pmd_clear_bad
__pmd_trans_huge_lock
__traceiter_android_rvh_dma_buf_stats_teardown
__traceiter_android_vh_tune_fault_around_bytes
__traceiter_android_vh_io_statistics
__traceiter_android_vh_do_swap_page_spf
__tracepoint_android_rvh_dma_buf_stats_teardown
__tracepoint_android_vh_tune_fault_around_bytes
__tracepoint_android_vh_io_statistics
__tracepoint_android_vh_do_swap_page_spf


@ -2,3 +2,5 @@
__traceiter_android_vh_wakeup_bypass
__tracepoint_android_vh_wakeup_bypass
tty_termios_hw_change
usb_bus_idr
usb_bus_idr_lock


@ -77,7 +77,7 @@ &gpio {
};
&hdmi {
hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
status = "okay";
};


@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
&iomuxc_lpsr {
pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
fsl,phy = <
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
>;
};


@ -692,7 +692,7 @@ rtt: rtt@fffffe20 {
compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xfffffe20 0x20>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k 0>;
clocks = <&clk32k 1>;
};
pit: timer@fffffe40 {
@ -718,7 +718,7 @@ rtc: rtc@fffffea8 {
compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
reg = <0xfffffea8 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k 0>;
clocks = <&clk32k 1>;
};
watchdog: watchdog@ffffff80 {


@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
return;
}
map = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(map)) {
pr_err("PLATSMP: No syscon regmap\n");
return;


@ -782,6 +782,7 @@ config ARM64_ERRATUM_3194386
* ARM Cortex-A78C erratum 3324346
* ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A715 erratum 3456084
* ARM Cortex-A720 erratum 3456091
* ARM Cortex-A725 erratum 3456106
* ARM Cortex-X1 erratum 3324344
@ -792,6 +793,7 @@ config ARM64_ERRATUM_3194386
* ARM Cortex-X925 erratum 3324334
* ARM Neoverse-N1 erratum 3324349
* ARM Neoverse N2 erratum 3324339
* ARM Neoverse-N3 erratum 3456111
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417


@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X
LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour


@ -25,12 +25,12 @@ chosen {
backlight: edp-backlight {
compatible = "pwm-backlight";
power-supply = <&vcc_12v>;
pwms = <&pwm0 0 740740 0>;
pwms = <&pwm0 0 125000 0>;
};
bat: battery {
compatible = "simple-battery";
charge-full-design-microamp-hours = <9800000>;
charge-full-design-microamp-hours = <10000000>;
voltage-max-design-microvolt = <4350000>;
voltage-min-design-microvolt = <3000000>;
};
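The third pwms cell above is the PWM period in nanoseconds, so the backlight change is, in effect, a frequency change (simple unit conversion, shown for orientation only):

	1,000,000,000 ns/s / 740740 ns ≈ 1350 Hz  (old period)
	1,000,000,000 ns/s / 125000 ns  = 8000 Hz  (new period)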


@ -148,6 +148,22 @@ &emmc_phy {
drive-impedance-ohm = <33>;
};
&gpio3 {
/*
* The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
* eMMC and SPI flash powered-down initially (in fact it keeps the
* reset signal asserted). BIOS_DISABLE_OVERRIDE pin allows to override
* that signal so that eMMC and SPI can be used regardless of the state
* of the signal.
*/
bios-disable-override-hog {
gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
gpio-hog;
line-name = "bios_disable_override";
output-high;
};
};
&gmac {
assigned-clocks = <&cru SCLK_RMII_SRC>;
assigned-clock-parents = <&clkin_gmac>;
@ -437,9 +453,14 @@ &pcie_clkreqn_cpm {
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
gpios {
bios_disable_override_hog_pin: bios-disable-override-hog-pin {
rockchip,pins =
<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
};
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;


@ -81,6 +81,7 @@
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
@ -92,6 +93,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_POTENZA 0x000
@ -141,6 +143,7 @@
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
@ -152,6 +155,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)


@ -10,11 +10,9 @@
#include <asm/insn.h>
#include <asm/probes.h>
#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
typedef u32 uprobe_opcode_t;
@ -23,8 +21,8 @@ struct arch_uprobe_task {
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
__le32 insn;
__le32 ixol;
};
struct arch_probe_insn api;
bool simulate;
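A note on the shape of this change: AArch64 instructions are always 32-bit and little-endian in memory, so storing the probed instruction as a __le32 forces callers to convert explicitly. The access pattern (mirroring the uprobes.c hunk later in this merge, not a new API) is simply:

	u32 insn = le32_to_cpu(auprobe->insn);	/* explicit LE -> CPU-endian read */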


@ -373,6 +373,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
@ -383,6 +384,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),


@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
probe_opcode_t insn = le32_to_cpu(*addr);
probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
struct arch_probe_insn *api = &asi->api;
if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
decoded = INSN_GOOD_NO_SLOT;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
decoded = INSN_GOOD_NO_SLOT;
} else {
decoded = arm_probe_decode_insn(insn, &asi->api);
}
/*
* If there's a symbol defined in front of and near enough to
@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))


@ -170,17 +170,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
{
u64 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (u64 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
if (opcode & (1 << 30)) /* x0-x30 */
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
else /* w0-w30 */
set_w_reg(regs, xn, *load_addr);
set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
@ -188,14 +186,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
{
s32 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (s32 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
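For reference, the displacement ldr_displacement() produces for these literal forms comes from the LDR (literal) encoding: a signed 19-bit word offset scaled by 4. A sketch of that arithmetic (an assumption about the helper, not a copy of the kernel's version):

	/* LDR (literal): byte offset = sign_extend(imm19) * 4, imm19 = opcode bits [23:5]. */
	static long ldr_literal_offset(u32 opcode)
	{
		return (long)sign_extend32((opcode >> 5) & 0x7ffff, 18) * 4;
	}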


@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
switch (arm_probe_decode_insn(insn, &auprobe->api)) {
case INSN_REJECTED:
@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
addr = instruction_pointer(regs);
if (auprobe->api.handler)


@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
{
/* regs will be equal to current_pt_regs() */
struct kernel_clone_args args = {
.flags = regs->d1 & ~CSIGNAL,
.flags = (u32)(regs->d1) & ~CSIGNAL,
.pidfd = (int __user *)regs->d3,
.child_tid = (int __user *)regs->d4,
.parent_tid = (int __user *)regs->d3,


@ -245,11 +245,6 @@ asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
if (!memblock.reserved.cnt) {
pr_emerg("Error memory count\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < 0x400000) {
pr_emerg("Memory must be greater than 4MB\n");
machine_restart(NULL);


@ -1071,8 +1071,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
#if 0 && defined(CONFIG_64BIT)
/* Revisit when we have 64-bit code above 4Gb */
#if defined(CONFIG_64BIT)
b,n intr_save2
skip_save_ior:
@ -1080,8 +1079,7 @@ skip_save_ior:
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
extrd,u,* %r8,PSW_W_BIT,1,%r1
cmpib,COND(=),n 1,%r1,intr_save2
bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */


@ -217,10 +217,10 @@ linux_gateway_entry:
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
or,ev %r2,%r2,%r2
ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@ -355,10 +355,10 @@ tracesys_next:
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
or,ev %r2,%r2,%r2
ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@ -931,6 +931,8 @@ ENTRY(sys_call_table)
END(sys_call_table)
#ifdef CONFIG_64BIT
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.align 8
ENTRY(sys_call_table64)
#include <asm/syscall_table_64.h> /* 64-bit native syscalls */


@ -908,6 +908,7 @@ void __init setup_arch(char **cmdline_p)
mem_topology_setup();
/* Set max_mapnr before paging_init() */
set_max_mapnr(max_pfn);
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/*
* Release secondary cpus out of their spinloops at 0x60 now that


@ -292,8 +292,6 @@ void __init mem_init(void)
swiotlb_init(0);
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
kasan_late_init();
memblock_free_all();


@ -1177,6 +1177,9 @@ void __init mem_topology_setup(void)
{
int cpu;
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
min_low_pfn = MEMORY_START >> PAGE_SHIFT;
/*
* Linux/mm assumes node 0 to be online at boot. However this is not
* true on PowerPC, where node 0 is similar to any other node, it
@ -1221,9 +1224,6 @@ void __init initmem_init(void)
{
int nid;
max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
max_pfn = max_low_pfn;
memblock_dump_all();
for_each_online_node(nid) {


@ -193,6 +193,11 @@ config GENERIC_HWEIGHT
config FIX_EARLYCON_MEM
def_bool MMU
config ILLEGAL_POINTER_VALUE
hex
default 0 if 32BIT
default 0xdead000000000000 if 64BIT
config PGTABLE_LEVELS
int
default 3 if 64BIT


@ -71,7 +71,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
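The widened mask above is a plain alignment check: a valid RV64 frame pointer is 8-byte aligned, so the walk has to test the low three bits, not two:

	(fp & 0x7) == 0   ->  fp is a multiple of 8 (valid RV64 frame pointer)
	(fp & 0x3) == 0   ->  only guarantees 4-byte alignment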


@ -53,8 +53,10 @@ static inline int test_facility(unsigned long nr)
unsigned long facilities_als[] = { FACILITIES_ALS };
if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
if (__test_facility(nr, &facilities_als))
return 1;
if (__test_facility(nr, &facilities_als)) {
if (!__is_defined(__DECOMPRESSOR))
return 1;
}
}
return __test_facility(nr, &S390_lowcore.stfle_fac_list);
}


@ -75,6 +75,7 @@ struct perf_sf_sde_regs {
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.mask = 0; \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain); \


@ -1432,7 +1432,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
unsigned long head, base, offset;
struct hws_trailer_entry *te;
if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
if (handle->head & ~PAGE_MASK)
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
@ -1613,7 +1613,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long num_sdb;
aux = perf_get_aux(handle);
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Inform user space new data arrived */
@ -1635,7 +1635,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
__func__);
break;
}
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Update head and alert_mark to new position */
@ -1870,12 +1870,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;


@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
vcpu->stat.diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)


@ -794,46 +794,102 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *pages, unsigned long nr_pages,
const union asce asce, enum gacc_mode mode)
/**
* guest_range_to_gpas() - Calculate guest physical addresses of page fragments
* covering a logical range
* @vcpu: virtual cpu
* @ga: guest address, start of range
* @ar: access register
* @gpas: output argument, may be NULL
* @len: length of range in bytes
* @asce: address-space-control element to use for translation
* @mode: access mode
*
* Translate a logical range to a series of guest absolute addresses,
* such that the concatenation of page fragments starting at each gpa make up
* the whole range.
* The translation is performed as if done by the cpu for the given @asce, @ar,
* @mode and state of the @vcpu.
* If the translation causes an exception, its program interruption code is
* returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
* such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
* a correct exception into the guest.
* The resulting gpas are stored into @gpas, unless it is NULL.
*
* Note: All fragments except the first one start at the beginning of a page.
* When deriving the boundaries of a fragment from a gpa, all but the last
* fragment end at the end of the page.
*
* Return:
* * 0 - success
* * <0 - translation could not be performed, for example if guest
* memory could not be accessed
* * >0 - an access exception occurred. In this case the returned value
* is the program interruption code and the contents of pgm may
* be used to inject an exception into the guest.
*/
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *gpas, unsigned long len,
const union asce asce, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned int offset = offset_in_page(ga);
unsigned int fragment_len;
int lap_enabled, rc = 0;
enum prot_type prot;
unsigned long gpa;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
while (min(PAGE_SIZE - offset, len) > 0) {
fragment_len = min(PAGE_SIZE - offset, len);
ga = kvm_s390_logical_to_effective(vcpu, ga);
if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
PROT_TYPE_LA);
ga &= PAGE_MASK;
if (psw_bits(*psw).dat) {
rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
*pages = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, *pages))
gpa = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, gpa))
rc = PGM_ADDRESSING;
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
ga += PAGE_SIZE;
pages++;
nr_pages--;
if (gpas)
*gpas++ = gpa;
offset = 0;
ga += fragment_len;
len -= fragment_len;
}
return 0;
}
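A worked example of the fragment arithmetic in guest_range_to_gpas() above, assuming 4 KiB pages and a start address 0x10 bytes before a page boundary:

	offset = 0xff0, len = 0x30
	fragment 1: min(0x1000 - 0xff0, 0x30) = 0x10 bytes, ends at the page boundary
	fragment 2: min(0x1000 - 0x000, 0x20) = 0x20 bytes, starts at the next page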
static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
void *data, unsigned int len)
{
const unsigned int offset = offset_in_page(gpa);
const gfn_t gfn = gpa_to_gfn(gpa);
int rc;
if (!gfn_to_memslot(kvm, gfn))
return PGM_ADDRESSING;
if (mode == GACC_STORE)
rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
else
rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
return rc;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long _len, nr_pages, gpa, idx;
unsigned long pages_array[2];
unsigned long *pages;
unsigned long nr_pages, idx;
unsigned long gpa_array[2];
unsigned int fragment_len;
unsigned long *gpas;
int need_ipte_lock;
union asce asce;
int rc;
@ -845,50 +901,45 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!pages)
gpas = gpa_array;
if (nr_pages > ARRAY_SIZE(gpa_array))
gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!gpas)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode == GACC_STORE)
rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
else
rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
len -= _len;
ga += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
len -= fragment_len;
data += fragment_len;
}
if (need_ipte_lock)
ipte_unlock(vcpu);
if (nr_pages > ARRAY_SIZE(pages_array))
vfree(pages);
if (nr_pages > ARRAY_SIZE(gpa_array))
vfree(gpas);
return rc;
}
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
void *data, unsigned long len, enum gacc_mode mode)
{
unsigned long _len, gpa;
unsigned int fragment_len;
unsigned long gpa;
int rc = 0;
while (len && !rc) {
gpa = kvm_s390_real_to_abs(vcpu, gra);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode)
rc = write_guest_abs(vcpu, gpa, data, _len);
else
rc = read_guest_abs(vcpu, gpa, data, _len);
len -= _len;
gra += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
len -= fragment_len;
gra += fragment_len;
data += fragment_len;
}
if (rc > 0)
vcpu->arch.pgm.code = rc;
return rc;
}
@ -904,8 +955,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
enum prot_type prot;
union asce asce;
int rc;
@ -913,23 +962,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (mode == GACC_STORE)
return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
mode, PROT_TYPE_LA);
}
if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
if (rc > 0)
return trans_exc(vcpu, rc, gva, 0, mode, prot);
} else {
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
}
return rc;
return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
}
/**
@ -938,17 +971,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
{
unsigned long gpa;
unsigned long currlen;
union asce asce;
int rc = 0;
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
ipte_lock(vcpu);
while (length > 0 && !rc) {
currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
gva += currlen;
length -= currlen;
}
rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
ipte_unlock(vcpu);
return rc;


@ -344,11 +344,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @data (kernel space) to @gra (guest real address).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest low address and key protection are not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying from @data failed, or
PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to guest memory.
*/
@ -367,11 +368,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @gra (guest real address) to @data (kernel space).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest key protection is not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying to @data failed, or
PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to kernel space.
*/
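A minimal caller-side sketch of the contract documented above; it is the same pattern the diag.c hunk earlier in this merge uses (surrounding variables assumed):

	rc = read_guest_real(vcpu, gra, &parm, sizeof(parm));
	if (rc)		/* -EFAULT, or a positive program interruption code */
		return kvm_s390_inject_prog_cond(vcpu, rc);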


@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
cond_resched();
}
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
long inc = 0;
while (nr) {
inc = min(256L, nr);
nr -= inc;
inc = __cmm_free_pages(inc, counter, list);
if (inc)
break;
cond_resched();
}
return nr + inc;
}
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{


@ -9,6 +9,8 @@
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/cpufeatures.h>
#include <asm/nospec-branch.h>
.pushsection .noinstr.text, "ax"
@ -17,6 +19,9 @@ SYM_FUNC_START(entry_ibpb)
movl $PRED_CMD_IBPB, %eax
xorl %edx, %edx
wrmsr
/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
SYM_FUNC_END(entry_ibpb)
/* For KVM */


@ -939,6 +939,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
/* Now ready to switch the cr3 */
SWITCH_TO_USER_CR3 scratch_reg=%eax
/* Clobbers ZF */
CLEAR_CPU_BUFFERS
/*
* Restore all flags except IF. (We restore IF separately because
@ -949,7 +951,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@ -1221,7 +1222,6 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@ -1242,6 +1242,7 @@ SYM_CODE_START(asm_exc_nmi)
CHECK_AND_APPLY_ESPFIX
RESTORE_ALL_NMI cr3_reg=%edi pop=4
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
@ -1283,6 +1284,7 @@ SYM_CODE_START(asm_exc_nmi)
* 1 - orig_ax
*/
lss (1+5+6)*4(%esp), %esp # back to espfix stack
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)


@ -1586,6 +1586,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
* see comment in intel_pt_interrupt().
*/
WRITE_ONCE(pt->handle_nmi, 0);
barrier();
pt_config_stop(event);
@ -1637,11 +1638,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
return 0;
/*
* Here, handle_nmi tells us if the tracing is on
* There is no PT interrupt in this mode, so stop the trace and it will
* remain stopped while the buffer is copied.
*/
if (READ_ONCE(pt->handle_nmi))
pt_config_stop(event);
pt_config_stop(event);
pt_read_offset(buf);
pt_update_head(pt);
@ -1653,11 +1653,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
ret = perf_output_copy_aux(&pt->handle, handle, from, to);
/*
* If the tracing was on when we turned up, restart it.
* Compiler barrier not needed as we couldn't have been
* preempted by anything that touches pt->handle_nmi.
* Here, handle_nmi tells us if the tracing was on.
* If the tracing was on, restart it.
*/
if (pt->handle_nmi)
if (READ_ONCE(pt->handle_nmi))
pt_config_start(event);
return ret;


@ -217,7 +217,7 @@
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
@ -324,6 +324,7 @@
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@ -455,4 +456,6 @@
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -85,7 +85,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
{
memcpy(args, &regs->bx, 6 * sizeof(args[0]));
args[0] = regs->bx;
args[1] = regs->cx;
args[2] = regs->dx;
args[3] = regs->si;
args[4] = regs->di;
args[5] = regs->bp;
}
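For context, syscall_get_arguments() is the tracer-facing accessor; a short caller-side sketch of the (unchanged) API that the explicit per-register copies above serve:

	unsigned long args[6];

	syscall_get_arguments(task, regs, args);
	/* args[0]..args[5] now hold ebx, ecx, edx, esi, edi, ebp */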
static inline void syscall_set_arguments(struct task_struct *task,


@ -491,7 +491,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
apic_write(APIC_TMICT, 0);
/*
* Setting APIC_LVT_MASKED (above) should be enough to tell
* the hardware that this timer will never fire. But AMD
* erratum 411 and some Intel CPU behavior circa 2024 say
* otherwise. Time for belt and suspenders programming: mask
* the timer _and_ zero the counter registers:
*/
if (v & APIC_LVT_TIMER_TSCDEADLINE)
wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
else
apic_write(APIC_TMICT, 0);
return 0;
}


@ -1061,7 +1061,24 @@ static void __init retbleed_select_mitigation(void)
case RETBLEED_MITIGATION_IBPB:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like SRSO has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
mitigate_smt = true;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
default:
@ -2453,6 +2470,14 @@ static void __init srso_select_mitigation(void)
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
srso_mitigation = SRSO_MITIGATION_IBPB;
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@ -2465,6 +2490,13 @@ static void __init srso_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");


@ -1335,6 +1335,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (vulnerable_to_rfds(ia32_cap))
setup_force_cpu_bug(X86_BUG_RFDS);
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@ -275,6 +275,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {


@ -251,7 +251,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
return false;
}
static bool __get_mem_config_intel(struct rdt_resource *r)
static __init bool __get_mem_config_intel(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;
@ -285,7 +285,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
return true;
}
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;


@ -194,17 +194,10 @@ static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
struct kprobe *kp;
unsigned long faddr;
bool faddr;
kp = get_kprobe((void *)addr);
faddr = ftrace_location(addr);
/*
* Addresses inside the ftrace location are refused by
* arch_check_ftrace_location(). Something went terribly wrong
* if such an address is checked here.
*/
if (WARN_ON(faddr && faddr != addr))
return 0UL;
faddr = ftrace_location(addr) == addr;
/*
* Use the current code if it is not modified by Kprobe
* and it cannot be modified by ftrace.


@ -263,21 +263,17 @@ static void __init probe_page_size_mask(void)
}
}
#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
.family = 6, \
.model = _model, \
}
/*
* INVLPG may not properly flush Global entries
* on these CPUs when PCIDs are enabled.
*/
static const struct x86_cpu_id invlpg_miss_ids[] = {
INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, 0),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0),
{}
};


@ -856,7 +856,7 @@ char * __init xen_memory_setup(void)
* to relocating (and even reusing) pages with kernel text or data.
*/
if (xen_is_e820_reserved(__pa_symbol(_text),
__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
__pa_symbol(_end) - __pa_symbol(_text))) {
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
BUG();
}


@ -2616,8 +2616,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/* if a merge has already been setup, then proceed with that first */
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
new_bfqq = bfqq->new_bfqq;
if (new_bfqq) {
while (new_bfqq->new_bfqq)
new_bfqq = new_bfqq->new_bfqq;
return new_bfqq;
}
/*
* Do not perform queue merging if the device is non
@ -2770,10 +2774,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_queue(bfqq);
}
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct bfq_queue *bfqq)
{
struct bfq_queue *new_bfqq = bfqq->new_bfqq;
bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
(unsigned long)new_bfqq->pid);
/* Save weight raising and idle window of the merged queues */
@ -2841,6 +2847,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
new_bfqq->pid = -1;
bfqq->bic = NULL;
bfq_release_process_ref(bfqd, bfqq);
return new_bfqq;
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@ -2876,14 +2884,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
* fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
new_bfqq);
/*
* If we get here, bio will be queued into new_queue,
* so use new_bfqq to decide whether bio and rq can be
* merged.
*/
bfqq = new_bfqq;
while (bfqq != new_bfqq)
bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/*
* Change also bqfd->bio_bfqq, as
@ -5437,6 +5439,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
struct bfq_queue *old_bfqq = bfqq;
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@ -5452,18 +5455,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
* then complete the merge and redirect it to
* new_bfqq.
*/
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
bfqq, new_bfqq);
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) {
while (bfqq != new_bfqq)
bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
}
bfq_clear_bfqq_just_created(bfqq);
bfq_clear_bfqq_just_created(old_bfqq);
/*
* rq is about to be enqueued into new_bfqq,
* release rq reference on bfqq
*/
bfq_put_queue(bfqq);
bfq_put_queue(old_bfqq);
rq->elv.priv[1] = new_bfqq;
bfqq = new_bfqq;
}
bfq_update_io_thinktime(bfqd, bfqq);
@ -6004,7 +6007,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
if (bfqq_process_refs(bfqq) == 1) {
if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
@ -6189,7 +6192,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* addition, if the queue has also just been split, we have to
* resume its state.
*/
if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
/*


@ -2022,7 +2022,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
struct ioc_now *now)
{
struct ioc_gq *iocg;
u64 dur, usage_pct, nr_cycles;
u64 dur, usage_pct, nr_cycles, nr_cycles_shift;
/* if no debtor, reset the cycle */
if (!nr_debtors) {
@ -2084,10 +2084,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
old_debt = iocg->abs_vdebt;
old_delay = iocg->delay;
nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
if (iocg->abs_vdebt)
iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;
if (iocg->delay)
iocg->delay = iocg->delay >> nr_cycles ?: 1;
iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;
iocg_kick_waitq(iocg, true, now);
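The min_t() clamp above exists because shifting a 64-bit value by 64 or more positions is undefined behaviour in C; capping the count at BITS_PER_LONG - 1 keeps the debt decay well defined, e.g.:

	nr_cycles = 100  ->  shift clamped to 63; the result is 0 for any realistic debt,
	                     and the trailing "?: 1" keeps a nonzero remainder of 1
	nr_cycles =   5  ->  shift of 5; debt decays by a factor of 32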


@ -225,8 +225,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
data->got_token = true;
smp_wmb();
list_del_init(&curr->entry);
wake_up_process(data->task);
list_del_init_careful(&curr->entry);
return 1;
}


@ -691,9 +691,11 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
printk(KERN_ERR " %s: p%d could not be added: %ld\n",
disk->disk_name, p, -PTR_ERR(part));
if (IS_ERR(part)) {
if (PTR_ERR(part) != -ENXIO) {
printk(KERN_ERR " %s: p%d could not be added: %pe\n",
disk->disk_name, p, part);
}
return true;
}


@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
elements =
ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
sizeof(union acpi_object));
if (!elements)
return (AE_NO_MEMORY);
this = string;
for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {


@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
if (info->connection_node) {
second_desc = info->connection_node->object;
if (second_desc == NULL) {
break;
}
if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
status =
acpi_ds_get_buffer_arguments(second_desc);


@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state);
static void acpi_ps_free_field_list(union acpi_parse_object *start);
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_package_length
@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
return_PTR(field);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_free_field_list
*
* PARAMETERS: start - First Op in field list
*
* RETURN: None.
*
* DESCRIPTION: Free all Op objects inside a field list.
*
******************************************************************************/
static void acpi_ps_free_field_list(union acpi_parse_object *start)
{
union acpi_parse_object *cur = start;
union acpi_parse_object *next;
union acpi_parse_object *arg;
while (cur) {
next = cur->common.next;
/* AML_INT_CONNECTION_OP can have a single argument */
arg = acpi_ps_get_arg(cur, 0);
if (arg) {
acpi_ps_free_op(arg);
}
acpi_ps_free_op(cur);
cur = next;
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_arg
@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
while (parser_state->aml < parser_state->pkg_end) {
field = acpi_ps_get_next_field(parser_state);
if (!field) {
if (arg) {
acpi_ps_free_field_list(arg);
}
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_NOT_METHOD_CALL);
if (ACPI_FAILURE(status)) {
acpi_ps_free_op(arg);
return_ACPI_STATUS(status);
}
} else {
/* Single complex argument, nothing returned */
@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_POSSIBLE_METHOD_CALL);
if (ACPI_FAILURE(status)) {
acpi_ps_free_op(arg);
return_ACPI_STATUS(status);
}
if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {


@ -695,27 +695,34 @@ static LIST_HEAD(acpi_battery_list);
static LIST_HEAD(battery_hook_list);
static DEFINE_MUTEX(hook_mutex);
static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
{
struct acpi_battery *battery;
/*
* In order to remove a hook, we first need to
* de-register all the batteries that are registered.
*/
if (lock)
mutex_lock(&hook_mutex);
list_for_each_entry(battery, &acpi_battery_list, list) {
hook->remove_battery(battery->bat);
}
list_del(&hook->list);
if (lock)
mutex_unlock(&hook_mutex);
list_del_init(&hook->list);
pr_info("extension unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
{
__battery_hook_unregister(hook, 1);
mutex_lock(&hook_mutex);
/*
* Ignore already unregistered battery hooks. This might happen
* if a battery hook was previously unloaded due to an error when
* adding a new battery.
*/
if (!list_empty(&hook->list))
battery_hook_unregister_unlocked(hook);
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_unregister);
@ -724,7 +731,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
struct acpi_battery *battery;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&hook->list);
list_add(&hook->list, &battery_hook_list);
/*
* Now that the driver is registered, we need
@ -741,7 +747,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
* hooks.
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
battery_hook_unregister_unlocked(hook);
goto end;
}
}
@ -778,7 +784,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
*/
pr_err("error in extension, unloading: %s",
hook_node->name);
__battery_hook_unregister(hook_node, 0);
battery_hook_unregister_unlocked(hook_node);
}
}
mutex_unlock(&hook_mutex);
@ -811,7 +817,7 @@ static void __exit battery_hook_exit(void)
* need to remove the hooks.
*/
list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
__battery_hook_unregister(hook, 1);
battery_hook_unregister(hook);
}
mutex_destroy(&hook_mutex);
}

View File

@ -533,8 +533,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
status = acpi_evaluate_object_typed(dev->handle, "_STR",
NULL, &buffer,
ACPI_TYPE_BUFFER);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;

View File

@ -786,6 +786,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
unsigned long tmp;
int ret = 0;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
@ -822,8 +825,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
if (ec->global_lock) {
@ -850,7 +851,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
return acpi_ec_transaction(ec, &t);
return acpi_ec_transaction_unlocked(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
@ -860,7 +861,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
acpi_ec_transaction(ec, &t) : 0;
acpi_ec_transaction_unlocked(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
@ -876,6 +877,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
return result;
}
static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
{
int result;
u8 d;
struct transaction t = {.command = ACPI_EC_COMMAND_READ,
.wdata = &address, .rdata = &d,
.wlen = 1, .rlen = 1};
result = acpi_ec_transaction_unlocked(ec, &t);
*data = d;
return result;
}
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
@ -886,6 +900,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
.wdata = wdata, .rdata = NULL,
.wlen = 2, .rlen = 0};
return acpi_ec_transaction_unlocked(ec, &t);
}
int ec_read(u8 addr, u8 *val)
{
int err;
@ -1306,6 +1330,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
struct acpi_ec *ec = handler_context;
int result = 0, i, bytes = bits / 8;
u8 *value = (u8 *)value64;
u32 glk;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@ -1313,13 +1338,25 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
mutex_lock(&ec->mutex);
if (ec->global_lock) {
acpi_status status;
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
result = -ENODEV;
goto unlock;
}
}
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
acpi_ec_read_unlocked(ec, address, value) :
acpi_ec_write_unlocked(ec, address, *value);
if (result < 0)
break;
}
@ -1327,6 +1364,12 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
mutex_unlock(&ec->mutex);
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;

View File

@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
if (!dev || !tps68470_regmap) {
dev_warn(dev, "dev or regmap is NULL\n");
return -EINVAL;
}
if (!tps68470_regmap)
return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");

View File

@ -442,6 +442,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
},
},
{
/* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
},
},
{
/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
@ -455,6 +462,12 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
},
},
{
/* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
},
},
{
/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
.matches = {
@ -481,6 +494,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
},
},
{
/* Asus ExpertBook B2502CVA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"),
},
},
{
/* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
.matches = {

View File

@ -1283,6 +1283,10 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
if (ref->freeze)
binder_dequeue_work(ref->proc, &ref->freeze->work);
binder_stats_deleted(BINDER_STAT_REF);
}
@ -3832,7 +3836,6 @@ binder_request_freeze_notification(struct binder_proc *proc,
{
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
bool is_frozen;
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
if (!freeze)
@ -3848,31 +3851,30 @@ binder_request_freeze_notification(struct binder_proc *proc,
}
binder_node_lock(ref->node);
if (ref->freeze || !ref->node->proc) {
binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
proc->pid, thread->pid,
ref->freeze ? "already set" : "dead node");
if (ref->freeze) {
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(freeze);
return -EINVAL;
}
binder_inner_proc_lock(ref->node->proc);
is_frozen = ref->node->proc->is_frozen;
binder_inner_proc_unlock(ref->node->proc);
INIT_LIST_HEAD(&freeze->work.entry);
freeze->cookie = handle_cookie->cookie;
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
freeze->is_frozen = is_frozen;
ref->freeze = freeze;
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
if (ref->node->proc) {
binder_inner_proc_lock(ref->node->proc);
freeze->is_frozen = ref->node->proc->is_frozen;
binder_inner_proc_unlock(ref->node->proc);
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
}
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
@ -5145,6 +5147,15 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
struct binder_ref_freeze *freeze;
freeze = container_of(w, struct binder_ref_freeze, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered freeze notification, %016llx\n",
(u64)freeze->cookie);
kfree(freeze);
} break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@ -5558,6 +5569,7 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
{
struct binder_node *prev = NULL;
struct rb_node *n;
struct binder_ref *ref;
@ -5566,7 +5578,10 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
struct binder_node *node;
node = rb_entry(n, struct binder_node, rb_node);
binder_inc_node_tmpref_ilocked(node);
binder_inner_proc_unlock(proc);
if (prev)
binder_put_node(prev);
binder_node_lock(node);
hlist_for_each_entry(ref, &node->refs, node_entry) {
/*
@ -5592,10 +5607,15 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
}
binder_inner_proc_unlock(ref->proc);
}
prev = node;
binder_node_unlock(node);
binder_inner_proc_lock(proc);
if (proc->is_dead)
break;
}
binder_inner_proc_unlock(proc);
if (prev)
binder_put_node(prev);
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
@ -6258,6 +6278,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
binder_release_work(proc, &proc_wrapper(proc)->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@ -6391,6 +6412,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
case BINDER_WORK_FROZEN_BINDER:
seq_printf(m, "%shas frozen binder\n", prefix);
break;
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
seq_printf(m, "%shas cleared freeze notification\n", prefix);
break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
@ -6540,6 +6567,10 @@ static void print_binder_proc(struct seq_file *m,
seq_puts(m, " has delivered dead binder\n");
break;
}
list_for_each_entry(w, &proc_wrapper(proc)->delivered_freeze, entry) {
seq_puts(m, " has delivered freeze binder\n");
break;
}
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;

View File

@ -566,3 +566,4 @@ EXPORT_SYMBOL_GPL(GKI_struct_gic_chip_data);
#include <linux/swap_slots.h>
const struct swap_slots_cache *GKI_struct_swap_slots_cache;
EXPORT_SYMBOL_GPL(GKI_struct_swap_slots_cache);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_io_statistics);

View File

@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
} sil_blacklist [] = {
} sil_quirks[] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
@ -601,8 +601,8 @@ static void sil_thaw(struct ata_port *ap)
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
* 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
* The Maxtor quirk is in the blacklist, but I'm keeping the original
* 20040111 - Seagate drives affected by the Mod15Write bug are quirked
* The Maxtor quirk is in sil_quirks, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
@ -621,9 +621,9 @@ static void sil_dev_config(struct ata_device *dev)
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)
if (!strcmp(sil_blacklist[n].product, model_num)) {
quirks = sil_blacklist[n].quirk;
for (n = 0; sil_quirks[n].product; n++)
if (!strcmp(sil_quirks[n].product, model_num)) {
quirks = sil_quirks[n].quirk;
break;
}

View File

@ -104,7 +104,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
/* return -EIO for reading a bus attribute without show() */
ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@ -116,7 +117,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
/* return -EIO for writing a bus attribute without store() */
ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);

View File

@ -787,6 +787,26 @@ static void fw_abort_batch_reqs(struct firmware *fw)
mutex_unlock(&fw_lock);
}
/*
* Reject firmware file names with ".." path components.
* There are drivers that construct firmware file names from device-supplied
* strings, and we don't want some device to be able to tell us "I would like to
* be sent my firmware from ../../../etc/shadow, please".
*
* Search for ".." surrounded by either '/' or start/end of string.
*
* This intentionally only looks at the firmware name, not at the firmware base
* directory or at symlink contents.
*/
static bool name_contains_dotdot(const char *name)
{
size_t name_len = strlen(name);
return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
strstr(name, "/../") != NULL ||
(name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
}
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@ -805,6 +825,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
name);
ret = -EINVAL;
goto out;
}
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
@ -860,6 +888,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
* It must not contain any ".." path components - "foo/bar..bin" is
* allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*

View File

@ -362,6 +362,7 @@ ata_rw_frameinit(struct frame *f)
}
ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
}
@ -402,6 +403,8 @@ aoecmd_ata_rw(struct aoedev *d)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
} else {
dev_put(f->t->ifp->nd);
}
return 1;
}
@ -484,10 +487,13 @@ resend(struct aoedev *d, struct frame *f)
memcpy(h->dst, t->addr, sizeof h->dst);
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
if (skb == NULL) {
dev_put(t->ifp->nd);
return;
}
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
@ -618,6 +624,8 @@ probe(struct aoetgt *t)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
} else {
dev_put(f->t->ifp->nd);
}
}
@ -1403,6 +1411,7 @@ aoecmd_ata_id(struct aoedev *d)
ah->cmdstat = ATA_CMD_ID_ATA;
ah->lba3 = 0xa0;
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
d->rttavg = RTTAVG_INIT;
@ -1412,6 +1421,8 @@ aoecmd_ata_id(struct aoedev *d)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb)
f->sent = ktime_get();
else
dev_put(t->ifp->nd);
return skb;
}

View File

@ -3429,10 +3429,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
}
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];

View File

@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;

View File

@ -105,7 +105,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
} else {
ret = devm_request_irq(dev, cfg->irq_bt,
btmrvl_wake_irq_bt,
0, "bt_wake", card);
IRQF_NO_AUTOEN, "bt_wake", card);
if (ret) {
dev_err(dev,
"Failed to request irq_bt %d (%d)\n",
@ -114,7 +114,6 @@ static int btmrvl_sdio_probe_of(struct device *dev,
/* Configure wakeup (enabled by default) */
device_init_wakeup(dev, true);
disable_irq(cfg->irq_bt);
}
}

View File

@ -928,7 +928,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
/* Fake CSR devices don't seem to support short-transfer */
size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
else
/* Use maximum HCI Event size so the USB stack handles
* ZLP/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {

View File

@ -84,6 +84,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
return -ENODEV;
}
map = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(map)) {
dev_err(dev,
"could not find Integrator/AP system controller\n");

View File

@ -679,6 +679,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
/* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
clk_disable_unprepare(drvdata->clk);
return -EBUSY;
}

View File

@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");

View File

@ -48,6 +48,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret)
ret = tpm2_commit_space(chip, space, buf, &len);
else
tpm2_flush_space(chip);
out_rc:
return ret ? ret : len;

View File

@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
struct tpm_space *space = &chip->work_space;
int i;
if (!space)
return;
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context(chip, space->context_tbl[i]);

View File

@ -2049,25 +2049,27 @@ static int virtcons_probe(struct virtio_device *vdev)
multiport = true;
}
err = init_vqs(portdev);
if (err < 0) {
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
goto free_chrdev;
}
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
INIT_LIST_HEAD(&portdev->list);
virtio_device_ready(portdev->vdev);
INIT_WORK(&portdev->config_work, &config_work_handler);
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
}
err = init_vqs(portdev);
if (err < 0) {
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
goto free_chrdev;
}
virtio_device_ready(portdev->vdev);
if (multiport) {
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
if (err < 0) {
dev_err(&vdev->dev,

View File

@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
goto err_free_ilp;
}
ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
ilp->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(ilp->regmap)) {
err = PTR_ERR(ilp->regmap);
goto err_free_ilp;

View File

@ -498,9 +498,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);

View File

@ -579,8 +579,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);

View File

@ -665,6 +665,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
.hw = &disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@ -700,6 +701,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
.hw = &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@ -825,6 +827,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
.hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},

View File

@ -3229,7 +3229,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_1_gdsc = {
@ -3237,7 +3237,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_2_gdsc = {
@ -3245,7 +3245,7 @@ static struct gdsc pcie_2_gdsc = {
.pd = {
.name = "pcie_2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_card_gdsc = {

View File

@ -408,7 +408,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),

View File

@ -442,12 +442,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
struct clk *clk = NULL;
struct clk *clk;
unsigned int idx;
unsigned long flags;
for (idx = 0; idx < nr_clk; idx++, list++) {
flags = list->flags;
clk = NULL;
/* catch simple muxes */
switch (list->branch_type) {

View File

@ -258,6 +258,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
}
clk = of_clk_get_from_provider(&clkspec);
of_node_put(clkspec.np);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);

View File

@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np)
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
iounmap(cpu0_base);
pr_err("Unknown frequency\n");
return -EINVAL;
}
@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
return msm_timer_init(freq, 32, irq, !!percpu_offset);
ret = msm_timer_init(freq, 32, irq, !!percpu_offset);
if (ret)
iounmap(cpu0_base);
return ret;
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);

View File

@ -53,6 +53,9 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_shift;
unsigned long rev_offset;
bool multi_regulator;
/* Backward compatibility hack: Might have missing syscon */
#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
u8 quirks;
};
struct ti_cpufreq_data {
@ -156,6 +159,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
.efuse_mask = BIT(3),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@ -183,6 +187,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.efuse_mask = BIT(9),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = true,
.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@ -197,6 +202,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.efuse_mask = 0,
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
@ -216,7 +222,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
if (ret == -EIO) {
if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->efuse_offset, 4);
@ -257,7 +263,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
if (ret == -EIO) {
if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->rev_offset, 4);

View File

@ -1129,6 +1129,8 @@ void sev_pci_init(void)
return;
err:
sev_dev_destroy(psp_master);
psp_master->sev_data = NULL;
}

View File

@ -115,7 +115,7 @@ void efi_retrieve_tpm2_eventlog(void)
}
/* Allocate space for the logs and copy them. */
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {

View File

@ -25,12 +25,6 @@
#define MSG_RING BIT(1)
#define TAG_SZ 32
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
return container_of(client, struct tegra_bpmp, mbox.client);
}
static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{

View File

@ -404,6 +404,8 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
gpio->dcache[GPIO_BANK(offset)] = reg;
iowrite32(reg, addr);
/* Flush write */
ioread32(addr);
}
static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@ -1157,7 +1159,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio_id)
return -EINVAL;
gpio->clk = of_clk_get(pdev->dev.of_node, 0);
gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(gpio->clk)) {
dev_warn(&pdev->dev,
"Failed to get clock from devicetree, debouncing disabled\n");

View File

@ -293,7 +293,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
* serve as EDMA event triggers.
*/
static void gpio_irq_disable(struct irq_data *d)
static void gpio_irq_mask(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d);
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
@ -302,7 +302,7 @@ static void gpio_irq_disable(struct irq_data *d)
writel_relaxed(mask, &g->clr_rising);
}
static void gpio_irq_enable(struct irq_data *d)
static void gpio_irq_unmask(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d);
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
@ -328,8 +328,8 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_unmask = gpio_irq_unmask,
.irq_mask = gpio_irq_mask,
.irq_set_type = gpio_irq_type,
.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};

View File

@ -1110,15 +1110,18 @@ static long linereq_set_config_unlocked(struct linereq *lr,
for (i = 0; i < lr->num_lines; i++) {
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
/*
* Lines not explicitly reconfigured as input or output
* are left unchanged.
*/
if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
continue;
polarity_change =
(!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
@ -1126,7 +1129,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
} else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;

View File

@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/nospec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/device.h>
@ -147,7 +148,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
return &gdev->descs[hwnum];
return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
}
EXPORT_SYMBOL_GPL(gpiochip_get_desc);

View File

@ -2098,23 +2098,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
struct edid *edid;
int edid_size =
max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
edid = kmalloc(edid_size, GFP_KERNEL);
if (edid) {
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
int edid_size;
if (fake_edid_record->ucFakeEDIDLength == 128)
edid_size = fake_edid_record->ucFakeEDIDLength;
else
edid_size = fake_edid_record->ucFakeEDIDLength * 128;
edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
edid_size, GFP_KERNEL);
if (edid) {
if (drm_edid_is_valid(edid)) {
adev->mode_info.bios_hardcoded_edid = edid;
adev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
} else {
kfree(edid);
}
}
record += struct_size(fake_edid_record,
ucFakeEDIDString,
edid_size);
} else {
/* empty fake edid record must be 3 bytes long */
record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
record += fake_edid_record->ucFakeEDIDLength ?
fake_edid_record->ucFakeEDIDLength + 2 :
sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;

View File

@ -1250,6 +1250,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
/* https://bbs.openkylin.top/t/topic/171497 */
{ 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
/* HP 705G4 DM with R5 2400G */
{ 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
{ 0, 0, 0, 0, 0 },
};

View File

@ -201,7 +201,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)

Some files were not shown because too many files have changed in this diff.