Merge keystone/android12-5.10-keystone-qcom-release.81+ (4a857f8) into msm-5.10
* refs/heads/tmp-4a857f8:
  Revert "ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree"
  FROMGIT: iommu/iova: Improve 32-bit free space estimate
  UPSTREAM: arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
  UPSTREAM: arm64: Use the clearbhb instruction in mitigations
  UPSTREAM: KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
  UPSTREAM: arm64: Mitigate spectre style branch history side channels
  UPSTREAM: arm64: Do not include __READ_ONCE() block in assembly files
  UPSTREAM: KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
  UPSTREAM: arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
  UPSTREAM: arm64: Add percpu vectors for EL1
  UPSTREAM: arm64: entry: Add macro for reading symbol addresses from the trampoline
  UPSTREAM: arm64: entry: Add vectors that have the bhb mitigation sequences
  UPSTREAM: arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
  UPSTREAM: arm64: entry: Allow the trampoline text to occupy multiple pages
  UPSTREAM: arm64: entry: Make the kpti trampoline's kpti sequence optional
  UPSTREAM: arm64: entry: Move trampoline macros out of ifdef'd section
  UPSTREAM: arm64: entry: Don't assume tramp_vectors is the start of the vectors
  UPSTREAM: arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
  UPSTREAM: arm64: entry: Move the trampoline data page before the text page
  UPSTREAM: arm64: entry: Free up another register on kpti's tramp_exit path
  UPSTREAM: arm64: entry: Make the trampoline cleanup optional
  UPSTREAM: arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
  UPSTREAM: arm64: entry.S: Add ventry overflow sanity checks
  UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_RPRES
  UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_AFP
  UPSTREAM: arm64: add ID_AA64ISAR2_EL1 sys register
  UPSTREAM: arm64: Add HWCAP for self-synchronising virtual counter
  UPSTREAM: arm64: Add Cortex-X2 CPU part definition
  UPSTREAM: arm64: cputype: Add CPU implementor & types for the Apple M1 cores
  UPSTREAM: usb: gadget: clear related members when goto fail
  UPSTREAM: usb: gadget: don't release an existing dev->buf
  UPSTREAM: sr9700: sanity check for packet length
  UPSTREAM: io_uring: return back safer resurrect
  UPSTREAM: sctp: use call_rcu to free endpoint

Change-Id: If25f62e772c45d25c4df797b06a901825d42b361
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
@@ -235,7 +235,15 @@ infrastructure:
     | DPB                          | [3-0]   |    y    |
     +------------------------------+---------+---------+

-  6) ID_AA64MMFR2_EL1 - Memory model feature register 2
+  6) ID_AA64MMFR0_EL1 - Memory model feature register 0

     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | ECV                          | [63-60] |    y    |
     +------------------------------+---------+---------+

  7) ID_AA64MMFR2_EL1 - Memory model feature register 2

     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
@@ -243,7 +251,7 @@ infrastructure:
     | AT                           | [35-32] |    y    |
     +------------------------------+---------+---------+

-  7) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+  8) ID_AA64ZFR0_EL1 - SVE feature ID register 0

     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
@@ -267,6 +275,23 @@ infrastructure:
     | SVEVer                       | [3-0]   |    y    |
     +------------------------------+---------+---------+

  8) ID_AA64MMFR1_EL1 - Memory model feature register 1

     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | AFP                          | [47-44] |    y    |
     +------------------------------+---------+---------+

  9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2

     +------------------------------+---------+---------+
     | Name                         |  bits   | visible |
     +------------------------------+---------+---------+
     | RPRES                        | [7-4]   |    y    |
     +------------------------------+---------+---------+


  Appendix I: Example
  -------------------
@@ -245,6 +245,18 @@ HWCAP2_MTE
    Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
    by Documentation/arm64/memory-tagging-extension.rst.

HWCAP2_ECV

    Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.

HWCAP2_AFP

    Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.

HWCAP2_RPRES

    Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.

4. Unused AT_HWCAP bits
-----------------------
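For context: a minimal userspace sketch (not part of this patch) showing how the
new bits are consumed via the standard getauxval(3)/AT_HWCAP2 interface; the
program itself is illustrative only.

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Each bit mirrors the ID-register field documented above. */
	printf("ecv:   %s\n", (hwcap2 & HWCAP2_ECV)   ? "yes" : "no");
	printf("afp:   %s\n", (hwcap2 & HWCAP2_AFP)   ? "yes" : "no");
	printf("rpres: %s\n", (hwcap2 & HWCAP2_RPRES) ? "yes" : "no");
	return 0;
}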
@@ -1 +1 @@
-306d827cfbb9633f5be11438b2fc8922bf4d2b3c
+64099431c232d4a95f621411747a3972cc1c8061
@@ -1239,6 +1239,15 @@ config UNMAP_KERNEL_AT_EL0

	  If unsure, say Y.

config MITIGATE_SPECTRE_BRANCH_HISTORY
	bool "Mitigate Spectre style attacks against branch history" if EXPERT
	default y
	help
	  Speculation attacks against some high-performance processors can
	  make use of branch history to influence future speculation.
	  When taking an exception from user-space, a sequence of branches
	  or a firmware call overwrites the branch history.

config RODATA_FULL_DEFAULT_ENABLED
	bool "Apply r/o permissions of VM areas also to their linear aliases"
	default y
@@ -107,6 +107,13 @@
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro clearbhb
	hint	#22
	.endm

/*
 * Speculation barrier
 */
@@ -801,4 +808,50 @@ USER(\label, ic	ivau, \tmp2)	// invalidate I line PoU

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Save/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif	/* __ASM_ASSEMBLER_H */
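For context: functionally, __mitigate_spectre_bhb_loop is just a CPU-specific
number of taken branches that displace user-controlled branch history. A rough
C model of the idea (illustrative only — the real sequence must stay in
assembly so no unrelated branches slip in before the exception return):

/* Rough model: k taken branches (k = 8, 24 or 32 depending on CPU)
 * overwrite the branch history buffer. Illustrative only. */
static inline void bhb_clear_loop(unsigned int k)
{
	while (k--)
		asm volatile("b 1f\n1:" ::: "memory");
}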
@@ -50,6 +50,7 @@ struct cpuinfo_arm64 {
	u64		reg_id_aa64dfr1;
	u64		reg_id_aa64isar0;
	u64		reg_id_aa64isar1;
	u64		reg_id_aa64isar2;
	u64		reg_id_aa64mmfr0;
	u64		reg_id_aa64mmfr1;
	u64		reg_id_aa64mmfr2;
@@ -70,8 +70,9 @@
#define ARM64_HAS_LDAPR				59
#define ARM64_KVM_PROTECTED_MODE		60
#define ARM64_WORKAROUND_TSB_FLUSH_FAILURE	61
#define ARM64_SPECTRE_BHB			62

-/* kabi: reserve 62 - 76 for future cpu capabilities */
+/* kabi: reserve 63 - 76 for future cpu capabilities */
#define ARM64_NCAPS				76

#endif /* __ASM_CPUCAPS_H */
@@ -613,6 +613,35 @@ static inline bool cpu_supports_mixed_endian_el0(void)
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}


static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_CLEARBHB_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

@@ -59,6 +59,7 @@
#define ARM_CPU_IMP_NVIDIA		0x4E
#define ARM_CPU_IMP_FUJITSU		0x46
#define ARM_CPU_IMP_HISI		0x48
#define ARM_CPU_IMP_APPLE		0x61

#define ARM_CPU_PART_AEM_V8		0xD0F
#define ARM_CPU_PART_FOUNDATION		0xD00
@@ -72,9 +73,14 @@
#define ARM_CPU_PART_CORTEX_A76		0xD0B
#define ARM_CPU_PART_NEOVERSE_N1	0xD0C
#define ARM_CPU_PART_CORTEX_A77		0xD0D
#define ARM_CPU_PART_NEOVERSE_V1	0xD40
#define ARM_CPU_PART_CORTEX_A78		0xD41
#define ARM_CPU_PART_CORTEX_X1		0xD44
#define ARM_CPU_PART_CORTEX_A510	0xD46
#define ARM_CPU_PART_CORTEX_A710	0xD47
#define ARM_CPU_PART_CORTEX_X2		0xD48
#define ARM_CPU_PART_NEOVERSE_N2	0xD49
#define ARM_CPU_PART_CORTEX_A78C	0xD4B

#define APM_CPU_PART_POTENZA		0x000

@@ -102,6 +108,9 @@

#define HISI_CPU_PART_TSV110		0xD01

#define APPLE_CPU_PART_M1_ICESTORM	0x022
#define APPLE_CPU_PART_M1_FIRESTORM	0x023

#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -112,9 +121,14 @@
#define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -133,6 +147,8 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)

/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001		MIDR_FUJITSU_A64FX
@@ -62,9 +62,11 @@ enum fixed_addresses {
#endif /* CONFIG_ACPI_APEI_GHES */

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	FIX_ENTRY_TRAMP_TEXT3,
	FIX_ENTRY_TRAMP_TEXT2,
	FIX_ENTRY_TRAMP_TEXT1,
	FIX_ENTRY_TRAMP_DATA,
	FIX_ENTRY_TRAMP_TEXT,
-#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	__end_of_permanent_fixed_addresses,

@@ -105,6 +105,9 @@
#define KERNEL_HWCAP_RNG		__khwcap2_feature(RNG)
#define KERNEL_HWCAP_BTI		__khwcap2_feature(BTI)
#define KERNEL_HWCAP_MTE		__khwcap2_feature(MTE)
#define KERNEL_HWCAP_ECV		__khwcap2_feature(ECV)
#define KERNEL_HWCAP_AFP		__khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES		__khwcap2_feature(RPRES)

/*
 * This yields a mask that user programs can use to figure out what
@@ -64,6 +64,7 @@ enum aarch64_insn_hint_cr_op {
	AARCH64_INSN_HINT_PSB  = 0x11 << 5,
	AARCH64_INSN_HINT_TSB  = 0x12 << 5,
	AARCH64_INSN_HINT_CSDB = 0x14 << 5,
	AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,

	AARCH64_INSN_HINT_BTI  = 0x20 << 5,
	AARCH64_INSN_HINT_BTIC = 0x22 << 5,
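For reference: HINT instructions place their 7-bit immediate at bits [11:5],
which is why the enum values above are pre-shifted by 5. A small sketch of the
resulting encoding (my arithmetic, not text from the patch):

#include <stdint.h>

/* HINT #imm7 == base opcode 0xd503201f with imm7 at bits [11:5]. */
static inline uint32_t aarch64_hint(unsigned int imm7)
{
	return 0xd503201fu | (imm7 << 5);
}
/* aarch64_hint(22) == 0xd50322df: the "clearbhb" hint used by assembler.h. */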
@@ -705,6 +705,11 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
@@ -5,7 +5,7 @@
#ifndef __ASM_RWONCE_H
#define __ASM_RWONCE_H

-#ifdef CONFIG_LTO
+#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__)

#include <linux/compiler_types.h>
#include <asm/alternative-macros.h>
@@ -66,7 +66,7 @@
})

#endif	/* !BUILD_VDSO */
-#endif	/* CONFIG_LTO */
+#endif	/* CONFIG_LTO && !__ASSEMBLY__ */

#include <asm-generic/rwonce.h>

@@ -22,4 +22,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];

static inline size_t entry_tramp_text_size(void)
{
	return __entry_tramp_text_end - __entry_tramp_text_start;
}

#endif /* __ASM_SECTIONS_H */
@@ -92,6 +92,9 @@ void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_SPECTRE_H */
@@ -180,6 +180,7 @@

#define SYS_ID_AA64ISAR0_EL1		sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1		sys_reg(3, 0, 0, 6, 1)
#define SYS_ID_AA64ISAR2_EL1		sys_reg(3, 0, 0, 6, 2)

#define SYS_ID_AA64MMFR0_EL1		sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1		sys_reg(3, 0, 0, 7, 1)
@@ -753,6 +754,21 @@
#define ID_AA64ISAR1_GPI_NI		0x0
#define ID_AA64ISAR1_GPI_IMP_DEF	0x1

/* id_aa64isar2 */
#define ID_AA64ISAR2_CLEARBHB_SHIFT	28
#define ID_AA64ISAR2_RPRES_SHIFT	4
#define ID_AA64ISAR2_WFXT_SHIFT		0

#define ID_AA64ISAR2_RPRES_8BIT		0x0
#define ID_AA64ISAR2_RPRES_12BIT	0x1
/*
 * Value 0x1 has been removed from the architecture, and is
 * reserved, but has not yet been removed from the ARM ARM
 * as of ARM DDI 0487G.b.
 */
#define ID_AA64ISAR2_WFXT_NI		0x0
#define ID_AA64ISAR2_WFXT_SUPPORTED	0x2

/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT		60
#define ID_AA64PFR0_CSV2_SHIFT		56
@@ -852,6 +868,8 @@
#endif

/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT	60
#define ID_AA64MMFR1_AFP_SHIFT		44
#define ID_AA64MMFR1_ETS_SHIFT		36
#define ID_AA64MMFR1_TWED_SHIFT		32
#define ID_AA64MMFR1_XNX_SHIFT		28
arch/arm64/include/asm/vectors.h (new file)
@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 ARM Ltd.
 */
#ifndef __ASM_VECTORS_H
#define __ASM_VECTORS_H

#include <linux/bug.h>
#include <linux/percpu.h>

#include <asm/fixmap.h>

extern char vectors[];
extern char tramp_vectors[];
extern char __bp_harden_el1_vectors[];

/*
 * Note: the order of this enum corresponds to two arrays in entry.S:
 * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
 * 'full fat' vectors are used directly.
 */
enum arm64_bp_harden_el1_vectors {
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	/*
	 * Perform the BHB loop mitigation, before branching to the canonical
	 * vectors.
	 */
	EL1_VECTOR_BHB_LOOP,

	/*
	 * Make the SMC call for firmware mitigation, before branching to the
	 * canonical vectors.
	 */
	EL1_VECTOR_BHB_FW,

	/*
	 * Use the ClearBHB instruction, before branching to the canonical
	 * vectors.
	 */
	EL1_VECTOR_BHB_CLEAR_INSN,
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */

	/*
	 * Remap the kernel before branching to the canonical vectors.
	 */
	EL1_VECTOR_KPTI,
};

#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
#define EL1_VECTOR_BHB_LOOP		-1
#define EL1_VECTOR_BHB_FW		-1
#define EL1_VECTOR_BHB_CLEAR_INSN	-1
#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */

/* The vectors to use on return from EL0. e.g. to remap the kernel */
DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);

#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_VALIAS	0
#endif

static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{
	if (arm64_kernel_unmapped_at_el0())
		return (char *)TRAMP_VALIAS + SZ_2K * slot;

	WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);

	return __bp_harden_el1_vectors + SZ_2K * slot;
}

#endif /* __ASM_VECTORS_H */
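The SZ_2K stride used by arm64_get_bp_hardening_vector() comes straight from
the architecture: a vector table is 16 entries of 128 bytes each. A
compile-time restatement of that assumption (illustrative, not part of the
patch):

/* 16 vector entries x 128 bytes = 2KB: the per-slot stride assumed
 * by arm64_get_bp_hardening_vector(). Illustrative only. */
_Static_assert(16 * 128 == 2048, "vector tables are SZ_2K apart");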
@@ -75,5 +75,8 @@
#define HWCAP2_RNG		(1 << 16)
#define HWCAP2_BTI		(1 << 17)
#define HWCAP2_MTE		(1 << 18)
#define HWCAP2_ECV		(1 << 19)
#define HWCAP2_AFP		(1 << 20)
#define HWCAP2_RPRES		(1 << 21)

#endif /* _UAPI__ASM_HWCAP_H */
@@ -270,6 +270,11 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED	3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED		(1U << 4)

#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3	KVM_REG_ARM_FW_REG(3)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL		0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL		1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED	2

/* SVE registers */
#define KVM_REG_ARM64_SVE		(0x15 << KVM_REG_ARM_COPROC_SHIFT)

@@ -478,6 +478,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
@@ -65,6 +65,7 @@
#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/percpu.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
@@ -72,6 +73,7 @@
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -82,6 +84,7 @@
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -107,6 +110,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;

/*
 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
 * support it?
@@ -226,6 +231,12 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
@@ -280,7 +291,7 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
	/*
@@ -326,6 +337,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
@@ -633,6 +645,7 @@ static const struct __ftr_reg_entry {
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
			       &id_aa64isar1_override),
	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -920,6 +933,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
@@ -1135,6 +1149,8 @@ void update_cpu_features(int cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
				      info->reg_id_aa64isar2, boot->reg_id_aa64isar2);

	/*
	 * Differing PARange support is fine as long as all peripherals and
@@ -1245,6 +1261,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR2_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
@@ -1546,6 +1563,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)

	int cpu = smp_processor_id();

	if (__this_cpu_read(this_cpu_vector) == vectors) {
		const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

		__this_cpu_write(this_cpu_vector, v);
	}

	/*
	 * We don't need to rewrite the page-tables if either we've done
	 * it already or we have KASLR enabled and therefore have not
@@ -2434,6 +2457,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_MTE
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
#endif /* CONFIG_ARM64_MTE */
	HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
	HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
	HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
	{},
};

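For context: the ARM64_FTR_BITS() entries above all describe 4-bit unsigned
ID-register fields. A sketch of the extraction they imply, mirroring
cpuid_feature_extract_unsigned_field() (names here are illustrative):

#include <stdint.h>

/* Pull a 4-bit unsigned feature field out of an ID register value. */
static inline unsigned int id_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}
/* e.g. id_field(isar2, ID_AA64ISAR2_RPRES_SHIFT) >= 1 means FEAT_RPRES. */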
@@ -94,6 +94,9 @@ static const char *const hwcap_str[] = {
	[KERNEL_HWCAP_RNG]		= "rng",
	[KERNEL_HWCAP_BTI]		= "bti",
	[KERNEL_HWCAP_MTE]		= "mte",
	[KERNEL_HWCAP_ECV]		= "ecv",
	[KERNEL_HWCAP_AFP]		= "afp",
	[KERNEL_HWCAP_RPRES]		= "rpres",
};

#ifdef CONFIG_COMPAT
@@ -390,6 +393,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
@@ -62,18 +62,21 @@

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.Lventry_start\@:
	.if	\el == 0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
alternative_else_nop_endif
.Lskip_tramp_vectors_cleanup\@:
	.endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
@@ -120,11 +123,15 @@ alternative_else_nop_endif
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

-	.macro tramp_alias, dst, sym
+	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
-	add	\dst, \dst, #(\sym - .entry.tramp.text)
+	adr_l	\tmp, \sym
+	add	\dst, \dst, \tmp
+	adr_l	\tmp, .entry.tramp.text
+	sub	\dst, \dst, \tmp
	.endm

	/*
@@ -141,7 +148,7 @@ alternative_cb_end
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
-alternative_cb	spectre_v4_patch_fw_mitigation_conduit
+alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
@@ -450,21 +457,26 @@ alternative_else_nop_endif
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
-	ldr	lr, [sp, #S_LR]
-	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
-	alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+	ldr	lr, [sp, #S_LR]
+	add	sp, sp, #S_FRAME_SIZE		// restore sp
+	eret
+alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
-	msr	far_el1, x30
-	tramp_alias	x30, tramp_exit_native
+	msr	far_el1, x29
+	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
-	tramp_alias	x30, tramp_exit_compat
+	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
+	ldr	lr, [sp, #S_LR]
+	add	sp, sp, #S_FRAME_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

@@ -861,12 +873,6 @@ SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-/*
- * Exception vectors trampoline.
- */
-	.pushsection ".entry.tramp.text", "ax"

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
@@ -900,12 +906,47 @@ alternative_else_nop_endif
	 */
	.endm

-	.macro tramp_ventry, regsize = 64
+	.macro tramp_data_page	dst
+	adr_l	\dst, .entry.tramp.text
+	sub	\dst, \dst, PAGE_SIZE
+	.endm
+
+	.macro tramp_data_read_var	dst, var
+#ifdef CONFIG_RANDOMIZE_BASE
+	tramp_data_page		\dst
+	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
+	ldr	\dst, [\dst]
+#else
+	ldr	\dst, =\var
+#endif
+	.endm
+
+#define BHB_MITIGATION_NONE	0
+#define BHB_MITIGATION_LOOP	1
+#define BHB_MITIGATION_FW	2
+#define BHB_MITIGATION_INSN	3
+
+	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
+
+	.if	\bhb == BHB_MITIGATION_LOOP
+	/*
+	 * This sequence must appear before the first indirect branch. i.e. the
+	 * ret out of tramp_ventry. It appears here because x30 is free.
+	 */
+	__mitigate_spectre_bhb_loop	x30
+	.endif // \bhb == BHB_MITIGATION_LOOP
+
+	.if	\bhb == BHB_MITIGATION_INSN
+	clearbhb
+	isb
+	.endif // \bhb == BHB_MITIGATION_INSN
+
+	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
@@ -915,46 +956,75 @@ alternative_else_nop_endif
	b	.
2:
	tramp_map_kernel	x30
-#ifdef CONFIG_RANDOMIZE_BASE
-	adr	x30, tramp_vectors + PAGE_SIZE
	alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
-	ldr	x30, [x30]
-#else
-	ldr	x30, =vectors
-#endif
+	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
-	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
-	add	x30, x30, #(1b - tramp_vectors)
	isb
+	.else
+	ldr	x30, =vectors
+	.endif // \kpti == 1
+
+	.if	\bhb == BHB_MITIGATION_FW
+	/*
+	 * The firmware sequence must appear before the first indirect branch.
+	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+	 * mapped to save/restore the registers the SMC clobbers.
+	 */
+	__mitigate_spectre_bhb_fw
+	.endif // \bhb == BHB_MITIGATION_FW
+
+	add	x30, x30, #(1b - \vector_start + 4)
	ret
+.org 1b + 128	// Did we overflow the ventry slot?
	.endm

	.macro tramp_exit, regsize = 64
-	adr	x30, tramp_vectors
+	tramp_data_read_var	x30, this_cpu_vector
+	this_cpu_offset x29
+	ldr	x30, [x30, x29]
+
	msr	vbar_el1, x30
-	tramp_unmap_kernel	x30
+	ldr	lr, [sp, #S_LR]
+	tramp_unmap_kernel	x29
	.if	\regsize == 64
-	mrs	x30, far_el1
+	mrs	x29, far_el1
	.endif
+	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	sb
	.endm

-	.align	11
-SYM_CODE_START_NOALIGN(tramp_vectors)
+	.macro	generate_tramp_vector,	kpti, bhb
+.Lvector_start\@:
	.space	0x400

-	tramp_ventry
-	tramp_ventry
-	tramp_ventry
-	tramp_ventry
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
+	.endr
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
+	.endr
+	.endm

-	tramp_ventry	32
-	tramp_ventry	32
-	tramp_ventry	32
-	tramp_ventry	32
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+	.align	11
+SYM_CODE_START_NOALIGN(tramp_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
+	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
+	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
@@ -971,12 +1041,56 @@ SYM_CODE_END(tramp_exit_compat)
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
+__entry_tramp_data_vectors:
	.quad	vectors
+#ifdef CONFIG_ARM_SDE_INTERFACE
+__entry_tramp_data___sdei_asm_handler:
+	.quad	__sdei_asm_handler
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+__entry_tramp_data_this_cpu_vector:
+	.quad	this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

+/*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+	.macro generate_el1_vector, bhb
+.Lvector_start\@:
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
+
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error			// Error EL1h
+
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
+	.endr
+	.rept	4
+	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
+	.endr
+	.endm
+
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
+	.pushsection ".entry.text", "ax"
+	.align	11
+SYM_CODE_START(__bp_harden_el1_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
+	generate_el1_vector	bhb=BHB_MITIGATION_FW
+	generate_el1_vector	bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+SYM_CODE_END(__bp_harden_el1_vectors)
+	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
@@ -1066,13 +1180,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

-#ifdef CONFIG_RANDOMIZE_BASE
-	adr	x4, tramp_vectors + PAGE_SIZE
-	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
-	ldr	x4, [x4]
-#else
-	ldr	x4, =__sdei_asm_handler
-#endif
+	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
@@ -1095,13 +1203,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.ltorg
	.popsection		// .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
-	.pushsection ".rodata", "a"
-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
-	.quad	__sdei_asm_handler
-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
-	.popsection		// .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
@@ -1209,7 +1310,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
+	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
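For context: the reworked tramp_alias computes the fixmap alias of a trampoline
symbol as its offset within .entry.tramp.text rebased onto TRAMP_VALIAS, which
is what lets symbols live beyond the first 4K page. A C model of the address
math (illustrative only):

/* alias = TRAMP_VALIAS + (sym - start of .entry.tramp.text) */
static inline unsigned long tramp_alias_addr(unsigned long sym,
					     unsigned long tramp_text_start,
					     unsigned long tramp_valias)
{
	return tramp_valias + (sym - tramp_text_start);
}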
@@ -66,6 +66,10 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);

/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -18,15 +18,18 @@
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void)
	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
-		return sprintf(buf, "Not affected\n");
+		if (bhb_state == SPECTRE_UNAFFECTED)
+			return sprintf(buf, "Not affected\n");
+
+		/*
+		 * Platforms affected by Spectre-BHB can't report
+		 * "Not affected" for Spectre-v2.
+		 */
+		v2_str = "CSV2";
+		fallthrough;
	case SPECTRE_MITIGATED:
-		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
+			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
@@ -554,9 +594,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
-						   __le32 *origptr,
-						   __le32 *updptr, int nr_inst)
+void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
+					      __le32 *origptr,
+					      __le32 *updptr, int nr_inst)
{
	u32 insn;

@@ -770,3 +810,344 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif
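Putting the string helpers together, cpu_show_spectre_v2() can emit lines such
as the following in /sys/devices/system/cpu/vulnerabilities/spectre_v2 (an
illustrative enumeration of the combinations above, not an exhaustive list):

	Not affected
	Mitigation: Branch predictor hardening, BHB
	Mitigation: Branch predictor hardening, but not BHB
	Mitigation: CSV2, BHB
	Vulnerable: Unprivileged eBPF enabled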
@@ -330,7 +330,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
	"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
@@ -1388,10 +1388,7 @@ static int kvm_init_vector_slots(void)
	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);

-	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
-		return 0;
-
-	if (!has_vhe()) {
+	if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
					       __BP_HARDEN_HYP_VECS_SZ, &base);
		if (err)
@@ -62,6 +62,10 @@ el1_sync:				// Guest trapped into EL2

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

+	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
+			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap

wa_epilogue:
@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
+alternative_cb spectre_bhb_patch_wa3
+	/* Patched to mov WA3 when supported */
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+alternative_cb_end
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
@@ -205,6 +212,8 @@ SYM_CODE_END(__kvm_hyp_vector)
	spectrev2_smccc_wa1_smc
	.else
	stp	x0, x1, [sp, #-16]!
+	mitigate_spectre_bhb_loop	x0
+	mitigate_spectre_bhb_clear_insn
	.endif
	.if \indirect != 0
	alternative_cb  kvm_patch_vector_branch
@@ -132,8 +132,10 @@ int hyp_map_vectors(void)
	phys_addr_t phys;
	void *bp_base;

-	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
+	if (!kvm_system_needs_idmapped_vectors()) {
+		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
+	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	bp_base = (void *)__pkvm_create_private_mapping(phys,
@@ -11,6 +11,7 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>
@@ -26,6 +27,7 @@
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

@@ -71,7 +73,7 @@ NOKPROBE_SYMBOL(__activate_traps);

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
-	extern char vectors[];	/* kernel exception vectors */
+	const char *host_vectors = vectors;

	___deactivate_traps(vcpu);

@@ -85,7 +87,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
-	write_sysreg(vectors, vbar_el1);
+
+	if (!arm64_kernel_unmapped_at_el0())
+		host_vectors = __this_cpu_read(this_cpu_vector);
+	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);

@@ -58,6 +58,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 			break;
 		}
 		break;
+	case ARM_SMCCC_ARCH_WORKAROUND_3:
+		switch (arm64_get_spectre_bhb_state()) {
+		case SPECTRE_VULNERABLE:
+			break;
+		case SPECTRE_MITIGATED:
+			val = SMCCC_RET_SUCCESS;
+			break;
+		case SPECTRE_UNAFFECTED:
+			val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
+			break;
+		}
+		break;
 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
 		val = SMCCC_RET_SUCCESS;
 		break;
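With this case added, a guest can probe for the mitigation the same way it does for WORKAROUND_1/2: query ARCH_FEATURES for the WORKAROUND_3 ID and, if the answer is non-negative, invoke it on paths that need the branch history sanitised. An illustrative guest-side sketch (error handling trimmed):

    #include <linux/arm-smccc.h>

    static bool have_smccc_wa3(void)
    {
    	struct arm_smccc_res res;

    	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
    			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
    	/* 0 (supported) or 1 (unaffected) both mean "safe to rely on" */
    	return (int)res.a0 >= 0;
    }

    static void flush_branch_history(void)
    {
    	/* the mitigation call itself takes no extra arguments */
    	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_3, NULL);
    }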
@@ -397,7 +397,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
-	return 3;		/* PSCI version and two workaround registers */
+	return 4;		/* PSCI version and three workaround registers */
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -411,6 +411,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
 		return -EFAULT;
 
+	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
+		return -EFAULT;
+
 	return 0;
 }
@@ -450,6 +453,17 @@ static int get_kernel_wa_level(u64 regid)
 		case SPECTRE_VULNERABLE:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 		}
 		break;
+	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+		switch (arm64_get_spectre_bhb_state()) {
+		case SPECTRE_VULNERABLE:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
+		case SPECTRE_MITIGATED:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
+		case SPECTRE_UNAFFECTED:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
+		}
+		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
 	}
 
 	return -EINVAL;
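The new pseudo-register makes the WA3 mitigation state visible to userspace so it can be checked and carried across migration like the other two workarounds. A hedged sketch of how a VMM would read it with the standard KVM_GET_ONE_REG ioctl (the WORKAROUND_3 register ID comes from the arm64 uapi headers added by this series):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative only: fetch the raw register value; the caller
     * compares it against the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_*
     * levels, mirroring get_kernel_wa_level() on each host.
     */
    static int read_wa3_level(int vcpu_fd, uint64_t *out)
    {
    	uint64_t val;
    	struct kvm_one_reg reg = {
    		.id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
    		.addr = (uint64_t)(uintptr_t)&val,
    	};

    	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
    		return -1;

    	*out = val;
    	return 0;
    }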
@@ -466,6 +480,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
+	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
 		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
 		break;
 	default:
@@ -511,6 +526,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
 		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
 			return -EINVAL;
 
@@ -1447,7 +1447,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=6 */
 	ID_SANITISED(ID_AA64ISAR0_EL1),
 	ID_SANITISED(ID_AA64ISAR1_EL1),
-	ID_UNALLOCATED(6,2),
+	ID_SANITISED(ID_AA64ISAR2_EL1),
 	ID_UNALLOCATED(6,3),
 	ID_UNALLOCATED(6,4),
 	ID_UNALLOCATED(6,5),
@@ -593,6 +593,8 @@ early_param("rodata", parse_rodata);
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __init map_entry_trampoline(void)
 {
+	int i;
+
 	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
 
@@ -601,11 +603,15 @@ static int __init map_entry_trampoline(void)
 
 	/* Map only the text into the trampoline page table */
 	memset(tramp_pg_dir, 0, PGD_SIZE);
-	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
-			     prot, __pgd_pgtable_alloc, 0);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
+			     entry_tramp_text_size(), prot,
+			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
 
 	/* Map both the text and data into the kernel page table */
-	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
+		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+			     pa_start + i * PAGE_SIZE, prot);
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern char __entry_tramp_data_start[];
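entry_tramp_text_size() can now exceed one page, so both mappings are sized dynamically. Fixmap virtual addresses decrease as the index increases, so decrementing the index by one moves the address up by one page: physical page i lands at TRAMP_VALIAS + i * PAGE_SIZE. A small self-contained check of the arithmetic, with hypothetical stand-ins for the kernel's fixmap constants:

    #include <stdio.h>

    #define PAGE_SHIFT		12
    #define PAGE_SIZE		(1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    /* hypothetical values for illustration only */
    #define FIXADDR_TOP		0xfffffffffffe0000UL
    #define FIX_ENTRY_TRAMP_TEXT1	10UL
    #define fix_to_virt(idx)	(FIXADDR_TOP - ((idx) << PAGE_SHIFT))

    int main(void)
    {
    	unsigned long tramp_text_size = 3 * PAGE_SIZE; /* e.g. with BHB vectors */

    	for (unsigned long i = 0;
    	     i < DIV_ROUND_UP(tramp_text_size, PAGE_SIZE); i++)
    		printf("page %lu -> fixmap idx %lu, va %#lx\n",
    		       i, FIX_ENTRY_TRAMP_TEXT1 - i,
    		       fix_to_virt(FIX_ENTRY_TRAMP_TEXT1 - i));
    	return 0;	/* prints three consecutive, ascending VAs */
    }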
@@ -140,10 +140,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
 	if (free == cached_iova ||
 	    (free->pfn_hi < iovad->dma_32bit_pfn &&
-	     free->pfn_lo >= cached_iova->pfn_lo)) {
+	     free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
+
+	if (free->pfn_lo < iovad->dma_32bit_pfn)
 		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	}
 
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
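max32_alloc_size is a failure watermark: once a 32-bit allocation fails, requests of that size or larger are refused without walking the tree. Previously the watermark was only reset inside the cached-node branch, so frees that did not move cached32_node left it stale and 32-bit allocations kept failing needlessly; now any free below dma_32bit_pfn resets it. The allocator-side check it feeds, roughly as it appears in __alloc_and_insert_iova_range() (abridged sketch):

    	/* give up early if a same-or-larger 32-bit allocation already
    	 * failed and no free below dma_32bit_pfn has reset the
    	 * estimate since
    	 */
    	if (limit_pfn <= iovad->dma_32bit_pfn &&
    	    size >= iovad->max32_alloc_size)
    		goto iova32_full;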
@@ -236,7 +236,6 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)
 
 	(*br)->type = type;
 
-	mutex_lock(&bc->ranges_lock);
 	if (next->type == type) {
 		if (type == TRIMMED)
 			list_del(&next->trimmed_list);
@@ -250,7 +249,6 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)
 		rb_erase(&(*br)->node, &bc->ranges);
 		kfree(*br);
 	}
-	mutex_unlock(&bc->ranges_lock);
 
 	*br = NULL;
 }
@@ -601,7 +599,6 @@ static void dm_bow_dtr(struct dm_target *ti)
 	struct bow_context *bc = (struct bow_context *) ti->private;
 	struct kobject *kobj;
 
-	mutex_lock(&bc->ranges_lock);
 	while (rb_first(&bc->ranges)) {
 		struct bow_range *br = container_of(rb_first(&bc->ranges),
 						    struct bow_range, node);
@@ -609,8 +606,6 @@ static void dm_bow_dtr(struct dm_target *ti)
 		rb_erase(&br->node, &bc->ranges);
 		kfree(br);
 	}
-	mutex_unlock(&bc->ranges_lock);
-
 	if (bc->workqueue)
 		destroy_workqueue(bc->workqueue);
 	if (bc->bufio)
@@ -1186,7 +1181,6 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 		return;
 	}
 
-	mutex_lock(&bc->ranges_lock);
 	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
 		struct bow_range *br = container_of(i, struct bow_range, node);
 
@@ -1194,11 +1188,11 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 				   readable_type[br->type],
 				   (unsigned long long)br->sector);
 		if (result >= end)
-			goto unlock;
+			return;
 
 		result += scnprintf(result, end - result, "\n");
 		if (result >= end)
-			goto unlock;
+			return;
 
 		if (br->type == TRIMMED)
 			++trimmed_range_count;
@@ -1220,22 +1214,19 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
 		if (!rb_next(i)) {
 			scnprintf(result, end - result,
 				  "\nERROR: Last range not of type TOP");
-			goto unlock;
+			return;
 		}
 
 		if (br->sector > range_top(br)) {
 			scnprintf(result, end - result,
 				  "\nERROR: sectors out of order");
-			goto unlock;
+			return;
 		}
 	}
 
 	if (trimmed_range_count != trimmed_list_length)
 		scnprintf(result, end - result,
 			  "\nERROR: not all trimmed ranges in trimmed list");
-
-unlock:
-	mutex_unlock(&bc->ranges_lock);
 }
 
 static void dm_bow_status(struct dm_target *ti, status_type_t type,
@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		/* ignore the CRC length */
 		len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
-		if (len > ETH_FRAME_LEN)
+		if (len > ETH_FRAME_LEN || len > skb->len)
 			return 0;
 
 		/* the last packet of current skb */
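len comes straight from the device-supplied packet header, so a malfunctioning or malicious adapter can claim more bytes than the URB actually delivered; checking against skb->len as well stops the subsequent copy from reading past the end of the receive buffer. A minimal illustration of the failure mode, with made-up numbers:

    #include <stdio.h>

    #define ETH_FRAME_LEN	1514

    int main(void)
    {
    	unsigned int skb_len = 64;	/* bytes actually received       */
    	/* header claims 0x0404 = 1028 bytes; minus 4 CRC = 1024      */
    	unsigned int len = (0x04 | (0x04 << 8)) - 4;

    	/* old check passed (1024 <= 1514) and over-read 960 bytes;
    	 * the added 'len > skb->len' test rejects it
    	 */
    	if (len > ETH_FRAME_LEN || len > skb_len)
    		puts("dropped: claimed length exceeds received data");
    	return 0;
    }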
@@ -1828,8 +1828,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 	spin_lock_irq (&dev->lock);
 	value = -EINVAL;
 	if (dev->buf) {
+		spin_unlock_irq(&dev->lock);
 		kfree(kbuf);
-		goto fail;
+		return value;
 	}
 	dev->buf = kbuf;
@@ -1876,8 +1877,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 
 	value = usb_gadget_probe_driver(&gadgetfs_driver);
 	if (value != 0) {
-		kfree (dev->buf);
-		dev->buf = NULL;
+		spin_lock_irq(&dev->lock);
+		goto fail;
 	} else {
 		/* at this point "good" hardware has for the first time
 		 * let the USB the host see us. alternatively, if users
@@ -1894,6 +1895,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 	return value;
 
 fail:
+	dev->config = NULL;
+	dev->hs_config = NULL;
+	dev->dev = NULL;
 	spin_unlock_irq (&dev->lock);
 	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
 	kfree (dev->buf);
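Two fixes meet here: a second writer that finds dev->buf already set now drops the lock and frees only its own kbuf instead of jumping to the fail path (which frees dev->buf, i.e. the other writer's buffer), and the fail path now clears the config/hs_config/dev pointers so a later retry does not see half-initialised state. The general shape of the race-safe bail-out, as an illustrative pattern (struct dev_state is hypothetical, not the driver's type):

    static ssize_t install_buf(struct dev_state *dev, char *kbuf)
    {
    	spin_lock_irq(&dev->lock);
    	if (dev->buf) {			/* another writer won the race */
    		spin_unlock_irq(&dev->lock);
    		kfree(kbuf);		/* free only our private copy  */
    		return -EINVAL;		/* dev->buf stays untouched    */
    	}
    	dev->buf = kbuf;		/* publish under the lock      */
    	spin_unlock_irq(&dev->lock);
    	return 0;
    }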
@@ -1009,6 +1009,18 @@ static inline bool __io_match_files(struct io_kiocb *req,
 			req->work.identity->files == files;
 }
 
+static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
+{
+	bool got = percpu_ref_tryget(ref);
+
+	/* already at zero, wait for ->release() */
+	if (!got)
+		wait_for_completion(compl);
+	percpu_ref_resurrect(ref);
+	if (got)
+		percpu_ref_put(ref);
+}
+
 static bool io_match_task(struct io_kiocb *head,
 			  struct task_struct *task,
 			  struct files_struct *files)
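percpu_ref_resurrect() must not race with the ref's release side: if the count already hit zero, ->release() may still be in flight. The helper therefore tries to take a reference first and, failing that, waits for the release completion before resurrecting. An illustrative interleaving, as a commented sketch:

    /*
     * Why try-get first:
     *
     *   registration path                last reference holder
     *   -----------------                ---------------------
     *   percpu_ref_kill(&ctx->refs);
     *                                    percpu_ref_put() drops to zero
     *                                    ->release() runs and will
     *                                      complete(&ctx->ref_comp)
     *   percpu_ref_resurrect()           <- unsafe while ->release()
     *                                       is still running
     *
     * io_refs_resurrect() closes the window: when percpu_ref_tryget()
     * fails, the count already reached zero, so it waits on the
     * completion until ->release() has finished before resurrecting.
     */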
@@ -9756,12 +9768,11 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			if (ret < 0)
 				break;
 		} while (1);
-
 		mutex_lock(&ctx->uring_lock);
 
 		if (ret) {
-			percpu_ref_resurrect(&ctx->refs);
-			goto out_quiesce;
+			io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+			return ret;
 		}
 	}
 
@@ -9854,7 +9865,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 	if (io_register_op_must_quiesce(opcode)) {
 		/* bring the ctx back to life */
 		percpu_ref_reinit(&ctx->refs);
-out_quiesce:
 		reinit_completion(&ctx->ref_comp);
 	}
 	return ret;
@@ -87,6 +87,11 @@
 			   ARM_SMCCC_SMC_32,				\
 			   0, 0x7fff)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_3					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0x3fff)
+
 #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	1
 
 /* Paravirtualised time calls (defined by ARM DEN0057A) */
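ARM_SMCCC_CALL_VAL packs the call type, calling convention, owning entity and function number into the 32-bit function ID, so WORKAROUND_3 lands next to its siblings: WA1 = 0x80008000, WA2 = 0x80007fff, WA3 = 0x80003fff. Worked out in a standalone check (the packing mirrors the macro; shift values are the SMCCC-defined field positions):

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    static uint32_t smccc_call_val(uint32_t type, uint32_t cc,
    			       uint32_t owner, uint32_t func)
    {
    	return (type << 31) | (cc << 30) | (owner << 24) | func;
    }

    int main(void)
    {
    	/* fast call (1), SMC32 convention (0), arch owner (0) */
    	assert(smccc_call_val(1, 0, 0, 0x8000) == 0x80008000u); /* WA1 */
    	assert(smccc_call_val(1, 0, 0, 0x7fff) == 0x80007fffu); /* WA2 */
    	assert(smccc_call_val(1, 0, 0, 0x3fff) == 0x80003fffu); /* WA3 */
    	puts("SMCCC workaround IDs check out");
    	return 0;
    }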
@@ -103,6 +103,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
 void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
@@ -113,9 +114,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 				  struct net *net,
 				  const union sctp_addr *laddr,
 				  const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
-			    struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+				    struct net *net, int *pos, void *p);
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 		       struct sctp_info *info);
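Both callbacks now receive the endpoint that the iterator pinned, instead of deriving it from tsp->asoc->ep after the fact, by which time the association may have been peeled off to another socket. A minimal (hypothetical) callback matching the new signature:

    /* Illustrative callback for sctp_transport_traverse_process();
     * the iterator guarantees 'ep' holds a reference for the call.
     */
    static int count_transports_cb(struct sctp_endpoint *ep,
    			       struct sctp_transport *tsp, void *p)
    {
    	int *count = p;

    	(*count)++;
    	return 0;	/* non-zero would stop the walk */
    }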
@@ -1339,6 +1339,7 @@ struct sctp_endpoint {
 
 	      u32 secid;
 	      u32 peer_secid;
+	struct rcu_head rcu;
 };
 
 /* Recover the outter endpoint structure. */
@@ -1354,7 +1355,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
 void sctp_endpoint_free(struct sctp_endpoint *);
 void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
 void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
 struct sctp_association *sctp_endpoint_lookup_assoc(
 	const struct sctp_endpoint *ep,
@@ -292,9 +292,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
 	return err;
 }
 
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	struct sk_buff *skb = commp->skb;
@@ -304,6 +303,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 	int err = 0;
 
 	lock_sock(sk);
+	if (ep != tsp->asoc->ep)
+		goto release;
 	list_for_each_entry(assoc, &ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
@@ -346,9 +347,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 	return err;
 }
 
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	const struct inet_diag_req_v2 *r = commp->r;
@@ -507,8 +507,8 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
 
-	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
-				net, &pos, &commp);
+	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+					net, &pos, &commp);
 	cb->args[2] = pos;
 
 done:
@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 }
 
 /* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+	struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+	struct sock *sk = ep->base.sk;
+
+	sctp_sk(sk)->ep = NULL;
+	sock_put(sk);
+
+	kfree(ep);
+	SCTP_DBG_OBJCNT_DEC(ep);
+}
+
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
 	struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	if (sctp_sk(sk)->bind_hash)
 		sctp_put_port(sk);
 
-	sctp_sk(sk)->ep = NULL;
-	/* Give up our hold on the sock */
-	sock_put(sk);
-
-	kfree(ep);
-	SCTP_DBG_OBJCNT_DEC(ep);
+	call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
 }
 
 /* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
 {
-	refcount_inc(&ep->base.refcnt);
+	return refcount_inc_not_zero(&ep->base.refcnt);
 }
 
 /* Release a reference to an endpoint and clean up if there are
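The destructor pair implements a common lookup-versus-teardown idiom: readers can still find the endpoint through the transport hashtable after its refcount drops to zero, so sctp_endpoint_hold() switches to refcount_inc_not_zero() (which fails on a dying object instead of resurrecting it), and the final free is deferred through call_rcu() so concurrent RCU-protected lookups never dereference freed memory. The generic shape of the pattern, with illustrative names rather than the SCTP ones:

    #include <linux/refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
    	refcount_t refcnt;
    	struct rcu_head rcu;
    };

    static void obj_free_rcu(struct rcu_head *head)
    {
    	kfree(container_of(head, struct obj, rcu));
    }

    static bool obj_get(struct obj *o)	/* may fail: object is dying */
    {
    	return refcount_inc_not_zero(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
    	if (refcount_dec_and_test(&o->refcnt))
    		call_rcu(&o->rcu, obj_free_rcu);	/* defer past readers */
    }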
@@ -5228,11 +5228,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
 
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
-			    struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+				    struct net *net, int *pos, void *p)
+{
 	struct rhashtable_iter hti;
 	struct sctp_transport *tsp;
+	struct sctp_endpoint *ep;
 	int ret;
 
 again:
@@ -5241,26 +5242,32 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-		ret = cb(tsp, p);
-		if (ret)
-			break;
+		ep = tsp->asoc->ep;
+		if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+			ret = cb(ep, tsp, p);
+			if (ret)
+				break;
+			sctp_endpoint_put(ep);
+		}
 		(*pos)++;
 		sctp_transport_put(tsp);
 	}
 	sctp_transport_walk_stop(&hti);
 
 	if (ret) {
-		if (cb_done && !cb_done(tsp, p)) {
+		if (cb_done && !cb_done(ep, tsp, p)) {
 			(*pos)++;
+			sctp_endpoint_put(ep);
 			sctp_transport_put(tsp);
 			goto again;
 		}
+		sctp_endpoint_put(ep);
 		sctp_transport_put(tsp);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
 
 /* 7.2.1 Association Status (SCTP_STATUS)
 