Merge 302754d023 ("ARM: include unprivileged BPF status in Spectre V2 reporting") into android12-5.10-lts

Steps on the way to 5.10.105

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Icf4b1cec6b09090408b7449161ebed08579dfa23

This commit is contained in: commit 7ab81873bd
arch/arm/include/asm/assembler.h
@@ -107,6 +107,16 @@
 	.endm
 #endif
 
+#if __LINUX_ARM_ARCH__ < 7
+	.macro	dsb, args
+	mcr	p15, 0, r0, c7, c10, 4
+	.endm
+
+	.macro	isb, args
+	mcr	p15, 0, r0, c7, c5, 4
+	.endm
+#endif
+
 	.macro	asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	.if	\save
arch/arm/include/asm/spectre.h (new file, 32 lines)
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+	SPECTRE_UNAFFECTED,
+	SPECTRE_MITIGATED,
+	SPECTRE_VULNERABLE,
+};
+
+enum {
+	__SPECTRE_V2_METHOD_BPIALL,
+	__SPECTRE_V2_METHOD_ICIALLU,
+	__SPECTRE_V2_METHOD_SMC,
+	__SPECTRE_V2_METHOD_HVC,
+	__SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+	SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+	SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+	SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+	SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+	SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+#endif
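The split between the unshifted __SPECTRE_V2_METHOD_* indices and the BIT()-encoded SPECTRE_V2_METHOD_* values lets spectre_v2_update_state() (added in arch/arm/kernel/spectre.c below) accumulate every installed mitigation as a bitmask while keeping only the worst state seen. A minimal userspace sketch of that accumulation, re-declaring the two relevant constants with a local BIT() macro (in the kernel they come from this header and <linux/bits.h>); the Cortex-A15 inputs are only an illustration:

#include <stdio.h>

#define BIT(n) (1U << (n))

enum { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };
enum {
	SPECTRE_V2_METHOD_ICIALLU = BIT(1),	/* __SPECTRE_V2_METHOD_ICIALLU */
	SPECTRE_V2_METHOD_LOOP8   = BIT(4),	/* __SPECTRE_V2_METHOD_LOOP8 */
};

static unsigned int spectre_v2_state;
static unsigned int spectre_v2_methods;

/* Same logic as the kernel's spectre_v2_update_state(): remember the
 * worst state reported so far and OR in each method that was used. */
static void update_state(unsigned int state, unsigned int method)
{
	if (state > spectre_v2_state)
		spectre_v2_state = state;
	spectre_v2_methods |= method;
}

int main(void)
{
	/* e.g. a Cortex-A15: ICIALLU for Spectre v2, the loop8 sequence for BHB */
	update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_ICIALLU);
	update_state(SPECTRE_MITIGATED, SPECTRE_V2_METHOD_LOOP8);

	/* Two method bits are now set, so cpu_show_spectre_v2() below would
	 * fall through to its "Multiple mitigations" default case. */
	printf("state=%u methods=%#x\n", spectre_v2_state, spectre_v2_methods);
	return 0;
}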
arch/arm/include/asm/vmlinux.lds.h
@@ -26,6 +26,11 @@
 #define ARM_MMU_DISCARD(x)	x
 #endif
 
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section)						\
+	sym##_start = LOADADDR(section);				\
+	sym##_end = LOADADDR(section) + SIZEOF(section)
+
 #define PROC_INFO							\
 		. = ALIGN(4);						\
 		__proc_info_begin = .;					\
@@ -110,19 +115,31 @@
  * only thing that matters is their relative offsets
  */
 #define ARM_VECTORS							\
-	__vectors_start = .;						\
-	.vectors 0xffff0000 : AT(__vectors_start) {			\
-		*(.vectors)						\
+	__vectors_lma = .;						\
+	OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {		\
+		.vectors {						\
+			*(.vectors)					\
+		}							\
+		.vectors.bhb.loop8 {					\
+			*(.vectors.bhb.loop8)				\
+		}							\
+		.vectors.bhb.bpiall {					\
+			*(.vectors.bhb.bpiall)				\
+		}							\
 	}								\
-	. = __vectors_start + SIZEOF(.vectors);				\
-	__vectors_end = .;						\
+	ARM_LMA(__vectors, .vectors);					\
+	ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);		\
+	ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);		\
+	. = __vectors_lma + SIZEOF(.vectors) +				\
+		SIZEOF(.vectors.bhb.loop8) +				\
+		SIZEOF(.vectors.bhb.bpiall);				\
 									\
-	__stubs_start = .;						\
-	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {		\
+	__stubs_lma = .;						\
+	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {		\
 		*(.stubs)						\
 	}								\
-	. = __stubs_start + SIZEOF(.stubs);				\
-	__stubs_end = .;						\
+	ARM_LMA(__stubs, .stubs);					\
+	. = __stubs_lma + SIZEOF(.stubs);				\
 									\
 	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
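Because the three vector tables now sit in one OVERLAY, they share the run-time address 0xffff0000 and differ only in their load addresses; ARM_LMA() exports each section's LMA range as sym_start/sym_end linker symbols. C code consumes those symbols as byte arrays and copies the selected table into the live vectors page, which is what the traps.c hunk below does. A stripped-down sketch of that idiom (kernel context assumed, symbol names as defined by this linker script change, illustration only):

#include <linux/string.h>

/* Linker-provided LMA bounds of the loop8 alternative vector table. */
extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];

static void copy_bhb_loop8_vectors(void *vectors_page)
{
	/* The symbols are addresses, not objects: subtracting the two
	 * array names yields the section size in bytes. */
	size_t size = __vectors_bhb_loop8_end - __vectors_bhb_loop8_start;

	memcpy(vectors_page, __vectors_bhb_loop8_start, size);
}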
arch/arm/kernel/Makefile
@@ -106,4 +106,6 @@ endif
 
 obj-$(CONFIG_HAVE_ARM_SMCCC)	+= smccc-call.o
 
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
 extra-y := $(head-y) vmlinux.lds
arch/arm/kernel/entry-armv.S
@@ -1005,12 +1005,11 @@ vector_\name:
 	sub	lr, lr, #\correction
 	.endif
 
-	@
-	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-	@ (parent CPSR)
-	@
+	@ Save r0, lr_<exception> (parent PC)
 	stmia	sp, {r0, lr}		@ save r0, lr
-	mrs	lr, spsr
+
+	@ Save spsr_<exception> (parent CPSR)
+2:	mrs	lr, spsr
 	str	lr, [sp, #8]		@ save spsr
 
 	@
@@ -1031,6 +1030,44 @@ vector_\name:
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.subsection 1
+	.align 5
+vector_bhb_loop8_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mov	r0, #8
+1:	b	. + 4
+	subs	r0, r0, #1
+	bne	1b
+	dsb
+	isb
+	b	2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+	b	2b
+ENDPROC(vector_bhb_bpiall_\name)
+	.previous
+#endif
+
 	.align	2
 	@ handler addresses follow this label
 1:
@@ -1039,6 +1076,10 @@ ENDPROC(vector_\name)
 	.section .stubs, "ax", %progbits
 	@ This must be the first word
 	.word vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.word vector_bhb_loop8_swi
+	.word vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
  ARM(	swi	SYS_ERROR0	)
@@ -1153,8 +1194,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
  */
+	.subsection 2
 	vector_stub	fiq, FIQ_MODE, 4
 
 	.long	__fiq_usr			@  0  (USR_26 / USR_32)
@@ -1187,6 +1230,30 @@ vector_addrexcptn:
 	W(b)	vector_irq
 	W(b)	vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_loop8_und
+	W(ldr)	pc, .L__vectors_bhb_loop8_start + 0x1004
+	W(b)	vector_bhb_loop8_pabt
+	W(b)	vector_bhb_loop8_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_loop8_irq
+	W(b)	vector_bhb_loop8_fiq
+
+	.section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_bpiall_und
+	W(ldr)	pc, .L__vectors_bhb_bpiall_start + 0x1008
+	W(b)	vector_bhb_bpiall_pabt
+	W(b)	vector_bhb_bpiall_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_bpiall_irq
+	W(b)	vector_bhb_bpiall_fiq
+#endif
+
 	.data
 	.align	2
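For reference: the loop8 stubs discard any attacker-trained branch history by running a short loop of architecturally taken branches (eight iterations, the count used for the affected cores) and then synchronising with dsb/isb before rejoining the common vector code at label 2. A rough C/inline-assembly rendering of just that history-overwrite loop, assuming an ARMv7 target and GCC-style asm; the kernel uses the hand-written stubs above, not a helper like this:

static inline void bhb_overwrite_loop8(void)
{
	unsigned int i;

	/* Eight iterations, each executing taken branches (the forward "b"
	 * and the backward "bne"), followed by barriers so the effect is
	 * complete before any further speculation matters. */
	asm volatile(
		"	mov	%0, #8\n"
		"1:	b	2f\n"
		"2:	subs	%0, %0, #1\n"
		"	bne	1b\n"
		"	dsb\n"
		"	isb\n"
		: "=&r" (i) : : "cc", "memory");
}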
arch/arm/kernel/entry-common.S
@@ -162,6 +162,29 @@ ENDPROC(ret_from_fork)
  *-----------------------------------------------------------------------------
  */
 
+	.align	5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mov	r8, #8
+1:	b	2f
+2:	subs	r8, r8, #1
+	bne	1b
+	dsb
+	isb
+	b	3f
+ENDPROC(vector_bhb_loop8_swi)
+
+	.align	5
+ENTRY(vector_bhb_bpiall_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
+	isb
+	b	3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
 	.align	5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
@@ -169,6 +192,7 @@ ENTRY(vector_swi)
 #else
 	sub	sp, sp, #PT_REGS_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
+3:
  ARM(	add	r8, sp, #S_PC		)
  ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
  THUMB(	mov	r8, sp			)
arch/arm/kernel/spectre.c (new file, 71 lines)
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	return !sysctl_unprivileged_bpf_disabled;
+#else
+	return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+	if (state > spectre_v2_state)
+		spectre_v2_state = state;
+	spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	const char *method;
+
+	if (spectre_v2_state == SPECTRE_UNAFFECTED)
+		return sprintf(buf, "%s\n", "Not affected");
+
+	if (spectre_v2_state != SPECTRE_MITIGATED)
+		return sprintf(buf, "%s\n", "Vulnerable");
+
+	if (_unprivileged_ebpf_enabled())
+		return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+	switch (spectre_v2_methods) {
+	case SPECTRE_V2_METHOD_BPIALL:
+		method = "Branch predictor hardening";
+		break;
+
+	case SPECTRE_V2_METHOD_ICIALLU:
+		method = "I-cache invalidation";
+		break;
+
+	case SPECTRE_V2_METHOD_SMC:
+	case SPECTRE_V2_METHOD_HVC:
+		method = "Firmware call";
+		break;
+
+	case SPECTRE_V2_METHOD_LOOP8:
+		method = "History overwrite";
+		break;
+
+	default:
+		method = "Multiple mitigations";
+		break;
+	}
+
+	return sprintf(buf, "Mitigation: %s\n", method);
+}
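These cpu_show_spectre_v1/v2 hooks back the generic CPU-vulnerabilities sysfs attributes, so the new reporting (including the unprivileged-eBPF check from the merged commit) shows up under /sys/devices/system/cpu/vulnerabilities/. A small userspace sketch that just reads the two files back, with error handling kept minimal:

#include <stdio.h>

int main(void)
{
	static const char *const files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v1",
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
	};
	char line[128];

	for (unsigned int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");

		if (!f || !fgets(line, sizeof(line), f)) {
			printf("%s: unavailable\n", files[i]);
			if (f)
				fclose(f);
			continue;
		}
		printf("%s: %s", files[i], line);	/* line keeps its '\n' */
		fclose(f);
	}
	return 0;
}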
arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -806,10 +807,59 @@ static inline void __init kuser_init(void *vectors)
 }
 #endif
 
+#ifndef CONFIG_CPU_V7M
+
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+	memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+	unsigned long start = (unsigned long)vma + offset;
+	unsigned long end = start + size;
+
+	flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+	void *vec_start, *vec_end;
+
+	if (system_state > SYSTEM_SCHEDULING) {
+		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+		       smp_processor_id());
+		return SPECTRE_VULNERABLE;
+	}
+
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		vec_start = __vectors_bhb_loop8_start;
+		vec_end = __vectors_bhb_loop8_end;
+		break;
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		vec_start = __vectors_bhb_bpiall_start;
+		vec_end = __vectors_bhb_bpiall_end;
+		break;
+
+	default:
+		pr_err("CPU%u: unknown Spectre BHB state %d\n",
+		       smp_processor_id(), method);
+		return SPECTRE_VULNERABLE;
+	}
+
+	copy_from_lma(vectors_page, vec_start, vec_end);
+	flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+	return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
-#ifndef CONFIG_CPU_V7M
-	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
 	unsigned i;
@@ -830,17 +880,20 @@ void __init early_trap_init(void *vectors_base)
 	 * into the vector page, mapped at 0xffff0000, and ensure these
 	 * are visible to the instruction stream.
 	 */
-	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
 
 	kuser_init(vectors_base);
 
-	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
 #else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
 	 * memory area. The address is configurable and so a table in the kernel
 	 * image can be used.
 	 */
-#endif
 }
+#endif
arch/arm/mm/Kconfig
@@ -833,6 +833,7 @@ config CPU_BPREDICT_DISABLE
 
 config CPU_SPECTRE
 	bool
+	select GENERIC_CPU_VULNERABILITIES
 
 config HARDEN_BRANCH_PREDICTOR
 	bool "Harden the branch predictor against aliasing attacks" if EXPERT
@@ -853,6 +854,16 @@ config HARDEN_BRANCH_PREDICTOR
 
 	   If unsure, say Y.
 
+config HARDEN_BRANCH_HISTORY
+	bool "Harden Spectre style attacks against branch history" if EXPERT
+	depends on CPU_SPECTRE
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation. When
+	  taking an exception, a sequence of branches overwrites the branch
+	  history, or branch history is invalidated.
+
 config TLS_REG_EMUL
 	bool
 	select NEED_KUSER_HELPERS
arch/arm/mm/proc-v7-bugs.c
@@ -6,8 +6,35 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/proc-fns.h>
+#include <asm/spectre.h>
 #include <asm/system_misc.h>
 
+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+	switch ((int)res.a0) {
+	case SMCCC_RET_SUCCESS:
+		return SPECTRE_MITIGATED;
+
+	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+		return SPECTRE_UNAFFECTED;
+
+	default:
+		return SPECTRE_VULNERABLE;
+	}
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+	return SPECTRE_VULNERABLE;
+}
+#endif
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
 
@@ -36,13 +63,60 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
 	const char *spectre_v2_method = NULL;
 	int cpu = smp_processor_id();
 
 	if (per_cpu(harden_branch_predictor_fn, cpu))
-		return;
+		return SPECTRE_MITIGATED;
+
+	switch (method) {
+	case SPECTRE_V2_METHOD_BPIALL:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_bpiall;
+		spectre_v2_method = "BPIALL";
+		break;
+
+	case SPECTRE_V2_METHOD_ICIALLU:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_iciallu;
+		spectre_v2_method = "ICIALLU";
+		break;
+
+	case SPECTRE_V2_METHOD_HVC:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			call_hvc_arch_workaround_1;
+		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+		spectre_v2_method = "hypervisor";
+		break;
+
+	case SPECTRE_V2_METHOD_SMC:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			call_smc_arch_workaround_1;
+		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+		spectre_v2_method = "firmware";
+		break;
+	}
+
+	if (spectre_v2_method)
+		pr_info("CPU%u: Spectre v2: using %s workaround\n",
+			smp_processor_id(), spectre_v2_method);
+
+	return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n");
+
+	return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+	unsigned int state, method = 0;
 
 	switch (read_cpuid_part()) {
 	case ARM_CPU_PART_CORTEX_A8:
@@ -51,69 +125,133 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		per_cpu(harden_branch_predictor_fn, cpu) =
-			harden_branch_predictor_bpiall;
-		spectre_v2_method = "BPIALL";
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_BPIALL;
 		break;
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		per_cpu(harden_branch_predictor_fn, cpu) =
-			harden_branch_predictor_iciallu;
-		spectre_v2_method = "ICIALLU";
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_ICIALLU;
 		break;
 
-#ifdef CONFIG_ARM_PSCI
 	case ARM_CPU_PART_BRAHMA_B53:
 		/* Requires no workaround */
+		state = SPECTRE_UNAFFECTED;
 		break;
+
 	default:
 		/* Other ARM CPUs require no workaround */
-		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+			state = SPECTRE_UNAFFECTED;
 			break;
-		fallthrough;
-		/* Cortex A57/A72 require firmware workaround */
-	case ARM_CPU_PART_CORTEX_A57:
-	case ARM_CPU_PART_CORTEX_A72: {
-		struct arm_smccc_res res;
+		}
 
-		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-				     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 != 0)
-			return;
+		fallthrough;
+
+	/* Cortex A57/A72 require firmware workaround */
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72:
+		state = spectre_v2_get_cpu_fw_mitigation_state();
+		if (state != SPECTRE_MITIGATED)
+			break;
 
 		switch (arm_smccc_1_1_get_conduit()) {
 		case SMCCC_CONDUIT_HVC:
-			per_cpu(harden_branch_predictor_fn, cpu) =
-				call_hvc_arch_workaround_1;
-			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
-			spectre_v2_method = "hypervisor";
+			method = SPECTRE_V2_METHOD_HVC;
 			break;
 
 		case SMCCC_CONDUIT_SMC:
-			per_cpu(harden_branch_predictor_fn, cpu) =
-				call_smc_arch_workaround_1;
-			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
-			spectre_v2_method = "firmware";
+			method = SPECTRE_V2_METHOD_SMC;
 			break;
 
 		default:
+			state = SPECTRE_VULNERABLE;
 			break;
 		}
 	}
-#endif
-	}
 
-	if (spectre_v2_method)
-		pr_info("CPU%u: Spectre v2: using %s workaround\n",
-			smp_processor_id(), spectre_v2_method);
+	if (state == SPECTRE_MITIGATED)
+		state = spectre_v2_install_workaround(method);
+
+	spectre_v2_update_state(state, method);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		return "loop";
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		return "BPIALL";
+
+	default:
+		return "unknown";
+	}
+}
+
+static int spectre_bhb_install_workaround(int method)
+{
+	if (spectre_bhb_method != method) {
+		if (spectre_bhb_method) {
+			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+			       smp_processor_id());
+
+			return SPECTRE_VULNERABLE;
+		}
+
+		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+			return SPECTRE_VULNERABLE;
+
+		spectre_bhb_method = method;
+	}
+
+	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+		smp_processor_id(), spectre_bhb_method_name(method));
+
+	return SPECTRE_MITIGATED;
 }
 #else
-static void cpu_v7_spectre_init(void)
+static int spectre_bhb_install_workaround(int method)
 {
+	return SPECTRE_VULNERABLE;
 }
 #endif
 
+static void cpu_v7_spectre_bhb_init(void)
+{
+	unsigned int state, method = 0;
+
+	switch (read_cpuid_part()) {
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_LOOP8;
+		break;
+
+	case ARM_CPU_PART_CORTEX_A73:
+	case ARM_CPU_PART_CORTEX_A75:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_BPIALL;
+		break;
+
+	default:
+		state = SPECTRE_UNAFFECTED;
+		break;
+	}
+
+	if (state == SPECTRE_MITIGATED)
+		state = spectre_bhb_install_workaround(method);
+
+	spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
 						  u32 mask, const char *msg)
 {
@@ -142,16 +280,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
 void cpu_v7_ca8_ibe(void)
 {
 	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
-		cpu_v7_spectre_init();
+		cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_ca15_ibe(void)
 {
 	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
-		cpu_v7_spectre_init();
+		cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_bugs_init(void)
 {
-	cpu_v7_spectre_init();
+	cpu_v7_spectre_v2_init();
+	cpu_v7_spectre_bhb_init();
 }