Merge 6.1.48 into android14-6.1-lts
Changes in 6.1.48
	x86/cpu: Fix __x86_return_thunk symbol type
	x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
	x86/alternative: Make custom return thunk unconditional
	x86/cpu: Clean up SRSO return thunk mess
	x86/cpu: Rename original retbleed methods
	x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
	x86/cpu: Cleanup the untrain mess
	x86/srso: Explain the untraining sequences a bit more
	x86/static_call: Fix __static_call_fixup()
	x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
	x86/CPU/AMD: Fix the DIV(0) initial fix attempt
	x86/srso: Disable the mitigation on unaffected configurations
	x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
	objtool/x86: Fixup frame-pointer vs rethunk
	x86/srso: Correct the mitigation status when SMT is disabled
	Linux 6.1.48

Change-Id: I9e2a6d887a9041b0203fdf8ad3d3ebc8177e2d24
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 13f6afea0c
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
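Note: the aliasing rule this documentation describes is mechanical enough to express directly. Below is a minimal C sketch (not kernel code; the bit positions come from the retpoline.S comments further down in this diff) of the predicate the linker-script assertions later enforce: both functions sit in the same 2M page and differ only in bits 2, 8, 14 and 20 of their virtual addresses.

    #include <stdbool.h>
    #include <stdint.h>

    #define SRSO_ALIAS_BITS ((1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20))

    /* True when 'safe' is the BTB alias of 'untrain' as described above. */
    static bool btb_aliases(uint64_t untrain, uint64_t safe)
    {
            return (untrain >> 21) == (safe >> 21) &&      /* same 2M page      */
                   (untrain & SRSO_ALIAS_BITS) == 0 &&     /* bits clear here   */
                   (untrain ^ safe) == SRSO_ALIAS_BITS;    /* ...set over there */
    }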
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 47
+SUBLEVEL = 48
 EXTRAVERSION =
 NAME = Curry Ramen
@@ -92,7 +92,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
         mds_user_clear_cpu_buffers();
-        amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode
@@ -168,9 +168,9 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET	""
+#define CALL_UNTRAIN_RET	""
 #endif
 
 /*
@@ -178,7 +178,7 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -189,14 +189,9 @@
 	defined(CONFIG_CPU_SRSO)
 	ANNOTATE_UNRET_END
 	ALTERNATIVE_2 "",						\
-		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 #else /* __ASSEMBLY__ */
@@ -210,10 +205,21 @@
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
 extern retpoline_thunk_t __x86_indirect_thunk_array[];
 
 #ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
-extern void zen_untrain_ret(void);
 #else
 static inline void __x86_return_thunk(void) {}
 #endif
 
+extern void retbleed_return_thunk(void);
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+
+extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
+
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 #ifdef CONFIG_RETPOLINE
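Note: entry_untrain_ret(), declared above and defined in retpoline.S later in this diff, folds the three untraining variants behind a single entry symbol. As a mental model only (an illustrative plain-C sketch, an assumption for exposition — the real kernel patches the chosen jump into place at boot via the alternatives mechanism, so no runtime branch exists):

    /* Boot-time selection model of entry_untrain_ret (illustration only). */
    extern void retbleed_untrain_ret(void);
    extern void srso_untrain_ret(void);
    extern void srso_alias_untrain_ret(void);

    static void entry_untrain_ret_model(int has_srso, int has_srso_alias)
    {
            if (has_srso_alias)
                    srso_alias_untrain_ret();  /* Zen3/4: BTB-alias eviction   */
            else if (has_srso)
                    srso_untrain_ret();        /* Zen1/2: MOVABS overlap trick */
            else
                    retbleed_untrain_ret();    /* retbleed untraining          */
    }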
@@ -1295,3 +1295,4 @@ void noinstr amd_clear_divider(void)
 	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
 		     :: "a" (0), "d" (0), "r" (1));
 }
+EXPORT_SYMBOL_GPL(amd_clear_divider);
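Note: the asm above issues the DIV only on parts flagged with X86_BUG_DIV0. With EAX and EDX both zero and a divisor of 1, the 0/1 division is harmless (quotient 0, remainder 0, no #DE) while the idea is to scrub any stale result the divider could otherwise leak. A standalone user-space illustration of the same operand setup (a sketch, not the kernel macro itself):

    static inline void clear_divider_sketch(void)
    {
            unsigned int lo = 0, hi = 0, one = 1;

            /* EDX:EAX = 0, divisor = 1: quotient 0, remainder 0, no fault. */
            asm volatile("divl %2" : "+a" (lo), "+d" (hi) : "r" (one));
    }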
@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
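Note: this pointer is the single dispatch point the following hunks use — every kernel RET is compiled to reach __x86_return_thunk, and at boot the chosen mitigation simply retargets those return sites. A condensed model of the selections made in the hunks below (illustrative pseudo-C, an assumption for exposition; the real rewriting is done by the returns patching pass, not through a runtime pointer call):

    extern void __x86_return_thunk(void);
    extern void retbleed_return_thunk(void);
    extern void srso_return_thunk(void);
    extern void srso_alias_return_thunk(void);

    /* Default: the bare ret thunk; mitigations override it. */
    void (*x86_return_thunk)(void) = __x86_return_thunk;

    static void select_return_thunk_model(int retbleed_unret, int srso, int zen34)
    {
            if (retbleed_unret)
                    x86_return_thunk = retbleed_return_thunk;
            if (srso)
                    x86_return_thunk = zen34 ? srso_alias_return_thunk
                                             : srso_return_thunk;
    }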
@@ -164,8 +166,13 @@ void __init cpu_select_mitigations(void)
 	md_clear_select_mitigation();
 	srbds_select_mitigation();
 	l1d_flush_select_mitigation();
-	gds_select_mitigation();
+
+	/*
+	 * srso_select_mitigation() depends and must run after
+	 * retbleed_select_mitigation().
+	 */
+	srso_select_mitigation();
+	gds_select_mitigation();
 }
 
 /*
@@ -1013,6 +1020,9 @@ static void __init retbleed_select_mitigation(void)
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
+		if (IS_ENABLED(CONFIG_RETHUNK))
+			x86_return_thunk = retbleed_return_thunk;
+
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 			pr_err(RETBLEED_UNTRAIN_MSG);
@@ -2388,9 +2398,10 @@ static void __init srso_select_mitigation(void)
 		 * Zen1/2 with SMT off aren't vulnerable after the right
 		 * IBPB microcode has been applied.
 		 */
-		if ((boot_cpu_data.x86 < 0x19) &&
-		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
+		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
 			return;
+		}
 	}
 
 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@@ -2419,11 +2430,15 @@ static void __init srso_select_mitigation(void)
 			 * like ftrace, static_call, etc.
 			 */
 			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+			setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-			if (boot_cpu_data.x86 == 0x19)
+			if (boot_cpu_data.x86 == 0x19) {
 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-			else
+				x86_return_thunk = srso_alias_return_thunk;
+			} else {
 				setup_force_cpu_cap(X86_FEATURE_SRSO);
+				x86_return_thunk = srso_return_thunk;
+			}
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
@@ -2672,6 +2687,9 @@ static ssize_t gds_show_state(char *buf)
 
 static ssize_t srso_show_state(char *buf)
 {
+	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
 	return sysfs_emit(buf, "%s%s\n",
 			  srso_strings[srso_mitigation],
 			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
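Note: with this change, userspace querying the SRSO vulnerability file on an unaffected-when-SMT-off Zen1/2 part now sees the corrected state instead of a safe-RET string. Illustrative output (spec_rstack_overflow is the sysfs name the kernel uses for SRSO):

    $ cat /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
    Mitigation: SMT disabled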
@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
  */
 bool __static_call_fixup(void *tramp, u8 op, void *dest)
 {
+	unsigned long addr = (unsigned long)tramp;
+	/*
+	 * Not all .return_sites are a static_call trampoline (most are not).
+	 * Check if the 3 bytes after the return are still kernel text, if not,
+	 * then this definitely is not a trampoline and we need not worry
+	 * further.
+	 *
+	 * This avoids the memcmp() below tripping over pagefaults etc..
+	 */
+	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
+	    !kernel_text_address(addr + 7))
+		return false;
+
 	if (memcmp(tramp+5, tramp_ud, 3)) {
 		/* Not a trampoline site, not our problem. */
 		return false;
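Note: the magic number 7 is the last byte of the 3-byte signature that starts at tramp+5, so the memcmp() touches bytes tramp+5..tramp+7; only when that window crosses into the next page can the read fault, hence the two-part test. A self-contained sketch of the guard (the PAGE_SHIFT value and the validity stand-in are assumptions for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12  /* 4K pages, as on x86 */

    /* Conservative stand-in for the kernel's kernel_text_address(). */
    static bool text_addr_valid(uintptr_t addr)
    {
            (void)addr;
            return false;
    }

    static bool can_probe_trampoline(uintptr_t tramp)
    {
            /* Bytes tramp+5..tramp+7 hold the signature; consult the
             * next page only if the window actually crosses into it. */
            if (((tramp >> PAGE_SHIFT) != ((tramp + 7) >> PAGE_SHIFT)) &&
                !text_addr_valid(tramp + 7))
                    return false;
            return true;
    }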
@@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error)
 {
 	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
 		      FPE_INTDIV, error_get_trap_addr(regs));
+
+	amd_clear_divider();
 }
 
 DEFINE_IDTENTRY(exc_overflow)
@@ -134,18 +134,18 @@ SECTIONS
 		KPROBES_TEXT
 		ALIGN_ENTRY_TEXT_BEGIN
 #ifdef CONFIG_CPU_SRSO
-		*(.text.__x86.rethunk_untrain)
+		*(.text..__x86.rethunk_untrain)
 #endif
 
 		ENTRY_TEXT
 
 #ifdef CONFIG_CPU_SRSO
 		/*
-		 * See the comment above srso_untrain_ret_alias()'s
+		 * See the comment above srso_alias_untrain_ret()'s
 		 * definition.
 		 */
-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
-		*(.text.__x86.rethunk_safe)
+		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		*(.text..__x86.rethunk_safe)
 #endif
 		ALIGN_ENTRY_TEXT_END
 		SOFTIRQENTRY_TEXT
@@ -154,8 +154,8 @@ SECTIONS
 
 #ifdef CONFIG_RETPOLINE
 		__indirect_thunk_start = .;
-		*(.text.__x86.indirect_thunk)
-		*(.text.__x86.return_thunk)
+		*(.text..__x86.indirect_thunk)
+		*(.text..__x86.return_thunk)
 		__indirect_thunk_end = .;
 #endif
 	} :text =0xcccc
@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
 	   "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#ifdef CONFIG_RETHUNK
-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
+#ifdef CONFIG_RETHUNK
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 		"SRSO function pair won't alias");
 #endif
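Note: the (A | B) - (A & B) contortion exists because linker scripts have no ^ operator; for any two values it equals A ^ B, since OR collects every set bit and AND the doubly-counted ones. A quick C check of the identity against the alias bit pattern (the sample base address is an arbitrary assumption):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t bits = (1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20);
            uint64_t a = 0xffffffff82000000ULL;  /* alias bits clear */
            uint64_t b = a | bits;               /* alias bits set   */

            assert(((a | b) - (a & b)) == (a ^ b));  /* linker-script XOR */
            assert((a ^ b) == bits);
            return 0;
    }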
@@ -3947,6 +3947,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 
 	guest_state_enter_irqoff();
 
+	amd_clear_divider();
+
 	if (sev_es_guest(vcpu->kvm))
 		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
 	else
@@ -11,7 +11,7 @@
 #include <asm/frame.h>
 #include <asm/nops.h>
 
-	.section .text.__x86.indirect_thunk
+	.section .text..__x86.indirect_thunk
 
 .macro RETPOLINE reg
 	ANNOTATE_INTRA_FUNCTION_CALL
@@ -76,75 +76,106 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 #ifdef CONFIG_RETHUNK
 
 /*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
  *
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
  *   and 20 in its virtual address are set (while those bits in the
- *   srso_untrain_ret_alias() function are cleared).
+ *   srso_alias_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
 *
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
 */
 #ifdef CONFIG_CPU_SRSO
-	.section .text.__x86.rethunk_untrain
+	.section .text..__x86.rethunk_untrain
 
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
-	jmp __x86_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+	jmp srso_alias_return_thunk
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 
-	.section .text.__x86.rethunk_safe
-#endif
-
-/* Needs a definition for the __x86_return_thunk alternative below. */
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
-#ifdef CONFIG_CPU_SRSO
-	add $8, %_ASM_SP
-	UNWIND_HINT_FUNC
-#endif
+	.section .text..__x86.rethunk_safe
+#else
+/* dummy definition for alternatives */
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
+#endif
 
-	.section .text.__x86.return_thunk
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	lea 8(%_ASM_SP), %_ASM_SP
+	UNWIND_HINT_FUNC
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_FUNC_END(srso_alias_safe_ret)
+
+	.section .text..__x86.return_thunk
+
+SYM_CODE_START(srso_alias_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_alias_safe_ret
+	ud2
+SYM_CODE_END(srso_alias_return_thunk)
+
+/*
+ * Some generic notes on the untraining sequences:
+ *
+ * They are interchangeable when it comes to flushing potentially wrong
+ * RET predictions from the BTB.
+ *
+ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
+ * Retbleed sequence because the return sequence done there
+ * (srso_safe_ret()) is longer and the return sequence must fully nest
+ * (end before) the untraining sequence. Therefore, the untraining
+ * sequence must fully overlap the return sequence.
+ *
+ * Regarding alignment - the instructions which need to be untrained,
+ * must all start at a cacheline boundary for Zen1/2 generations. That
+ * is, instruction sequences starting at srso_safe_ret() and
+ * the respective instruction sequences at retbleed_return_thunk()
+ * must start at a cacheline boundary.
+ */
 
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
  *    alignment within the BTB.
- * 2) The instruction at zen_untrain_ret must contain, and not
+ * 2) The instruction at retbleed_untrain_ret must contain, and not
  *    end with, the 0xc3 byte of the RET.
  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
  *    from re-poisioning the BTB prediction.
  */
 	.align 64
-	.skip 64 - (__ret - zen_untrain_ret), 0xcc
-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
+SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
-	 * As executed from zen_untrain_ret, this is:
+	 * As executed from retbleed_untrain_ret, this is:
 	 *
 	 *   TEST $0xcc, %bl
 	 *   LFENCE
-	 *   JMP __x86_return_thunk
+	 *   JMP retbleed_return_thunk
 	 *
 	 * Executing the TEST instruction has a side effect of evicting any BTB
 	 * prediction (potentially attacker controlled) attached to the RET, as
-	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
 	 */
 	.byte 0xf6
 
 	/*
-	 * As executed from __x86_return_thunk, this is a plain RET.
+	 * As executed from retbleed_return_thunk, this is a plain RET.
 	 *
 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
 	 *
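Note: for reference while reading the retbleed sequence above — the .byte 0xf6 is the opcode of TEST r/m8, imm8, and the two bytes following it in memory serve double duty. A minimal C rendering of the three overlapping bytes (informational, derived from the bytes quoted in the comments above):

    /* Tail bytes of retbleed_untrain_ret, in memory order. Entered at the
     * first byte they decode as one TEST; entered one byte later, as RET. */
    static const unsigned char retbleed_tail[3] = {
            0xf6,  /* TEST opcode; consumes the next two bytes...        */
            0xc3,  /* ...as its ModRM (= %bl) -- alone, this is the RET  */
            0xcc,  /* ...and its imm8 (0xcc) -- alone, this is the INT3  */
    };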
@@ -156,13 +187,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
 	 * RET's prediction to a type of its choice, but can evict the
 	 * prediction due to competitive sharing. If the prediction is
-	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
+	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
 	 * which will be contained safely by the INT3.
 	 */
-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
 	ret
 	int3
-SYM_CODE_END(__ret)
+SYM_CODE_END(retbleed_return_thunk)
 
 	/*
 	 * Ensure the TEST decoding / BTB invalidation is complete.
@@ -173,16 +204,16 @@ SYM_CODE_END(__ret)
 	 * Jump back and execute the RET in the middle of the TEST instruction.
 	 * INT3 is for SLS protection.
 	 */
-	jmp __ret
+	jmp retbleed_return_thunk
 	int3
-SYM_FUNC_END(zen_untrain_ret)
-__EXPORT_THUNK(zen_untrain_ret)
+SYM_FUNC_END(retbleed_untrain_ret)
+__EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
  * above. On kernel entry, srso_untrain_ret() is executed which is a
  *
- * movabs $0xccccccc308c48348,%rax
+ * movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
@@ -194,22 +225,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	.byte 0x48, 0xb8
 
+	/*
+	 * This forces the function return instruction to speculate into a trap
+	 * (UD2 in srso_return_thunk() below). This RET will then mispredict
+	 * and execution will continue at the return site read from the top of
+	 * the stack.
+	 */
 SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-	add $8, %_ASM_SP
+	lea 8(%_ASM_SP), %_ASM_SP
 	ret
 	int3
 	int3
-	int3
+	/* end of movabs */
 	lfence
 	call srso_safe_ret
-	int3
+	ud2
 SYM_CODE_END(srso_safe_ret)
 SYM_FUNC_END(srso_untrain_ret)
 __EXPORT_THUNK(srso_untrain_ret)
 
-SYM_FUNC_START(__x86_return_thunk)
-	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
-		      "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+SYM_CODE_START(srso_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_return_thunk)
+
+SYM_FUNC_START(entry_untrain_ret)
+	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
+SYM_CODE_START(__x86_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
 	int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)
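Note: the new immediate 0xccccc30824648d48 plays the same overlap game as the retbleed TEST trick — stored little-endian, its bytes are themselves the safe-return sequence, which is why srso_safe_ret() can live inside the MOVABS. A minimal C rendering of the constant's bytes (informational, decoded from the value quoted in the comment above):

    /* Bytes of the MOVABS immediate, in memory order: exactly the
     * instructions at srso_safe_ret when entered past the 48 b8 prefix. */
    static const unsigned char srso_movabs_imm[8] = {
            0x48, 0x8d, 0x64, 0x24, 0x08,  /* lea 0x8(%rsp),%rsp */
            0xc3,                          /* ret                */
            0xcc, 0xcc,                    /* int3; int3         */
    };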
@@ -799,5 +799,5 @@ bool arch_is_rethunk(struct symbol *sym)
 	return !strcmp(sym->name, "__x86_return_thunk") ||
 	       !strcmp(sym->name, "srso_untrain_ret") ||
 	       !strcmp(sym->name, "srso_safe_ret") ||
-	       !strcmp(sym->name, "__ret");
+	       !strcmp(sym->name, "retbleed_return_thunk");
 }
@@ -379,7 +379,7 @@ static int decode_instructions(struct objtool_file *file)
 
 		if (!strcmp(sec->name, ".noinstr.text") ||
 		    !strcmp(sec->name, ".entry.text") ||
-		    !strncmp(sec->name, ".text.__x86.", 12))
+		    !strncmp(sec->name, ".text..__x86.", 13))
 			sec->noinstr = true;
 
 		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
@@ -1430,7 +1430,7 @@ static int add_jump_destinations(struct objtool_file *file)
 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
 
 			/*
-			 * This is a special case for zen_untrain_ret().
+			 * This is a special case for retbleed_untrain_ret().
 			 * It jumps to __x86_return_thunk(), but objtool
 			 * can't find the thunk's starting RET
 			 * instruction, because the RET is also in the
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool is_fentry_call(struct instruction *insn)
|
||||
static bool is_special_call(struct instruction *insn)
|
||||
{
|
||||
if (insn->type == INSN_CALL &&
|
||||
insn->call_dest &&
|
||||
insn->call_dest->fentry)
|
||||
return true;
|
||||
if (insn->type == INSN_CALL) {
|
||||
struct symbol *dest = insn->call_dest;
|
||||
|
||||
if (!dest)
|
||||
return false;
|
||||
|
||||
if (dest->fentry)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
@@ -3448,7 +3453,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
 		if (ret)
 			return ret;
 
-		if (opts.stackval && func && !is_fentry_call(insn) &&
+		if (opts.stackval && func && !is_special_call(insn) &&
 		    !has_valid_stack_frame(&state)) {
 			WARN_FUNC("call without frame pointer save/setup",
 				  sec, insn->offset);