init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()
commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream.

Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
remove the weak fallback from the core code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 09658b81d1
commit 18fcd72da1
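For context, the weak-fallback pattern this commit removes works like this:
generic code defines an empty __weak symbol, and an architecture that needs
the hook overrides it with a strong definition of the same name, which wins
at link time. A minimal sketch (the demo_arch_hook() name is hypothetical,
for illustration only):

	/* Generic code (e.g. init/main.c): default no-op, used only
	 * when no architecture provides a strong definition. */
	void __init __weak demo_arch_hook(void) { }

	/* Arch code (e.g. somewhere under arch/x86/): the strong
	 * definition silently replaces the weak one at link time. */
	void __init demo_arch_hook(void)
	{
		/* architecture-specific setup */
	}

After this change, x86 calls mem_encrypt_init() directly from
arch_cpu_finalize_init(), so both the weak stub in init/main.c and the call
in start_kernel() can go.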
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -47,14 +47,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
-
 void __init sev_es_init_vc_handling(void);
 bool sme_active(void);
 bool sev_active(void);
 bool sev_es_active(void);
 
+void __init mem_encrypt_init(void);
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -86,6 +85,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #define __bss_decrypted
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
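The header hunks above leave mem_encrypt.h using the usual kernel stub
pattern, so the new caller in arch_cpu_finalize_init() needs no #ifdef of
its own. The resulting shape, condensed from the diff:

	#ifdef CONFIG_AMD_MEM_ENCRYPT
	void __init mem_encrypt_init(void);		/* real implementation */
	#else
	static inline void mem_encrypt_init(void) { }	/* compiles away */
	#endif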
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -2240,4 +2241,14 @@ void __init arch_cpu_finalize_init(void)
 	} else {
 		fpu__init_check_bugs();
 	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
 }
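For reference, the x86 implementation that now runs from
arch_cpu_finalize_init() lives in arch/x86/mm/mem_encrypt.c. An abridged
sketch of the relevant part in this era of the kernel (not part of this
diff, shown only to ground the comment above):

	void __init mem_encrypt_init(void)
	{
		/* Nothing to do when SME/SEV is not active. */
		if (!sme_me_mask)
			return;

		/* Mark the SWIOTLB bounce buffers as decrypted so DMA
		 * through them does not round-trip through the
		 * encryption mask. */
		swiotlb_update_mem_attributes();
		...
	}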
--- a/init/main.c
+++ b/init/main.c
@@ -95,7 +95,6 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 
@@ -773,8 +772,6 @@ void __init __weak thread_stack_cache_init(void)
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
@@ -992,14 +989,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	 */
 	locking_selftest();
 
-	/*
-	 * This needs to be called before any devices perform DMA
-	 * operations that might use the SWIOTLB bounce buffers. It will
-	 * mark the bounce buffers as decrypted so that their usage will
-	 * not cause "plain-text" data to be decrypted when accessed.
-	 */
-	mem_encrypt_init();
-
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {