ANDROID: KVM: Update nVHE stack size to 8KB
In order to make the nVHE stack size easily configurable, introduce
NVHE_STACK_SHIFT, which must be >= PAGE_SHIFT. Increase the stack size
to 8KB if PAGE_SIZE is 4KB, since some vendors require a larger stack
in the hypervisor.

Bug: 305486112
Change-Id: Ic7612d5d5bf9d20db811ce67b177bbda192adf92
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
parent 53771c1826
commit 908a530787
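For reference, a minimal userspace sketch (not part of the patch) that mirrors the new macro logic and prints the resulting nVHE stack size per page granule; the PAGE_SHIFT values and the main() harness are assumptions for illustration only:

```c
#include <stdio.h>

/* Mirrors the new NVHE_STACK_SHIFT logic: only 4 KB pages get a doubled stack. */
static unsigned long nvhe_stack_size(unsigned int page_shift)
{
	unsigned long page_size = 1UL << page_shift;
	unsigned int nvhe_stack_shift = (page_size == 4096) ? page_shift + 1 : page_shift;

	return 1UL << nvhe_stack_shift;
}

int main(void)
{
	unsigned int shifts[] = { 12, 14, 16 };	/* 4 KB, 16 KB, 64 KB granules */

	for (unsigned int i = 0; i < 3; i++)
		printf("PAGE_SIZE=%lu KB -> NVHE_STACK_SIZE=%lu KB\n",
		       (1UL << shifts[i]) / 1024, nvhe_stack_size(shifts[i]) / 1024);
	return 0;
}
```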
@@ -113,13 +113,21 @@
 #define OVERFLOW_STACK_SIZE	SZ_4K
 
+#if PAGE_SIZE == SZ_4K
+#define NVHE_STACK_SHIFT	(PAGE_SHIFT + 1)
+#else
+#define NVHE_STACK_SHIFT	PAGE_SHIFT
+#endif
+
+#define NVHE_STACK_SIZE		(UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
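As a worked example of the updated NVHE_STACKTRACE_SIZE with 4KB pages: OVERFLOW_STACK_SIZE is 4KB and NVHE_STACK_SIZE is now 8KB, giving (4K + 8K)/2 + sizeof(long) = 6KB + 8 bytes. The sketch below (illustration only, assuming sizeof(long) == 8 as on arm64) restates that arithmetic:

```c
#include <assert.h>
#include <stdio.h>

#define OVERFLOW_STACK_SIZE	4096UL	/* SZ_4K */
#define NVHE_STACK_SIZE		8192UL	/* 1UL << (PAGE_SHIFT + 1) with 4 KB pages */
#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))

int main(void)
{
	/*
	 * Smallest frame is [x29, x30] = 16 bytes and each saved trace entry
	 * is one long (8 bytes), so at most half of the used stack bytes can
	 * turn into trace entries, plus one entry to delimit the end.
	 */
	assert(NVHE_STACKTRACE_SIZE == 6144 + sizeof(long));
	printf("NVHE_STACKTRACE_SIZE = %zu bytes\n", NVHE_STACKTRACE_SIZE);
	return 0;
}
```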
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
@@ -50,7 +50,7 @@ static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 DECLARE_KVM_NVHE_PER_CPU(int, hyp_cpu_number);
 
@@ -2009,7 +2009,7 @@ static void teardown_hyp_mode(void)
 
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
 			   pkvm_host_fp_state_order());
@@ -2168,15 +2168,15 @@ static int init_hyp_mode(void)
 	 * Allocate stack pages for Hypervisor-mode
 	 */
 	for_each_possible_cpu(cpu) {
-		unsigned long stack_page;
+		unsigned long stack_base;
 
-		stack_page = __get_free_page(GFP_KERNEL);
-		if (!stack_page) {
+		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+		if (!stack_base) {
 			err = -ENOMEM;
 			goto out_err;
 		}
 
-		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
 	}
 
 	/*
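The allocation order passed to __get_free_pages()/free_pages() is NVHE_STACK_SHIFT - PAGE_SHIFT, i.e. order 1 (two contiguous pages) with 4KB pages and order 0 otherwise. A small sketch of that arithmetic (illustration only; the concrete shift values are assumptions for the 4KB-page case):

```c
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* assumed 4 KB pages */
	unsigned int nvhe_stack_shift = page_shift + 1;	/* 8 KB stack */
	unsigned int order = nvhe_stack_shift - page_shift;

	/* (1 << order) pages back exactly one NVHE_STACK_SIZE stack. */
	printf("order=%u -> %lu bytes per hyp stack\n",
	       order, (1UL << order) * (1UL << page_shift));
	return 0;
}
```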
@@ -2252,7 +2252,7 @@ static int init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 		unsigned long hyp_addr;
 
 		/*
@@ -2260,7 +2260,7 @@ static int init_hyp_mode(void)
 		 * and guard page. The allocation is also aligned based on
 		 * the order of its size.
 		 */
-		err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+		err = hyp_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
 		if (err) {
 			kvm_err("Cannot allocate hyp stack guard page\n");
 			goto out_err;
@@ -2271,12 +2271,12 @@ static int init_hyp_mode(void)
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
-		err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
-					    __pa(stack_page), PAGE_HYP);
+		err = __create_hyp_mappings(hyp_addr + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+					    __pa(stack_base), PAGE_HYP);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
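Why the NVHE_STACK_SHIFT bit works as the valid/guard discriminator: the private VA range is 2 * NVHE_STACK_SIZE and aligned to its size, the lower half stays unbacked and only the upper half is backed by the stack, so any address inside the mapped stack has that bit set while guard-region addresses have it clear. A hedged sketch of the layout (the hyp_addr value is hypothetical):

```c
#include <stdio.h>

#define NVHE_STACK_SHIFT	13			/* assumed: 8 KB stack, 4 KB pages */
#define NVHE_STACK_SIZE		(1UL << NVHE_STACK_SHIFT)

int main(void)
{
	/* The VA range spans 2 * NVHE_STACK_SIZE and is aligned to that size,
	 * so bit NVHE_STACK_SHIFT is 0 at its base. */
	unsigned long hyp_addr = 0x40000000UL;			/* hypothetical, 16 KB aligned */
	unsigned long guard = hyp_addr;				/* [guard, guard + 8K): unbacked */
	unsigned long stack_lo = hyp_addr + NVHE_STACK_SIZE;	/* mapped stack starts here */
	unsigned long stack_hyp_va = hyp_addr + 2 * NVHE_STACK_SIZE;

	printf("guard page bit:      %lu\n", (guard >> NVHE_STACK_SHIFT) & 1);		/* 0 */
	printf("stack base bit:      %lu\n", (stack_lo >> NVHE_STACK_SHIFT) & 1);	/* 1 */
	printf("near stack top bit:  %lu\n",
	       ((stack_hyp_va - 8) >> NVHE_STACK_SHIFT) & 1);				/* 1 */
	return 0;
}
```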
@@ -2288,9 +2288,9 @@ static int init_hyp_mode(void)
 		 * __hyp_pa() won't do the right thing there, since the stack
 		 * has been mapped in the flexible private VA space.
 		 */
-		params->stack_pa = __pa(stack_page);
+		params->stack_pa = __pa(stack_base);
 
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
+		params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -154,12 +154,12 @@ SYM_FUNC_END(__host_hvc)
 
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
 	 * of SP should always be 1.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
 	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
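A C model of the check above (illustration only; the address values are hypothetical): the assembly folds x0 into SP and back so the test needs no scratch register, and tbz branches to the overflow handler when bit NVHE_STACK_SHIFT of SP reads 0:

```c
#include <stdbool.h>
#include <stdio.h>

#define NVHE_STACK_SHIFT	13	/* assumed: 8 KB stacks with 4 KB pages */

static bool hyp_sp_overflowed(unsigned long sp)
{
	/*
	 * A valid hyp stack pointer always has bit NVHE_STACK_SHIFT set; once
	 * SP drops into the unbacked guard region below the stack, that bit
	 * reads as 0 (the case where tbz takes the overflow branch).
	 */
	return !((sp >> NVHE_STACK_SHIFT) & 1);
}

int main(void)
{
	unsigned long stack_lo = 0x40000000UL + (1UL << NVHE_STACK_SHIFT);	/* hypothetical */

	printf("sp in stack:  overflow=%d\n", hyp_sp_overflowed(stack_lo + 0x100));	/* 0 */
	printf("sp in guard:  overflow=%d\n", hyp_sp_overflowed(stack_lo - 0x10));	/* 1 */
	return 0;
}
```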
@@ -150,7 +150,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 		 * and guard page. The allocation is also aligned based on
 		 * the order of its size.
 		 */
-		ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+		ret = pkvm_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
 		if (ret)
 			return ret;
 
@@ -159,19 +159,19 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
 		hyp_spin_lock(&pkvm_pgd_lock);
-		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
-					  PAGE_SIZE, params->stack_pa, PAGE_HYP);
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + NVHE_STACK_SIZE,
+					  NVHE_STACK_SIZE, params->stack_pa, PAGE_HYP);
 		hyp_spin_unlock(&pkvm_pgd_lock);
 		if (ret)
 			return ret;
 
 		/* Update stack_hyp_va to end of the stack's private VA range */
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
+		params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
 	}
 
 	create_hyp_host_fp_mappings();
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
-	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
 	stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
 	stacktrace_info->fp = fp;
 	stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
-	unsigned long low = high - PAGE_SIZE;
+	unsigned long low = high - NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -50,7 +50,7 @@ static struct stack_info stackinfo_get_hyp(void)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -60,8 +60,8 @@ static struct stack_info stackinfo_get_hyp(void)
 
 static struct stack_info stackinfo_get_hyp_kern_va(void)
 {
-	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,