KVM: arm64: Ensure I-cache isolation between vcpus of a same VM
Commit 01dc9262ff5797b675c32c0c6bc682777d23de05 upstream.

It recently became apparent that the ARMv8 architecture has interesting
rules regarding attributes being used when fetching instructions if the
MMU is off at Stage-1.

In this situation, the CPU is allowed to fetch from the PoC and allocate
into the I-cache (unless the memory is mapped with the XN attribute at
Stage-2).

If we transpose this to vcpus sharing a single physical CPU, it is
possible for a vcpu running with its MMU off to influence another vcpu
running with its MMU on, as the latter is expected to fetch from the PoU
(and self-patching code doesn't flush below that level).

In order to solve this, reuse the vcpu-private TLB invalidation code to
apply the same policy to the I-cache, nuking it every time the vcpu runs
on a physical CPU that ran another vcpu of the same VM in the past.

This involves renaming __kvm_tlb_flush_local_vmid() to
__kvm_flush_cpu_context(), and inserting a local i-cache invalidation
there.

Cc: stable@vger.kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210303164505.68492-1-maz@kernel.org
[maz: added 32bit ARM support]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4e2156c0d3
commit da2e37b55d
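The hunks below rename the per-vcpu TLB flush hypercall and extend it to the I-cache. As a quick orientation, here is a minimal sketch of the caller-side policy described above, modelled on the kvm_arch_vcpu_load() hunk at the end of this diff. The last_vcpu_ran per-CPU field and the helper name are assumptions based on that hunk, not a verbatim copy of the stable tree.

/*
 * Sketch only: the per-physical-CPU "last vcpu that ran here" check
 * that decides when to nuke the TLB and I-cache. Names (last_vcpu_ran,
 * kvm_call_hyp, __kvm_flush_cpu_context) follow the arm.c hunk below;
 * the rest of kvm_arch_vcpu_load() is elided.
 */
static void flush_if_vcpu_changed(struct kvm_vcpu *vcpu)
{
        int *last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);

        if (*last_ran != vcpu->vcpu_id) {
                /*
                 * Another vcpu of this VM ran here last: its stale TLB
                 * entries and I-cache lines must not be visible to us.
                 */
                kvm_call_hyp(__kvm_flush_cpu_context, vcpu);
                *last_ran = vcpu->vcpu_id;
        }
}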
@@ -56,7 +56,7 @@ extern char __kvm_hyp_init_end[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
@@ -45,7 +45,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
@@ -54,6 +54,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	isb();
 
 	write_sysreg(0, TLBIALL);
+	write_sysreg(0, ICIALLU);
 	dsb(nsh);
 	isb();
 
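These two hunks touch the 32-bit ARM hypervisor TLB code (presumably arch/arm/kvm/hyp/tlb.c in this stable tree). Put together, the renamed helper ends up looking roughly like the sketch below; the VTTBR switch lines are not part of the hunks above and are recalled from the surrounding file, so treat them as assumptions.

/*
 * Rough shape of the 32-bit helper after this patch (sketch, not a
 * verbatim copy). TLBIALL invalidates the local TLB for the current
 * VMID; the newly added ICIALLU write invalidates the whole local
 * I-cache, which is what gives each vcpu a private I-cache view.
 */
void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

        /* Switch Stage-2 to the guest's VMID (assumed from context) */
        write_sysreg(kvm_get_vttbr(kvm), VTTBR);
        isb();

        write_sysreg(0, TLBIALL);
        write_sysreg(0, ICIALLU);       /* added by this patch */
        dsb(nsh);
        isb();

        write_sysreg(0, VTTBR);         /* back to the host VMID */
}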
@@ -60,7 +60,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
@@ -182,7 +182,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	__tlb_switch_to_host(kvm, &cxt);
 }
 
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 	struct tlb_inv_context cxt;
@@ -191,6 +191,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	__tlb_switch_to_guest(kvm, &cxt);
 
 	__tlbi(vmalle1);
+	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
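The arm64 side (presumably arch/arm64/kvm/hyp/tlb.c here) gets the same treatment: TLBI VMALLE1 already invalidated the local Stage-1 TLB entries for the guest's VMID, and the new IC IALLU instruction now invalidates the local I-cache as well. Below is a sketch of the resulting helper, assembled from the hunks above plus the __tlb_switch_to_host() call visible in the earlier context; it is not a verbatim copy of the file.

void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
        struct tlb_inv_context cxt;

        /* Switch to the guest's VMID */
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalle1);                /* local TLB invalidation, this VMID */
        asm volatile("ic iallu");       /* added: local I-cache invalidation */
        dsb(nsh);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}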
@@ -373,11 +373,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
-	 * We might get preempted before the vCPU actually runs, but
-	 * over-invalidation doesn't affect correctness.
+	 * We guarantee that both TLBs and I-cache are private to each
+	 * vcpu. If detecting that a vcpu from the same VM has
+	 * previously run on the same physical CPU, call into the
+	 * hypervisor code to nuke the relevant contexts.
+	 *
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		kvm_call_hyp(__kvm_flush_cpu_context, vcpu);
 		*last_ran = vcpu->vcpu_id;
 	}
 
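Note the design choice spelled out in the new comment: vcpu_load can be preempted before the guest actually runs, so a flush may fire more often than strictly required, but over-invalidating the TLB and I-cache only costs performance, never correctness. Tracking the last vcpu_id per physical CPU is what limits the flush to the case the commit message describes: a different vcpu of the same VM was the last one to run on this CPU.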