ANDROID: KVM: arm64: Deprecate late pKVM module loading

Previously it was possible to load a pKVM module after userspace has
started, leaving to the modules the task of disabling the feature
(__pkvm_close_module_registration HVC).

Deprecate this way of loading modules in favor of the pre-userspace
loading via the cmdline kvm-arm.protected_modules=<module1>,<module2>.

Bug: 254835242
Change-Id: I38eef46b1482ff03af610b3b5d21b3ebfadda59b
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
This commit is contained in:
Vincent Donnefort 2023-02-21 16:51:43 +00:00 committed by Treehugger Robot
parent 0fbbb18ab0
commit aacbded3ac
6 changed files with 8 additions and 64 deletions

View File

@ -63,17 +63,11 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
/*
* __pkvm_alloc_module_va may temporarily serve as the privileged hcall
* limit when module loading is enabled, see early_pkvm_enable_modules().
*/
__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
__KVM_HOST_SMCCC_FUNC___pkvm_close_module_registration,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls available after pKVM finalisation */

View File

@ -11,13 +11,10 @@ int __pkvm_register_hyp_panic_notifier(void (*cb)(struct kvm_cpu_context *));
enum pkvm_psci_notification;
int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *));
int reset_pkvm_priv_hcall_limit(void);
#ifdef CONFIG_MODULES
int __pkvm_init_module(void *module_init);
int __pkvm_register_hcall(unsigned long hfn_hyp_va);
int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt);
int __pkvm_close_late_module_registration(void);
void __pkvm_close_module_registration(void);
#else
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
@ -27,6 +24,5 @@ static inline int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt)
{
return HCALL_UNHANDLED;
}
static inline int __pkvm_close_late_module_registration(void) { return -EOPNOTSUPP; }
static inline void __pkvm_close_module_registration(void) { }
#endif

View File

@ -1212,12 +1212,6 @@ static void handle___pkvm_register_hcall(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_register_hcall(hfn_hyp_va);
}
static void
handle___pkvm_close_module_registration(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_close_late_module_registration();
}
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, pack_hva, host_ctxt, 1);
@ -1290,13 +1284,11 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__pkvm_alloc_module_va),
HANDLE_FUNC(__pkvm_map_module_page),
HANDLE_FUNC(__pkvm_unmap_module_page),
HANDLE_FUNC(__pkvm_init_module),
HANDLE_FUNC(__pkvm_register_hcall),
HANDLE_FUNC(__pkvm_close_module_registration),
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
@ -1330,22 +1322,6 @@ static const hcall_t host_hcall[] = {
#endif
};
unsigned long pkvm_priv_hcall_limit __ro_after_init = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
int reset_pkvm_priv_hcall_limit(void)
{
unsigned long *addr;
if (pkvm_priv_hcall_limit == __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize)
return -EACCES;
addr = hyp_fixmap_map(__hyp_pa(&pkvm_priv_hcall_limit));
*addr = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
hyp_fixmap_unmap();
return 0;
}
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
@ -1365,7 +1341,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
* returns -EPERM after the first call for a given CPU.
*/
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = pkvm_priv_hcall_limit;
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
id -= KVM_HOST_SMCCC_ID(0);

View File

@ -470,8 +470,6 @@ int __pkvm_iommu_finalize(int err)
if (!ret && err)
pkvm_handle_system_misconfiguration(NO_DMA_ISOLATION);
__pkvm_close_late_module_registration();
return ret;
}

View File

@ -77,15 +77,6 @@ void __pkvm_close_module_registration(void)
*/
}
int __pkvm_close_late_module_registration(void)
{
__pkvm_close_module_registration();
return reset_pkvm_priv_hcall_limit();
/* The fuse is blown! No way back until reset */
}
const struct pkvm_module_ops module_ops = {
.create_private_mapping = __pkvm_create_private_mapping,
.alloc_module_va = __pkvm_alloc_module_va,

View File

@ -578,26 +578,14 @@ int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
#ifdef CONFIG_MODULES
static char early_pkvm_modules[COMMAND_LINE_SIZE] __initdata;
static int __init pkvm_enable_module_late_loading(void)
{
extern unsigned long kvm_nvhe_sym(pkvm_priv_hcall_limit);
WARN(1, "Loading pKVM modules with kvm-arm.protected_modules is deprecated\n"
"Use kvm-arm.protected_modules=<module1>,<module2>");
/*
* Move the limit to allow module loading HVCs. It will be moved back to
* its original position in __pkvm_close_module_registration().
*/
kvm_nvhe_sym(pkvm_priv_hcall_limit) = __KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va;
return 0;
}
static int __init early_pkvm_modules_cfg(char *arg)
{
/*
* Loading pKVM modules with kvm-arm.protected_modules is deprecated
* Use kvm-arm.protected_modules=<module1>,<module2>
*/
if (!arg)
return pkvm_enable_module_late_loading();
return -EINVAL;
strscpy(early_pkvm_modules, arg, COMMAND_LINE_SIZE);
@ -800,7 +788,8 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
int ret, i, secs_first;
size_t offset, size;
if (!is_protected_kvm_enabled())
/* The pKVM hyp only allows loading before it is fully initialized */
if (!is_protected_kvm_enabled() || is_pkvm_initialized())
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(secs_map); i++) {