ANDROID: KVM: arm64: iommu: Pass device specific flags to hypervisor

We need to pass some device-specific flags that are detected at EL1
(for example, whether the device is a built-in sync device) to the
hypervisor. The flags are defined by the driver but hosted in the main
iommu struct.

As we use SMCCC 1.1 we only have 7 arguments, which were already all in
use, so mem_size is removed. It is not really needed, since all page
donations are exactly one page, so passing the base address is enough.

Bug: 255266847
Change-Id: I14e6d2573d7a822334455999aa9fd6f01ac97450
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Authored by Mostafa Saleh on 2023-04-12 16:36:42 +00:00, committed by Carlos Llamas
parent 98d9ae3046
commit 90b95f6a81
5 changed files with 18 additions and 19 deletions
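For illustration, below is a minimal sketch of how an EL1 IOMMU driver might call the updated registration API after this change. The driver id, flag name, and "builtin-sync" property are hypothetical placeholders; only the new pkvm_iommu_register() argument list (flags added, mem_size gone) comes from this patch.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/property.h>
/* plus whichever header declares pkvm_iommu_register() in this tree */

/* Hypothetical driver-defined flag and driver id, for illustration only. */
#define MY_IOMMU_FLAG_BUILTIN_SYNC      BIT(0)
#define MY_IOMMU_DRIVER_ID              0x1ULL

static int my_iommu_probe_one(struct device *dev, struct device *parent,
                              phys_addr_t mmio_pa, size_t mmio_size)
{
        u8 flags = 0;

        /* Flags are detected at EL1 and simply forwarded to the hypervisor. */
        if (device_property_read_bool(dev, "builtin-sync"))
                flags |= MY_IOMMU_FLAG_BUILTIN_SYNC;

        /*
         * No mem_size argument any more: the hypervisor never needs more
         * than one page, so on -ENOMEM the EL1 wrapper donates a single
         * page and retries the hypercall.
         */
        return pkvm_iommu_register(dev, MY_IOMMU_DRIVER_ID, mmio_pa,
                                   mmio_size, parent, flags);
}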

@@ -394,8 +394,8 @@ struct pkvm_iommu_driver {
 };
 int pkvm_iommu_driver_init(u64 drv, void *data, size_t size);
-int pkvm_iommu_register(struct device *dev, u64 drv,
-                        phys_addr_t pa, size_t size, struct device *parent);
+int pkvm_iommu_register(struct device *dev, u64 drv, phys_addr_t pa,
+                        size_t size, struct device *parent, u8 flags);
 int pkvm_iommu_suspend(struct device *dev);
 int pkvm_iommu_resume(struct device *dev);

@@ -81,14 +81,15 @@ struct pkvm_iommu {
        void *va;
        size_t size;
        bool powered;
+       u8 flags;
        char data[];
 };
 int __pkvm_iommu_driver_init(struct pkvm_iommu_driver *drv, void *data, size_t size);
 int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
                           phys_addr_t dev_pa, size_t dev_size,
-                          unsigned long parent_id,
-                          void *kern_mem_va, size_t mem_size);
+                          unsigned long parent_id, u8 flags,
+                          void *kern_mem_va);
 int __pkvm_iommu_pm_notify(unsigned long dev_id,
                            enum pkvm_iommu_pm_event event);
 int __pkvm_iommu_finalize(int err);

@@ -1152,12 +1152,11 @@ static void handle___pkvm_iommu_register(struct kvm_cpu_context *host_ctxt)
        DECLARE_REG(phys_addr_t, dev_pa, host_ctxt, 3);
        DECLARE_REG(size_t, dev_size, host_ctxt, 4);
        DECLARE_REG(unsigned long, parent_id, host_ctxt, 5);
-       DECLARE_REG(void *, mem, host_ctxt, 6);
-       DECLARE_REG(size_t, mem_size, host_ctxt, 7);
+       DECLARE_REG(u8, flags, host_ctxt, 6);
+       DECLARE_REG(void *, mem, host_ctxt, 7);
        cpu_reg(host_ctxt, 1) = __pkvm_iommu_register(dev_id, drv_id, dev_pa,
-                                                     dev_size, parent_id,
-                                                     mem, mem_size);
+                                                     dev_size, parent_id, flags, mem);
 }
 static void handle___pkvm_iommu_pm_notify(struct kvm_cpu_context *host_ctxt)

@@ -329,8 +329,8 @@ int __pkvm_iommu_driver_init(struct pkvm_iommu_driver *drv, void *data, size_t s
 int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
                           phys_addr_t dev_pa, size_t dev_size,
-                          unsigned long parent_id,
-                          void *kern_mem_va, size_t mem_size)
+                          unsigned long parent_id, u8 flags,
+                          void *kern_mem_va)
 {
        struct pkvm_iommu *dev = NULL;
        struct pkvm_iommu_driver *drv;
@@ -364,16 +364,15 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
         * Accept memory donation if the host is providing new memory.
         * Note: We do not return the memory even if there is an error later.
         */
-       if (kern_mem_va && mem_size) {
+       if (kern_mem_va) {
                mem_va = kern_hyp_va(kern_mem_va);
-               if (!PAGE_ALIGNED(mem_va) || !PAGE_ALIGNED(mem_size)) {
+               if (!PAGE_ALIGNED(mem_va)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
-               ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn(mem_va),
-                                            mem_size >> PAGE_SHIFT);
+               ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn(mem_va), 1);
                if (ret)
                        goto out_unlock;
        }
@@ -381,7 +380,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
        host_lock_component();
        /* Allocate memory for the new device entry. */
-       dev = alloc_iommu(drv, mem_va, mem_size);
+       dev = alloc_iommu(drv, mem_va, PAGE_SIZE);
        if (!dev) {
                ret = -ENOMEM;
                goto out_free;
@@ -394,6 +393,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
                .ops = drv->ops,
                .pa = dev_pa,
                .size = dev_size,
+               .flags = flags,
        };
        if (!validate_against_existing_iommus(dev)) {

@@ -19,7 +19,7 @@ int pkvm_iommu_driver_init(u64 drv, void *data, size_t size)
 EXPORT_SYMBOL_GPL(pkvm_iommu_driver_init);
 int pkvm_iommu_register(struct device *dev, u64 drv, phys_addr_t pa,
-                       size_t size, struct device *parent)
+                       size_t size, struct device *parent, u8 flags)
 {
        void *mem;
        int ret;
@@ -30,15 +30,14 @@ int pkvm_iommu_register(struct device *dev, u64 drv, phys_addr_t pa,
         * We assume that hyp never allocates more than a page per hypcall.
         */
        ret = kvm_call_hyp_nvhe(__pkvm_iommu_register, dev_to_id(dev),
-                               drv, pa, size, dev_to_id(parent), NULL, 0);
+                               drv, pa, size, dev_to_id(parent), flags, NULL);
        if (ret == -ENOMEM) {
                mem = (void *)__get_free_page(GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                ret = kvm_call_hyp_nvhe(__pkvm_iommu_register, dev_to_id(dev),
-                                       drv, pa, size, dev_to_id(parent),
-                                       mem, PAGE_SIZE);
+                                       drv, pa, size, dev_to_id(parent), flags, mem);
        }
        return ret;
 }