ANDROID: virt: gunyah: Sync with latest Gunyah patches

Sync the Gunyah stack to align with the latest changes
posted to kernel.org:

https://lore.kernel.org/all/20230613172054.3959700-1-quic_eberman@quicinc.com/

Bug: 287037804
Change-Id: Ia36044894860bb94ff5518cf304254cdad14aaf5
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
Author:    Elliot Berman, 2023-06-12 09:57:59 -07:00
Committer: Carlos Llamas
Parent:    705a9b5feb
Commit:    86409bb4e1

8 changed files with 63 additions and 56 deletions


@@ -16,13 +16,15 @@ bool arch_is_gh_guest(void)
 {
         struct arm_smccc_res res;
         uuid_t uuid;
+        u32 *up;
 
         arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
 
-        ((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
-        ((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
-        ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
-        ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
+        up = (u32 *)&uuid.b[0];
+        up[0] = lower_32_bits(res.a0);
+        up[1] = lower_32_bits(res.a1);
+        up[2] = lower_32_bits(res.a2);
+        up[3] = lower_32_bits(res.a3);
 
         return uuid_equal(&uuid, &GUNYAH_UUID);
 }
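
Note: the old and new forms store the same bytes; the u32 *up alias just
avoids repeating the cast on each line. A minimal userspace sketch of the
idiom, using stand-ins for the kernel's uuid_t and lower_32_bits() (the
register values are made up):

  #include <stdint.h>
  #include <stdio.h>

  typedef struct { uint8_t b[16]; } uuid_t;               /* stand-in */
  #define lower_32_bits(x) ((uint32_t)((x) & 0xffffffffu))

  int main(void)
  {
          /* pretend these arrived in res.a0..res.a3 from the hypercall */
          uint64_t res[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
          uuid_t uuid;
          uint32_t *up = (uint32_t *)&uuid.b[0];  /* same cast the driver uses */
          int i;

          for (i = 0; i < 4; i++)
                  up[i] = lower_32_bits(res[i]);  /* one 32-bit word per register */

          for (i = 0; i < 16; i++)
                  printf("%02x", uuid.b[i]);
          printf("\n");
          return 0;
  }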


@@ -96,8 +96,9 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
         if (gh_error == GH_ERROR_OK) {
                 if (!ready)
                         return 0;
-        } else
+        } else {
                 dev_err(msgq->mbox.dev, "Failed to send data: %d (%d)\n", gh_error, msgq->last_ret);
+        }
 
         /**
          * We can send more messages. Mailbox framework requires that tx done
@@ -165,6 +166,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
                 if (ret)
                         goto err_tx_ghrsc;
 
+                enable_irq_wake(msgq->tx_ghrsc->irq);
+
                 tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
         }
@@ -175,6 +178,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
                                 IRQF_ONESHOT, "gh_msgq_rx", msgq);
                 if (ret)
                         goto err_tx_irq;
+
+                enable_irq_wake(msgq->rx_ghrsc->irq);
         }
 
         return 0;
@@ -193,6 +198,8 @@ EXPORT_SYMBOL_GPL(gh_msgq_init);
 void gh_msgq_remove(struct gh_msgq *msgq)
 {
         mbox_free_channel(gh_msgq_chan(msgq));
 
+        if (msgq->rx_ghrsc)
+                free_irq(msgq->rx_ghrsc->irq, msgq);
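
Note: the gh_msgq_init()/gh_msgq_remove() hunks above pair up: every
requested message-queue IRQ is now marked wake-capable, and the RX IRQ is
released again on remove. Condensed kernel-style sketch of that pairing
(the handler name is a placeholder; error unwinding omitted):

  /* init: request the RX IRQ, then allow it to wake the system */
  ret = request_irq(msgq->rx_ghrsc->irq, rx_irq_handler,
                    IRQF_ONESHOT, "gh_msgq_rx", msgq);
  if (ret)
          goto err_tx_irq;
  enable_irq_wake(msgq->rx_ghrsc->irq);

  /* remove: free_irq() takes the same dev_id cookie used at request time */
  if (msgq->rx_ghrsc)
          free_irq(msgq->rx_ghrsc->irq, msgq);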


@@ -38,36 +38,40 @@ static int qcom_scm_gh_rm_pre_mem_share(void *rm, struct gh_rm_mem_parcel *mem_p
                 new_perms[n].perm |= QCOM_SCM_PERM_READ;
         }
 
-        src = (1ull << QCOM_SCM_VMID_HLOS);
+        src = BIT_ULL(QCOM_SCM_VMID_HLOS);
 
         for (i = 0; i < mem_parcel->n_mem_entries; i++) {
                 src_cpy = src;
                 ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
                                 le64_to_cpu(mem_parcel->mem_entries[i].size),
                                 &src_cpy, new_perms, mem_parcel->n_acl_entries);
-                if (ret) {
-                        src = 0;
-                        for (n = 0; n < mem_parcel->n_acl_entries; n++) {
-                                vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
-                                if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
-                                        src |= (1ull << vmid);
-                                else
-                                        src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
-                        }
-
-                        new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
-
-                        for (i--; i >= 0; i--) {
-                                src_cpy = src;
-                                WARN_ON_ONCE(qcom_scm_assign_mem(
-                                        le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
-                                        le64_to_cpu(mem_parcel->mem_entries[i].size),
-                                        &src_cpy, new_perms, 1));
-                        }
-                        break;
-                }
+                if (ret)
+                        break;
         }
 
+        if (!ret)
+                goto out;
+
+        src = 0;
+        for (n = 0; n < mem_parcel->n_acl_entries; n++) {
+                vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
+                if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+                        src |= BIT_ULL(vmid);
+                else
+                        src |= BIT_ULL(QCOM_SCM_RM_MANAGED_VMID);
+        }
+
+        new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
+
+        for (i--; i >= 0; i--) {
+                src_cpy = src;
+                WARN_ON_ONCE(qcom_scm_assign_mem(
+                                le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
+                                le64_to_cpu(mem_parcel->mem_entries[i].size),
+                                &src_cpy, new_perms, 1));
+        }
+
+out:
         kfree(new_perms);
         return ret;
 }
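
Note: besides replacing open-coded (1ull << n) shifts with BIT_ULL(n), this
hunk flattens the error handling: the assign loop now just breaks on the
first failure, and a single unwind pass after the loop hands every
already-assigned entry back to HLOS. A runnable sketch of that
assign-then-unwind shape (assign()/unassign() are illustrative stand-ins
for qcom_scm_assign_mem(), not the SCM API):

  #include <stdio.h>

  #define N_ENTRIES 4

  static int assign(int i)    { return i == 2 ? -1 : 0; } /* entry 2 fails */
  static void unassign(int i) { printf("rolled back entry %d\n", i); }

  int main(void)
  {
          int ret = 0, i;

          for (i = 0; i < N_ENTRIES; i++) {
                  ret = assign(i);
                  if (ret)
                          break;          /* stop assigning; unwind below */
          }
          if (!ret)
                  goto out;               /* everything assigned; keep it */

          /* entries [0, i) succeeded and must go back to their owner */
          for (i--; i >= 0; i--)
                  unassign(i);
  out:
          return ret ? 1 : 0;
  }
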
@@ -117,13 +121,15 @@ static bool gh_has_qcom_extensions(void)
 {
         struct arm_smccc_res res;
         uuid_t uuid;
+        u32 *up;
 
         arm_smccc_1_1_smc(GH_QCOM_EXT_CALL_UUID_ID, &res);
 
-        ((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
-        ((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
-        ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
-        ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
+        up = (u32 *)&uuid.b[0];
+        up[0] = lower_32_bits(res.a0);
+        up[1] = lower_32_bits(res.a1);
+        up[2] = lower_32_bits(res.a2);
+        up[3] = lower_32_bits(res.a3);
 
         return uuid_equal(&uuid, &QCOM_EXT_UUID);
 }


@@ -335,6 +335,8 @@ static bool gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_res
         if (ret)
                 pr_warn("Failed to request vcpu irq %d: %d", vcpu->rsc->irq, ret);
 
+        enable_irq_wake(vcpu->rsc->irq);
+
 out:
         mutex_unlock(&vcpu->run_lock);
         return !ret;


@@ -160,10 +160,10 @@ struct gh_rm {
 };
 
 /**
- * gh_rm_remap_error() - Remap Gunyah resource manager errors into a Linux error code
+ * gh_rm_error_remap() - Remap Gunyah resource manager errors into a Linux error code
  * @rm_error: "Standard" return value from Gunyah resource manager
  */
-static inline int gh_rm_remap_error(enum gh_rm_error rm_error)
+static inline int gh_rm_error_remap(enum gh_rm_error rm_error)
 {
         switch (rm_error) {
         case GH_RM_ERROR_OK:
@@ -226,7 +226,7 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig
                 void *arg)
 {
         struct gh_irq_chip_data *chip_data, *spec = arg;
-        struct irq_fwspec parent_fwspec;
+        struct irq_fwspec parent_fwspec = {};
         struct gh_rm *rm = d->host_data;
         u32 gh_virq = spec->gh_virq;
         int ret;
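
Note: the empty-brace initializer zero-fills every member of parent_fwspec
before individual fields are set, so no uninitialized state reaches the
parent IRQ domain. Quick illustration of what = {} does (the struct layout
below is illustrative, not the real struct irq_fwspec):

  #include <stdio.h>

  struct fwspec {
          void *fwnode;
          int param_count;
          unsigned int param[16];
  };

  int main(void)
  {
          struct fwspec spec = {};        /* all members zero-initialized */

          printf("%p %d %u\n", spec.fwnode, spec.param_count, spec.param[15]);
          return 0;
  }
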
@@ -309,7 +309,9 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso
         if (ret < 0) {
                 dev_err(rm->dev,
                         "Failed to allocate interrupt for resource %d label: %d: %d\n",
-                        ghrsc->type, ghrsc->rm_label, ghrsc->irq);
+                        ghrsc->type, ghrsc->rm_label, ret);
+                kfree(ghrsc);
+                return NULL;
         } else {
                 ghrsc->irq = ret;
         }
@@ -417,7 +419,7 @@ static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size)
         rm->active_rx_connection = connection;
 }
 
-static void gh_rm_process_rply(struct gh_rm *rm, void *msg, size_t msg_size)
+static void gh_rm_process_reply(struct gh_rm *rm, void *msg, size_t msg_size)
 {
         struct gh_rm_rpc_reply_hdr *reply_hdr = msg;
         struct gh_rm_connection *connection;
@@ -514,7 +516,7 @@ static void gh_rm_msgq_rx_data(struct mbox_client *cl, void *mssg)
                 gh_rm_process_notif(rm, msg, msg_size);
                 break;
         case RM_RPC_TYPE_REPLY:
-                gh_rm_process_rply(rm, msg, msg_size);
+                gh_rm_process_reply(rm, msg, msg_size);
                 break;
         case RM_RPC_TYPE_CONTINUATION:
                 gh_rm_process_cont(rm, rm->active_rx_connection, msg, msg_size);
@@ -665,10 +667,10 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
         if (ret < 0)
                 goto out;
 
-        /* Wait for response */
-        ret = wait_for_completion_interruptible(&connection->reply.seq_done);
-        if (ret)
-                goto out;
+        /* Wait for response. Uninterruptible because rollback based on what RM did to VM
+         * requires us to know how RM handled the call.
+         */
+        wait_for_completion(&connection->reply.seq_done);
 
         /* Check for internal (kernel) error waiting for the response */
         if (connection->reply.ret) {
@@ -682,8 +684,7 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
         if (connection->reply.rm_error != GH_RM_ERROR_OK) {
                 dev_warn(rm->dev, "RM rejected message %08x. Error: %d\n", message_id,
                         connection->reply.rm_error);
-                dump_stack();
-                ret = gh_rm_remap_error(connection->reply.rm_error);
+                ret = gh_rm_error_remap(connection->reply.rm_error);
                 kfree(connection->payload);
                 goto out;
         }
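
Note: the behavioral change in gh_rm_call() is the move from an
interruptible to an uninterruptible wait: a signal can no longer abort the
wait, because rolling back a VM operation requires knowing how RM handled
the call. Condensed kernel-style sketch of the completion handshake (names
abridged from the driver):

  /* connection setup */
  init_completion(&connection->reply.seq_done);

  /* requester (gh_rm_call): send, then block until the reply arrives;
   * there is no -ERESTARTSYS path that could leave the reply state unknown
   */
  wait_for_completion(&connection->reply.seq_done);

  /* RX path: once the full reply is assembled, wake the requester */
  complete(&connection->reply.seq_done);
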
@@ -913,7 +914,6 @@ static int gh_rm_drv_probe(struct platform_device *pdev)
 err_irq_domain:
         irq_domain_remove(rm->irq_domain);
 err_msgq:
-        mbox_free_channel(gh_msgq_chan(&rm->msgq));
         gh_msgq_remove(&rm->msgq);
 err_cache:
         kmem_cache_destroy(rm->cache);
@@ -928,7 +928,6 @@ static int gh_rm_drv_remove(struct platform_device *pdev)
         auxiliary_device_uninit(&rm->adev);
         misc_deregister(&rm->miscdev);
         irq_domain_remove(rm->irq_domain);
-        mbox_free_channel(gh_msgq_chan(&rm->msgq));
         gh_msgq_remove(&rm->msgq);
         kmem_cache_destroy(rm->cache);


@@ -139,7 +139,7 @@ static int _gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle, bool end_append,
                 return -ENOMEM;
 
         req_header = msg;
-        mem_section = (void *)req_header + sizeof(struct gh_rm_mem_append_req_header);
+        mem_section = (void *)(req_header + 1);
 
         req_header->mem_handle = cpu_to_le32(mem_handle);
         if (end_append)
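
Note: both forms compute the same address. For any T *p, p + 1 advances by
sizeof(T), so (void *)(req_header + 1) points at the first byte after the
header without restating the struct name. Runnable demonstration (the
struct layout is a stand-in, not the real gh_rm_mem_append_req_header):

  #include <stdio.h>
  #include <stdint.h>

  struct hdr {
          uint32_t mem_handle;
          uint32_t flags;
  };

  int main(void)
  {
          unsigned char buf[64];
          struct hdr *req_header = (struct hdr *)buf;
          void *a = (void *)req_header + sizeof(struct hdr); /* GNU void * arithmetic */
          void *b = (void *)(req_header + 1);                /* plain C equivalent */

          printf("offset %zu, equal: %d\n",
                 (size_t)((unsigned char *)b - buf), a == b); /* offset 8, equal: 1 */
          return 0;
  }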


@@ -31,13 +31,10 @@ static void gh_vm_put_function(struct gh_vm_function *fn)
 static struct gh_vm_function *gh_vm_get_function(u32 type)
 {
         struct gh_vm_function *fn;
-        int r;
 
         fn = xa_load(&gh_vm_functions, type);
         if (!fn) {
-                r = request_module("ghfunc:%d", type);
-                if (r)
-                        return ERR_PTR(r > 0 ? -r : r);
+                request_module("ghfunc:%d", type);
 
                 fn = xa_load(&gh_vm_functions, type);
         }
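
Note: the request_module() return value stopped mattering because the
re-lookup is the real success test: the provider may be built in (so the
module load fails while the function exists), or the module may load
without registering this type. Self-contained sketch of the
lookup/load/re-lookup shape (the registry below is a stand-in for the
gh_vm_functions xarray):

  #include <stdio.h>

  static void *registered;                    /* stand-in registry slot */

  static void *registry_lookup(void) { return registered; }

  static void try_load_provider(void)
  {
          static int provider = 42;
          registered = &provider;             /* loading registers the provider */
  }

  static void *get_function(void)
  {
          void *fn = registry_lookup();

          if (!fn) {
                  try_load_provider();        /* best effort; result ignored */
                  fn = registry_lookup();     /* only this lookup decides */
          }
          return fn;
  }

  int main(void)
  {
          printf("%s\n", get_function() ? "found" : "missing");
          return 0;
  }
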
@@ -668,10 +665,6 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size, u64))
                         return -EOVERFLOW;
 
-                /* Gunyah requires that dtb_config is page aligned */
-                if (!PAGE_ALIGNED(dtb_config.guest_phys_addr) || !PAGE_ALIGNED(dtb_config.size))
-                        return -EINVAL;
-
                 ghvm->dtb_config = dtb_config;
 
                 r = 0;


@@ -14,9 +14,7 @@
 
 static bool pages_are_mergeable(struct page *a, struct page *b)
 {
-        if (page_to_pfn(a) + 1 != page_to_pfn(b))
-                return false;
-        return true;
+        return page_to_pfn(a) + 1 == page_to_pfn(b);
 }
 
 static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size)