gunyah: Rename gh_rm_* to ghd_rm_* for all the APIs that conflict

As we merge upstream patches, resolve namespace conflicts in the downstream
gunyah modules: rename the conflicting gh_rm_* APIs to ghd_rm_*, and keep the
old gh_rm_* symbols as thin wrappers so existing callers keep working.

Change-Id: If7d731837704585635de2096b141acb9151d26dc
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Prakruthi Deepak Heragu 2023-02-28 13:32:12 -08:00
parent 17344b6010
commit 0c65e49be7
12 changed files with 176 additions and 59 deletions
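
Every file in this change follows the same mechanical pattern: the conflicting downstream API is renamed with a ghd_ prefix, and the old gh_rm_* symbol is kept as an exported thin wrapper so existing callers keep building and linking. In sketch form (gh_rm_vm_init is one of the real instances in the hunks below; the body is elided here):

    int ghd_rm_vm_init(gh_vmid_t vmid)
    {
            /* ... unchanged downstream implementation ... */
    }
    EXPORT_SYMBOL(ghd_rm_vm_init);

    /* Backward-compatible alias for callers that still use the old name. */
    int gh_rm_vm_init(gh_vmid_t vmid)
    {
            return ghd_rm_vm_init(vmid);
    }
    EXPORT_SYMBOL(gh_rm_vm_init);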

View File

@@ -135,7 +135,7 @@ static int qcom_ddump_share_mem(struct qcom_dmesg_dumper *qdd, gh_vmid_t self,
     sgl->sgl_entries[0].ipa_base = qdd->res.start;
     sgl->sgl_entries[0].size = resource_size(&qdd->res);
 
-    ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdd->label,
+    ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdd->label,
                           acl, sgl, NULL, &qdd->memparcel);
     if (ret) {
         dev_err(qdd->dev, "Gunyah mem share addr=%x size=%u failed: %d\n",
@@ -163,7 +163,7 @@ static void qcom_ddump_unshare_mem(struct qcom_dmesg_dumper *qdd, gh_vmid_t self
     u64 src_vmid = BIT(self) | BIT(peer);
     int ret;
 
-    ret = gh_rm_mem_reclaim(qdd->memparcel, 0);
+    ret = ghd_rm_mem_reclaim(qdd->memparcel, 0);
     if (ret)
         dev_err(qdd->dev, "Gunyah mem reclaim failed: %d\n", ret);
@@ -192,9 +192,9 @@ static int qcom_ddump_rm_cb(struct notifier_block *nb, unsigned long cmd,
     if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
         vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
         return NOTIFY_DONE;
-    if (gh_rm_get_vmid(qdd->peer_name, &peer_vmid))
+    if (ghd_rm_get_vmid(qdd->peer_name, &peer_vmid))
         return NOTIFY_DONE;
-    if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+    if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
         return NOTIFY_DONE;
     if (peer_vmid != vm_status_payload->vmid)
         return NOTIFY_DONE;

View File

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -37,8 +38,8 @@ static struct gh_acl_desc *gh_tlmm_vm_get_acl(enum gh_vm_names vm_name)
     gh_vmid_t vmid;
     gh_vmid_t primary_vmid;
 
-    gh_rm_get_vmid(vm_name, &vmid);
-    gh_rm_get_vmid(GH_PRIMARY_VM, &primary_vmid);
+    ghd_rm_get_vmid(vm_name, &vmid);
+    ghd_rm_get_vmid(GH_PRIMARY_VM, &primary_vmid);
 
     acl_desc = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]),
                        GFP_KERNEL);
@@ -96,7 +97,7 @@ static int gh_tlmm_vm_mem_share(struct gh_tlmm_vm_info *gh_tlmm_vm_info_data)
         goto sgl_error;
     }
 
-    rc = gh_rm_mem_share(GH_RM_MEM_TYPE_IO, 0, GH_TLMM_MEM_LABEL,
+    rc = ghd_rm_mem_share(GH_RM_MEM_TYPE_IO, 0, GH_TLMM_MEM_LABEL,
                          acl_desc, sgl_desc, NULL, &mem_handle);
     if (rc) {
         dev_err(gh_tlmm_dev, "Failed to share IO memories for TLMM rc:%d\n", rc);
@@ -126,7 +127,7 @@ static int __maybe_unused gh_guest_memshare_nb_handler(struct notifier_block *th
     if (cmd != GH_RM_NOTIF_VM_STATUS)
         return NOTIFY_DONE;
 
-    gh_rm_get_vmid(GH_TRUSTED_VM, &peer_vmid);
+    ghd_rm_get_vmid(GH_TRUSTED_VM, &peer_vmid);
 
     if (peer_vmid != vm_status_payload->vmid)
         return NOTIFY_DONE;
@@ -174,7 +175,7 @@ static int gh_tlmm_vm_mem_reclaim(struct gh_tlmm_vm_info *gh_tlmm_vm_info_data)
         return -EINVAL;
     }
 
-    rc = gh_rm_mem_reclaim(gh_tlmm_vm_info_data->vm_mem_handle, 0);
+    rc = ghd_rm_mem_reclaim(gh_tlmm_vm_info_data->vm_mem_handle, 0);
     if (rc)
         dev_err(gh_tlmm_dev, "VM mem reclaim failed rc:%d\n", rc);
@@ -346,7 +347,7 @@ static int gh_tlmm_vm_mem_access_probe(struct platform_device *pdev)
         if (ret)
             return ret;
     } else {
-        ret = gh_rm_get_vmid(GH_TRUSTED_VM, &vmid);
+        ret = ghd_rm_get_vmid(GH_TRUSTED_VM, &vmid);
         if (ret)
             return ret;

View File

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -382,12 +382,12 @@ int mem_buf_assign_mem_gunyah(u32 op, struct sg_table *sgt,
     pr_debug("%s: Invoking Gunyah Lend/Share\n", __func__);
     if (op == GH_RM_TRANS_TYPE_LEND) {
-        ret = gh_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, arg->flags,
+        ret = ghd_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, arg->flags,
                              arg->label, gh_acl, gh_sgl,
                              NULL /* Default memory attributes */,
                              &arg->memparcel_hdl);
     } else if (op == GH_RM_TRANS_TYPE_SHARE) {
-        ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, arg->flags,
+        ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, arg->flags,
                               arg->label, gh_acl, gh_sgl,
                               NULL /* Default memory attributes */,
                               &arg->memparcel_hdl);
@@ -424,7 +424,7 @@ int mem_buf_unassign_mem_gunyah(gh_memparcel_handle_t memparcel_hdl)
     int ret;
 
     pr_debug("%s: Beginning gunyah reclaim\n", __func__);
-    ret = gh_rm_mem_reclaim(memparcel_hdl, 0);
+    ret = ghd_rm_mem_reclaim(memparcel_hdl, 0);
     if (ret) {
         pr_err("%s: Gunyah reclaim failed\n", __func__);
         return ret;

View File

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #define pr_fmt(fmt) "hvc_gunyah: " fmt
@@ -98,7 +99,7 @@ static void gh_hvc_put_work_fn(struct work_struct *ws)
     int count, ret;
     struct gh_hvc_prv *prv = container_of(ws, struct gh_hvc_prv, put_work);
 
-    ret = gh_rm_get_vmid(prv->vm_name, &vmid);
+    ret = ghd_rm_get_vmid(prv->vm_name, &vmid);
     if (ret) {
         pr_warn_once("%s: gh_rm_get_vmid failed for %d: %d\n",
                      __func__, prv->vm_name, ret);
@@ -158,7 +159,7 @@ static int gh_hvc_flush(uint32_t vtermno, bool wait)
     if (vm_name < 0 || vm_name >= GH_VM_MAX)
         return -EINVAL;
 
-    ret = gh_rm_get_vmid(vm_name, &vmid);
+    ret = ghd_rm_get_vmid(vm_name, &vmid);
     if (ret)
         return ret;
@@ -181,7 +182,7 @@ static int gh_hvc_notify_add(struct hvc_struct *hp, int vm_name)
     return 0;
 #endif /* CONFIG_HVC_GUNYAH_CONSOLE */
 
-    ret = gh_rm_get_vmid(vm_name, &vmid);
+    ret = ghd_rm_get_vmid(vm_name, &vmid);
     if (ret) {
         pr_err("%s: gh_rm_get_vmid failed for %d: %d\n", __func__,
                vm_name, ret);
@@ -210,7 +211,7 @@ static void gh_hvc_notify_del(struct hvc_struct *hp, int vm_name)
         gh_hvc_put_work_fn(&gh_hvc_data[vm_name].put_work);
     }
 
-    ret = gh_rm_get_vmid(vm_name, &vmid);
+    ret = ghd_rm_get_vmid(vm_name, &vmid);
     if (ret)
         return;

View File

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  *
  */
@@ -176,7 +177,7 @@ int gh_irq_lend_v2(enum gh_irq_label label, enum gh_vm_names name,
         return -EINVAL;
     }
 
-    ret = gh_rm_get_vmid(name, &entry->vmid);
+    ret = ghd_rm_get_vmid(name, &entry->vmid);
     if (ret) {
         entry->state = GH_IRQ_STATE_NONE;
         spin_unlock_irqrestore(&gh_irq_lend_lock, flags);

View File

@@ -34,7 +34,7 @@ SRCU_NOTIFIER_HEAD_STATIC(gh_vm_notifier);
 static int gh_##name(struct gh_vm *vm, int vm_status) \
 { \
     int ret = 0; \
-    ret = gh_rm_##name(vm->vmid); \
+    ret = ghd_rm_##name(vm->vmid); \
     if (!ret) \
         vm->status.vm_status = vm_status; \
     return ret; \
@@ -140,7 +140,7 @@ static void gh_vm_cleanup(struct gh_vm *vm)
         fallthrough;
     case GH_RM_VM_STATUS_INIT:
     case GH_RM_VM_STATUS_AUTH:
-        ret = gh_rm_vm_reset(vmid);
+        ret = ghd_rm_vm_reset(vmid);
         if (!ret) {
             ret = gh_wait_for_vm_status(vm, GH_RM_VM_STATUS_RESET);
             if (ret < 0)
@@ -179,7 +179,7 @@ static int gh_exit_vm(struct gh_vm *vm, u32 stop_reason, u8 stop_flags)
         return -ENODEV;
     }
 
-    ret = gh_rm_vm_stop(vmid, stop_reason, stop_flags);
+    ret = ghd_rm_vm_stop(vmid, stop_reason, stop_flags);
     if (ret) {
         pr_err("Failed to stop the VM:%d ret %d\n", vmid, ret);
         mutex_unlock(&vm->vm_lock);
@@ -447,7 +447,7 @@ int gh_reclaim_mem(struct gh_vm *vm, phys_addr_t phys,
     int ret = 0;
 
     if (!is_system_vm) {
-        ret = gh_rm_mem_reclaim(vm->mem_handle, 0);
+        ret = ghd_rm_mem_reclaim(vm->mem_handle, 0);
         if (ret)
             pr_err("Failed to reclaim memory for %d, %d\n",
@@ -515,7 +515,7 @@ int gh_provide_mem(struct gh_vm *vm, phys_addr_t phys,
         ret = gh_rm_mem_donate(GH_RM_MEM_TYPE_NORMAL, 0, 0,
                                acl_desc, sgl_desc, NULL, &vm->mem_handle);
     else
-        ret = gh_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, 0, 0, acl_desc,
+        ret = ghd_rm_mem_lend(GH_RM_MEM_TYPE_NORMAL, 0, 0, acl_desc,
                              sgl_desc, NULL, &vm->mem_handle);
 
     if (ret) {
@@ -572,7 +572,7 @@ long gh_vm_configure(u16 auth_mech, u64 image_offset,
         return ret;
     }
 
-    ret = gh_rm_vm_init(vm->vmid);
+    ret = ghd_rm_vm_init(vm->vmid);
     if (ret) {
         pr_err("VM_INIT_IMAGE failed for VM:%d %d\n",
                vm->vmid, ret);

View File

@@ -14,7 +14,7 @@
  * This driver is based on idea from Hafnium Hypervisor Linux Driver,
  * but modified to work with Gunyah Hypervisor as needed.
  *
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #define pr_fmt(fmt) "gh_proxy_sched: " fmt
@@ -126,8 +126,8 @@ static inline bool is_vm_supports_proxy(gh_vmid_t gh_vmid)
 {
     gh_vmid_t vmid;
 
-    if ((!gh_rm_get_vmid(GH_TRUSTED_VM, &vmid) && vmid == gh_vmid) ||
-        (!gh_rm_get_vmid(GH_OEM_VM, &vmid) && vmid == gh_vmid))
+    if ((!ghd_rm_get_vmid(GH_TRUSTED_VM, &vmid) && vmid == gh_vmid) ||
+        (!ghd_rm_get_vmid(GH_OEM_VM, &vmid) && vmid == gh_vmid))
         return true;
 
     return false;

View File

@@ -1271,7 +1271,7 @@ static void gh_rm_get_svm_res_work_fn(struct work_struct *work)
     gh_vmid_t vmid;
     int ret;
 
-    ret = gh_rm_get_vmid(GH_PRIMARY_VM, &vmid);
+    ret = ghd_rm_get_vmid(GH_PRIMARY_VM, &vmid);
     if (ret)
         pr_err("%s: Unable to get VMID for VM label %d\n",
                __func__, GH_PRIMARY_VM);

View File

@@ -116,7 +116,7 @@ void gh_reset_vm_prop_table_entry(gh_vmid_t vmid)
 }
 
 /**
- * gh_rm_get_vmid: Translate VM name to vmid
+ * ghd_rm_get_vmid: Translate VM name to vmid
  * @vm_name: VM name to lookup
  * @vmid: out pointer to store found vmid if VM is ofund
  *
@@ -124,7 +124,7 @@ void gh_reset_vm_prop_table_entry(gh_vmid_t vmid)
  * If no VM is known to RM with the supplied name, returns -EINVAL.
  * Returns 0 on success.
  */
-int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
+int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
 {
     gh_vmid_t _vmid;
     int ret = 0;
@@ -153,6 +153,12 @@ int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
     spin_unlock(&gh_vm_table_lock);
     return ret;
 }
+EXPORT_SYMBOL(ghd_rm_get_vmid);
+
+int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
+{
+    return ghd_rm_get_vmid(vm_name, vmid);
+}
 EXPORT_SYMBOL(gh_rm_get_vmid);
 
 /**
@@ -1200,13 +1206,13 @@ int gh_rm_vm_auth_image(gh_vmid_t vmid, ssize_t n_entries,
 EXPORT_SYMBOL(gh_rm_vm_auth_image);
 
 /**
- * gh_rm_vm_init: Request to allocate resources of the VM
+ * ghd_rm_vm_init: Request to allocate resources of the VM
  * @vmid: The vmid of VM to initialize.
  *
  * The function returns 0 on success and a negative error code
  * upon failure.
  */
-int gh_rm_vm_init(gh_vmid_t vmid)
+int ghd_rm_vm_init(gh_vmid_t vmid)
 {
     struct gh_vm_init_req_payload req_payload = {
         .vmid = vmid,
@@ -1241,16 +1247,22 @@ int gh_rm_vm_init(gh_vmid_t vmid)
 
     return 0;
 }
+EXPORT_SYMBOL(ghd_rm_vm_init);
+
+int gh_rm_vm_init(gh_vmid_t vmid)
+{
+    return ghd_rm_vm_init(vmid);
+}
 EXPORT_SYMBOL(gh_rm_vm_init);
 
 /**
- * gh_rm_vm_start: Send a request to Resource Manager VM to start a VM.
+ * ghd_rm_vm_start: Send a request to Resource Manager VM to start a VM.
  * @vmid: The vmid of the vm to be started.
  *
 * The function encodes the error codes via ERR_PTR. Hence, the caller is
 * responsible to check it with IS_ERR_OR_NULL().
 */
-int gh_rm_vm_start(int vmid)
+int ghd_rm_vm_start(int vmid)
 {
     struct gh_vm_start_resp_payload *resp_payload;
     struct gh_vm_start_req_payload req_payload = {0};
@@ -1276,16 +1288,22 @@ int gh_rm_vm_start(int vmid)
 
     return 0;
 }
+EXPORT_SYMBOL(ghd_rm_vm_start);
+
+int gh_rm_vm_start(int vmid)
+{
+    return ghd_rm_vm_start(vmid);
+}
 EXPORT_SYMBOL(gh_rm_vm_start);
 
 /**
- * gh_rm_vm_stop: Send a request to Resource Manager VM to stop a VM.
+ * ghd_rm_vm_stop: Send a request to Resource Manager VM to stop a VM.
  * @vmid: The vmid of the vm to be stopped.
  *
 * The function encodes the error codes via ERR_PTR. Hence, the caller is
 * responsible to check it with IS_ERR_OR_NULL().
 */
-int gh_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
+int ghd_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
 {
     struct gh_vm_stop_req_payload req_payload = {0};
     size_t resp_payload_size;
@@ -1320,17 +1338,23 @@ int gh_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
 
     return 0;
 }
+EXPORT_SYMBOL(ghd_rm_vm_stop);
+
+int gh_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
+{
+    return ghd_rm_vm_stop(vmid, stop_reason, flags);
+}
 EXPORT_SYMBOL(gh_rm_vm_stop);
 
 /**
- * gh_rm_vm_reset: Send a request to Resource Manager VM to free up all
+ * ghd_rm_vm_reset: Send a request to Resource Manager VM to free up all
  *                 resources used by the VM.
  * @vmid: The vmid of the vm to be cleaned up.
  *
 * The function returns 0 on success and a negative error code
 * upon failure.
 */
-int gh_rm_vm_reset(gh_vmid_t vmid)
+int ghd_rm_vm_reset(gh_vmid_t vmid)
 {
     struct gh_vm_reset_req_payload req_payload = {
         .vmid = vmid,
@@ -1358,8 +1382,13 @@ int gh_rm_vm_reset(gh_vmid_t vmid)
 
     return 0;
 }
-EXPORT_SYMBOL(gh_rm_vm_reset);
+EXPORT_SYMBOL(ghd_rm_vm_reset);
+
+int gh_rm_vm_reset(gh_vmid_t vmid)
+{
+    return ghd_rm_vm_reset(vmid);
+}
+EXPORT_SYMBOL(gh_rm_vm_reset);
 
 /**
  * gh_rm_console_open: Open a console with a VM
  * @vmid: The vmid of the vm to be started.
@@ -1795,7 +1824,7 @@ int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags)
 EXPORT_SYMBOL(gh_rm_mem_release);
 
 /**
- * gh_rm_mem_reclaim: Reclaim a memory represented by a handle. This results in
+ * ghd_rm_mem_reclaim: Reclaim a memory represented by a handle. This results in
  *                    the RM mapping the associated memory into the stage-2
  *                    page-tables of the owner VM
  * @handle: The memparcel handle associated with the memory
@@ -1805,7 +1834,7 @@ EXPORT_SYMBOL(gh_rm_mem_release);
 * On success, the function will return 0. Otherwise, a negative number will be
 * returned.
 */
-int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
+int ghd_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
 {
     int ret;
 
@@ -1818,6 +1847,12 @@ int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
 
     return ret;
 }
+EXPORT_SYMBOL(ghd_rm_mem_reclaim);
+
+int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
+{
+    return ghd_rm_mem_reclaim(handle, flags);
+}
 EXPORT_SYMBOL(gh_rm_mem_reclaim);
 
 static void gh_sgl_fragment_release(struct gh_sgl_fragment *gather)
@@ -2265,7 +2300,7 @@ static int gh_rm_mem_share_lend_helper(u32 fn_id, u8 mem_type, u8 flags,
     return 0;
 
 err_mem_append:
-    gh_rm_mem_reclaim(resp_payload->memparcel_handle, 0);
+    ghd_rm_mem_reclaim(resp_payload->memparcel_handle, 0);
 err_resp_size:
     kfree(resp_payload);
 err_rm_call:
@@ -2274,7 +2309,7 @@ static int gh_rm_mem_share_lend_helper(u32 fn_id, u8 mem_type, u8 flags,
 }
 
 /**
- * gh_rm_mem_share: Share memory with other VM(s) without excluding the owner
+ * ghd_rm_mem_share: Share memory with other VM(s) without excluding the owner
  * @mem_type: The type of memory being shared (i.e. normal or I/O)
  * @flags: Bitmask of values to influence the behavior of the RM when it shares
  *         the memory
@@ -2293,7 +2328,7 @@ static int gh_rm_mem_share_lend_helper(u32 fn_id, u8 mem_type, u8 flags,
 * @handle with the memparcel handle. Otherwise, a negative number will be
 * returned.
 */
-int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
+int ghd_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
                     struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
                     struct gh_mem_attr_desc *mem_attr_desc,
                     gh_memparcel_handle_t *handle)
@@ -2311,10 +2346,20 @@ int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
 
     return ret;
 }
+EXPORT_SYMBOL(ghd_rm_mem_share);
+
+int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
+                    struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+                    struct gh_mem_attr_desc *mem_attr_desc,
+                    gh_memparcel_handle_t *handle)
+{
+    return ghd_rm_mem_share(mem_type, flags, label, acl_desc, sgl_desc,
+                            mem_attr_desc, handle);
+}
 EXPORT_SYMBOL(gh_rm_mem_share);
 
 /**
- * gh_rm_mem_lend: Lend memory to other VM(s)--excluding the owner
+ * ghd_rm_mem_lend: Lend memory to other VM(s)--excluding the owner
  * @mem_type: The type of memory being lent (i.e. normal or I/O)
  * @flags: Bitmask of values to influence the behavior of the RM when it lends
  *         the memory
@@ -2333,7 +2378,7 @@ EXPORT_SYMBOL(gh_rm_mem_share);
 * @handle with the memparcel handle. Otherwise, a negative number will be
 * returned.
 */
-int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
+int ghd_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
                    struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
                    struct gh_mem_attr_desc *mem_attr_desc,
                    gh_memparcel_handle_t *handle)
@@ -2351,8 +2396,17 @@ int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
 
     return ret;
 }
-EXPORT_SYMBOL(gh_rm_mem_lend);
+EXPORT_SYMBOL(ghd_rm_mem_lend);
+
+int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
+                   struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+                   struct gh_mem_attr_desc *mem_attr_desc,
+                   gh_memparcel_handle_t *handle)
+{
+    return ghd_rm_mem_lend(mem_type, flags, label, acl_desc, sgl_desc,
+                           mem_attr_desc, handle);
+}
+EXPORT_SYMBOL(gh_rm_mem_lend);
 
 /**
  * gh_rm_mem_donate: Donate memory to a single VM.
  * @mem_type: The type of memory being lent (i.e. normal or I/O)

View File

@@ -1072,7 +1072,7 @@ unshare_a_vm_buffer(gh_vmid_t self, gh_vmid_t peer, struct resource *r,
                        PERM_READ | PERM_WRITE | PERM_EXEC}};
     int ret;
 
-    ret = gh_rm_mem_reclaim(shmem->shm_memparcel, 0);
+    ret = ghd_rm_mem_reclaim(shmem->shm_memparcel, 0);
     if (ret) {
         pr_err("%s: gh_rm_mem_reclaim failed for handle %x addr=%llx size=%lld err=%d\n",
                VIRTIO_PRINT_MARKER, shmem->shm_memparcel, r->start,
@@ -1097,7 +1097,7 @@ static int unshare_vm_buffers(struct virt_machine *vm, gh_vmid_t peer)
     if (!vm->hyp_assign_done)
         return 0;
 
-    ret = gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
+    ret = ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
     if (ret)
         return ret;
@@ -1154,7 +1154,7 @@ static int share_a_vm_buffer(gh_vmid_t self, gh_vmid_t peer, int gunyah_label,
     sgl->sgl_entries[0].ipa_base = r->start;
     sgl->sgl_entries[0].size = resource_size(r);
 
-    ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, gunyah_label,
+    ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, gunyah_label,
                           acl, sgl, NULL, shm_memparcel);
     if (ret) {
         pr_err("%s: Sharing memory failed %d\n", VIRTIO_PRINT_MARKER, ret);
@@ -1176,7 +1176,7 @@ static int share_vm_buffers(struct virt_machine *vm, gh_vmid_t peer)
     int i, ret;
     gh_vmid_t self_vmid;
 
-    ret = gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
+    ret = ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
     if (ret)
         return ret;

View File

@@ -361,16 +361,21 @@ int gh_rm_vm_config_image(gh_vmid_t vmid, u16 auth_mech, u32 mem_handle,
         u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size);
 int gh_rm_vm_auth_image(gh_vmid_t vmid, ssize_t n_entries,
         struct gh_vm_auth_param_entry *entry);
+int ghd_rm_vm_init(gh_vmid_t vmid);
 int gh_rm_vm_init(gh_vmid_t vmid);
+int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid);
 int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid);
 int gh_rm_get_vm_id_info(gh_vmid_t vmid);
 int gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name);
 int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vminfo);
+int ghd_rm_vm_start(int vmid);
 int gh_rm_vm_start(int vmid);
 enum gh_vm_names gh_get_image_name(const char *str);
 enum gh_vm_names gh_get_vm_name(const char *str);
 int gh_rm_get_this_vmid(gh_vmid_t *vmid);
+int ghd_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags);
 int gh_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags);
+int ghd_rm_vm_reset(gh_vmid_t vmid);
 int gh_rm_vm_reset(gh_vmid_t vmid);
 
 /* Client APIs for VM query */
@@ -393,6 +398,7 @@ int gh_rm_mem_qcom_lookup_sgl(u8 mem_type, gh_label_t label,
         struct gh_mem_attr_desc *mem_attr_desc,
         gh_memparcel_handle_t *handle);
 int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags);
+int ghd_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags);
 int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags);
 struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
         u8 trans_type, u8 flags, gh_label_t label,
@@ -400,10 +406,18 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
         struct gh_sgl_desc *sgl_desc,
         struct gh_mem_attr_desc *mem_attr_desc,
         u16 map_vmid);
+int ghd_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
+        struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+        struct gh_mem_attr_desc *mem_attr_desc,
+        gh_memparcel_handle_t *handle);
 int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
         struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
         struct gh_mem_attr_desc *mem_attr_desc,
         gh_memparcel_handle_t *handle);
+int ghd_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
+        struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+        struct gh_mem_attr_desc *mem_attr_desc,
+        gh_memparcel_handle_t *handle);
 int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
         struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
         struct gh_mem_attr_desc *mem_attr_desc,
@@ -516,11 +530,21 @@ static inline int gh_rm_vm_auth_image(gh_vmid_t vmid, ssize_t n_entries,
     return -EINVAL;
 }
 
+static inline int ghd_rm_vm_init(gh_vmid_t vmid)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_vm_init(gh_vmid_t vmid)
 {
     return -EINVAL;
 }
 
+static inline int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
 {
     return -EINVAL;
@@ -541,6 +565,11 @@ static inline int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *v
     return -EINVAL;
 }
 
+static inline int ghd_rm_vm_start(int vmid)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_vm_start(int vmid)
 {
     return -EINVAL;
@@ -551,11 +580,21 @@ static inline int gh_rm_get_vm_id_info(gh_vmid_t vmid)
     return -EINVAL;
 }
 
+static inline int ghd_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
 {
     return -EINVAL;
 }
 
+static inline int ghd_rm_vm_reset(gh_vmid_t vmid)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_vm_reset(gh_vmid_t vmid)
 {
     return -EINVAL;
@@ -628,6 +667,11 @@ static inline int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags)
     return -EINVAL;
 }
 
+static inline int ghd_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
 {
     return -EINVAL;
@@ -644,6 +688,14 @@ static inline struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle,
     return ERR_PTR(-EINVAL);
 }
 
+static inline int ghd_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
+        struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+        struct gh_mem_attr_desc *mem_attr_desc,
+        gh_memparcel_handle_t *handle)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
         struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
         struct gh_mem_attr_desc *mem_attr_desc,
@@ -652,6 +704,14 @@ static inline int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
     return -EINVAL;
 }
 
+static inline int ghd_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
+        struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
+        struct gh_mem_attr_desc *mem_attr_desc,
+        gh_memparcel_handle_t *handle)
+{
+    return -EINVAL;
+}
+
 static inline int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
         struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
         struct gh_mem_attr_desc *mem_attr_desc,

View File

@@ -473,8 +473,8 @@ static int qrtr_gunyah_share_mem(struct qrtr_gunyah_dev *qdev, gh_vmid_t self,
     sgl->sgl_entries[0].ipa_base = qdev->res.start;
     sgl->sgl_entries[0].size = resource_size(&qdev->res);
 
-    ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdev->label,
-                          acl, sgl, NULL, &qdev->memparcel);
+    ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdev->label,
+                           acl, sgl, NULL, &qdev->memparcel);
     if (ret) {
         pr_err("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
                __func__, qdev->res.start, qdev->size, ret);
@@ -498,7 +498,7 @@ static void qrtr_gunyah_unshare_mem(struct qrtr_gunyah_dev *qdev,
     struct qcom_scm_vmperm dst_vmlist[1] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}};
     int ret;
 
-    ret = gh_rm_mem_reclaim(qdev->memparcel, 0);
+    ret = ghd_rm_mem_reclaim(qdev->memparcel, 0);
     if (ret)
         pr_err("%s: Gunyah reclaim failed\n", __func__);
@@ -526,9 +526,9 @@ static int qrtr_gunyah_rm_cb(struct notifier_block *nb, unsigned long cmd,
     if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
         vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
         return NOTIFY_DONE;
-    if (gh_rm_get_vmid(qdev->peer_name, &peer_vmid))
+    if (ghd_rm_get_vmid(qdev->peer_name, &peer_vmid))
         return NOTIFY_DONE;
-    if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+    if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
         return NOTIFY_DONE;
     if (peer_vmid != vm_status_payload->vmid)
         return NOTIFY_DONE;
@@ -794,9 +794,9 @@ static int qrtr_gunyah_remove(struct platform_device *pdev)
     if (!qdev->master)
         return 0;
 
-    if (gh_rm_get_vmid(qdev->peer_name, &peer_vmid))
+    if (ghd_rm_get_vmid(qdev->peer_name, &peer_vmid))
         return 0;
-    if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+    if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
         return 0;
 
     qrtr_gunyah_unshare_mem(qdev, self_vmid, peer_vmid);
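
For callers, the migration is a prefix change only; every argument list stays the same. A hypothetical caller fragment, assuming label, acl, and sgl are set up as in the drivers above:

    gh_vmid_t self_vmid;
    gh_memparcel_handle_t handle;
    int ret;

    /* Same arguments as before; only the ghd_ prefix is new. */
    ret = ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
    if (ret)
            return ret;

    ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, label,
                           acl, sgl, NULL, &handle);
    if (ret)
            return ret;

    /* ... use the shared region, then hand it back to the owner ... */
    ret = ghd_rm_mem_reclaim(handle, 0);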