Merge "gunyah: Split gh_rm_mem_accept"

commit f84111ed8b
Authored by qctecmdr on 2022-12-19 14:03:37 -08:00; committed by Gerrit - the friendly Code Review server
9 changed files with 829 additions and 68 deletions


@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/of_reserved_mem.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/mem-buf.h>
@@ -102,6 +103,7 @@ static int mem_buf_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
u64 dma_mask = IS_ENABLED(CONFIG_ARM64) ? DMA_BIT_MASK(64) :
DMA_BIT_MASK(32);
int unused;
if (of_property_match_string(dev->of_node, "qcom,mem-buf-capabilities",
"supplier") >= 0)
@@ -123,14 +125,26 @@ static int mem_buf_probe(struct platform_device *pdev)
return ret;
}
if (of_find_property(dev->of_node, "memory-region", &unused)) {
ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
if (ret) {
dev_err(dev, "Failed to get memory-region property %d\n", ret);
return ret;
}
}
ret = mem_buf_vm_init(dev);
if (ret) {
dev_err(dev, "mem_buf_vm_init failed %d\n", ret);
- return ret;
goto err_vm_init;
}
mem_buf_dev = dev;
return 0;
err_vm_init:
of_reserved_mem_device_release(dev);
return ret;
}
static int mem_buf_remove(struct platform_device *pdev)


@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/qcom_dma_heap.h>
#include <linux/qcom_tui_heap.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include "../../../../drivers/dma-buf/heaps/qcom_sg_ops.h"
#include "mem-buf-gh.h"
@@ -202,12 +204,75 @@ static int mem_buf_rmt_alloc_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem)
return 0;
}
/* In future, try allocating from buddy if cma not available */
static int mem_buf_rmt_alloc_buddy_mem(struct mem_buf_xfer_mem *xfer_mem)
{
struct cma *cma;
struct sg_table *table;
struct page *page;
int ret;
u32 align;
size_t nr_pages;
pr_debug("%s: Starting DMAHEAP-BUDDY allocation\n", __func__);
/*
* For the common case of 4Mb transfer, we want it to be nicely aligned
* to allow for 2Mb block mappings in S2 pagetable.
*/
align = min(get_order(xfer_mem->size), get_order(SZ_2M));
nr_pages = xfer_mem->size >> PAGE_SHIFT;
/*
* Don't use dev_get_cma_area() as we don't want to fall back to
* dma_contiguous_default_area.
*/
cma = mem_buf_dev->cma_area;
if (!cma)
return -ENOMEM;
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto err_alloc_table;
}
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret)
goto err_sg_init;
page = cma_alloc(cma, nr_pages, align, false);
if (!page) {
ret = -ENOMEM;
goto err_cma_alloc;
}
sg_set_page(table->sgl, page, nr_pages << PAGE_SHIFT, 0);
/* Zero memory before transferring to Guest VM */
memset(page_address(page), 0, nr_pages << PAGE_SHIFT);
xfer_mem->mem_sgt = table;
xfer_mem->secure_alloc = false;
pr_debug("%s: DMAHEAP-BUDDY allocation complete\n", __func__);
return 0;
err_cma_alloc:
sg_free_table(table);
err_sg_init:
kfree(table);
err_alloc_table:
return ret;
}
static int mem_buf_rmt_alloc_mem(struct mem_buf_xfer_mem *xfer_mem)
{
int ret = -EINVAL;
if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
ret = mem_buf_rmt_alloc_dmaheap_mem(xfer_mem);
else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE)
ret = mem_buf_rmt_alloc_buddy_mem(xfer_mem);
return ret;
}
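Editorial aside: the 2 MB alignment comment in mem_buf_rmt_alloc_buddy_mem() above works out as sketched below. This is only a worked illustration of the existing arithmetic, assuming 4 KB pages; the helper name is invented for the example and is not part of the commit.

/*
 * Illustration only (assumes PAGE_SIZE == 4 KB):
 *   get_order(SZ_4M) == 10, get_order(SZ_2M) == 9
 *   align = min(10, 9) == 9, i.e. an order-9 (2 MB) alignment request,
 * so cma_alloc(cma, nr_pages, align, false) hands back a 2 MB-aligned
 * region, which is what allows 2 MB block mappings in the S2 pagetable.
 */
static unsigned int example_buddy_align(size_t size)
{
	return min(get_order(size), get_order(SZ_2M));
}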
@@ -233,10 +298,22 @@ static void mem_buf_rmt_free_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem)
pr_debug("%s: DMAHEAP memory freed\n", __func__);
}
static void mem_buf_rmt_free_buddy_mem(struct mem_buf_xfer_mem *xfer_mem)
{
struct sg_table *table = xfer_mem->mem_sgt;
pr_debug("%s: Freeing DMAHEAP-BUDDY memory\n", __func__);
cma_release(dev_get_cma_area(mem_buf_dev), sg_page(table->sgl),
table->sgl->length >> PAGE_SHIFT);
pr_debug("%s: DMAHEAP-BUDDY memory freed\n", __func__);
}
static void mem_buf_rmt_free_mem(struct mem_buf_xfer_mem *xfer_mem)
{
if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
mem_buf_rmt_free_dmaheap_mem(xfer_mem);
else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE)
mem_buf_rmt_free_buddy_mem(xfer_mem);
}
static
@@ -263,6 +340,8 @@ static void *mem_buf_alloc_xfer_mem_type_data(enum mem_buf_mem_type type,
if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
data = mem_buf_alloc_dmaheap_xfer_mem_type_data(rmt_data);
else if (type == MEM_BUF_BUDDY_MEM_TYPE)
data = NULL;
return data;
}
@@ -278,6 +357,7 @@ static void mem_buf_free_xfer_mem_type_data(enum mem_buf_mem_type type,
{
if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
mem_buf_free_dmaheap_xfer_mem_type_data(data);
/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}
static
@@ -784,6 +864,8 @@ static void *mem_buf_retrieve_mem_type_data_user(enum mem_buf_mem_type mem_type,
if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
data = mem_buf_retrieve_dmaheap_mem_type_data_user(mem_type_data);
else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE)
data = NULL;
return data;
}
@@ -800,6 +882,8 @@ static void *mem_buf_retrieve_mem_type_data(enum mem_buf_mem_type mem_type,
if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
data = mem_buf_retrieve_dmaheap_mem_type_data(mem_type_data);
else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE)
data = NULL;
return data;
}
@@ -814,11 +898,18 @@ static void mem_buf_free_mem_type_data(enum mem_buf_mem_type mem_type,
{
if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE)
mem_buf_free_dmaheap_mem_type_data(mem_type_data);
/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}
static bool is_valid_mem_type(enum mem_buf_mem_type mem_type)
{
- return mem_type == MEM_BUF_DMAHEAP_MEM_TYPE;
return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) ||
(mem_type == MEM_BUF_BUDDY_MEM_TYPE);
}
static bool is_valid_ioctl_mem_type(enum mem_buf_mem_type mem_type)
{
return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE);
}
void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)
@@ -1090,6 +1181,7 @@ static void mem_buf_free_alloc_data(struct mem_buf_allocation_data *alloc_data)
kfree(alloc_data->perms);
}
/* FIXME - remove is_valid_ioctl_mem_type. It's already handled */
int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args)
{
struct mem_buf_allocation_data alloc_data;
@@ -1098,8 +1190,8 @@ int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args)
if (!allocation_args->size || !allocation_args->nr_acl_entries ||
!allocation_args->acl_list ||
(allocation_args->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS) ||
- !is_valid_mem_type(allocation_args->src_mem_type) ||
- !is_valid_mem_type(allocation_args->dst_mem_type) ||
!is_valid_ioctl_mem_type(allocation_args->src_mem_type) ||
!is_valid_ioctl_mem_type(allocation_args->dst_mem_type) ||
allocation_args->reserved0 || allocation_args->reserved1 ||
allocation_args->reserved2)
return -EINVAL;


@@ -82,6 +82,12 @@ struct mem_buf_vm *pdata_array[] = {
NULL,
};
int mem_buf_current_vmid(void)
{
return current_vmid;
}
EXPORT_SYMBOL(mem_buf_current_vmid);
/*
* Opening this file acquires a refcount on vm->dev's kobject - see
* chrdev_open(). So private data won't be free'd out from


@@ -69,6 +69,7 @@ static size_t mem_buf_get_mem_type_alloc_req_size(enum mem_buf_mem_type type)
{
if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
return MEM_BUF_MAX_DMAHEAP_NAME_LEN;
/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
return 0;
}
@@ -78,6 +79,7 @@ static void mem_buf_populate_alloc_req_arb_payload(void *dst, void *src,
{
if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
strscpy(dst, src, MEM_BUF_MAX_DMAHEAP_NAME_LEN);
/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}
/*


@@ -13,7 +13,10 @@
#include <linux/gunyah/gh_common.h>
#include <linux/mm.h>
#define CREATE_TRACE_POINTS
#include "gh_rm_drv_private.h"
#include <trace/events/gunyah.h>
#define GH_RM_MEM_RELEASE_VALID_FLAGS GH_RM_MEM_RELEASE_CLEAR
#define GH_RM_MEM_RECLAIM_VALID_FLAGS GH_RM_MEM_RECLAIM_CLEAR
@@ -1742,8 +1745,16 @@ static int gh_rm_mem_release_helper(u32 fn_id, gh_memparcel_handle_t handle,
 */
int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags)
{
- return gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RELEASE,
- handle, flags);
int ret;
trace_gh_rm_mem_release(handle, flags);
ret = gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RELEASE,
handle, flags);
trace_gh_rm_mem_call_return(handle, ret);
return ret;
}
EXPORT_SYMBOL(gh_rm_mem_release);
@@ -1760,11 +1771,79 @@ EXPORT_SYMBOL(gh_rm_mem_release);
 */
int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
{
- return gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM,
- handle, flags);
int ret;
trace_gh_rm_mem_reclaim(handle, flags);
ret = gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM,
handle, flags);
trace_gh_rm_mem_call_return(handle, ret);
return ret;
}
EXPORT_SYMBOL(gh_rm_mem_reclaim);
static struct gh_mem_accept_req_payload_hdr *
gh_rm_mem_accept_prepare_request(gh_memparcel_handle_t handle, u8 mem_type,
u8 trans_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc,
struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
u16 map_vmid, size_t *req_payload_size)
{
void *req_buf;
struct gh_mem_accept_req_payload_hdr *req_payload_hdr;
u16 req_sgl_entries = 0, req_mem_attr_entries = 0;
u32 req_acl_entries = 0;
u32 fn_id = GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT;
if ((mem_type != GH_RM_MEM_TYPE_NORMAL &&
mem_type != GH_RM_MEM_TYPE_IO) ||
(trans_type != GH_RM_TRANS_TYPE_DONATE &&
trans_type != GH_RM_TRANS_TYPE_LEND &&
trans_type != GH_RM_TRANS_TYPE_SHARE) ||
(flags & ~GH_RM_MEM_ACCEPT_VALID_FLAGS) ||
(sgl_desc && sgl_desc->n_sgl_entries > GH_RM_MEM_MAX_SGL_ENTRIES))
return ERR_PTR(-EINVAL);
if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS &&
(!acl_desc || !acl_desc->n_acl_entries) &&
(!mem_attr_desc || !mem_attr_desc->n_mem_attr_entries))
return ERR_PTR(-EINVAL);
if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS) {
if (acl_desc)
req_acl_entries = acl_desc->n_acl_entries;
if (mem_attr_desc)
req_mem_attr_entries =
mem_attr_desc->n_mem_attr_entries;
}
if (sgl_desc)
req_sgl_entries = sgl_desc->n_sgl_entries;
req_buf = gh_rm_alloc_mem_request_buf(fn_id, req_acl_entries,
req_sgl_entries,
req_mem_attr_entries,
req_payload_size);
if (IS_ERR(req_buf))
return req_buf;
req_payload_hdr = req_buf;
req_payload_hdr->memparcel_handle = handle;
req_payload_hdr->mem_type = mem_type;
req_payload_hdr->trans_type = trans_type;
req_payload_hdr->flags = flags;
if (flags & GH_RM_MEM_ACCEPT_VALIDATE_LABEL)
req_payload_hdr->validate_label = label;
gh_rm_populate_mem_request(req_buf, fn_id, acl_desc, sgl_desc, map_vmid,
mem_attr_desc);
return req_payload_hdr;
}
/**
* gh_rm_mem_accept: Accept a handle representing memory. This results in
* the RM mapping the associated memory from the stage-2
@@ -1804,58 +1883,23 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
struct gh_mem_attr_desc *mem_attr_desc,
u16 map_vmid)
{
- struct gh_mem_accept_req_payload_hdr *req_payload_hdr;
struct gh_mem_accept_req_payload_hdr *req_payload;
struct gh_sgl_desc *ret_sgl;
struct gh_mem_accept_resp_payload *resp_payload;
- void *req_buf;
size_t req_payload_size, resp_payload_size;
- u16 req_sgl_entries = 0, req_mem_attr_entries = 0;
- u32 req_acl_entries = 0;
int gh_ret;
u32 fn_id = GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT;
- if ((mem_type != GH_RM_MEM_TYPE_NORMAL &&
- mem_type != GH_RM_MEM_TYPE_IO) ||
- (trans_type != GH_RM_TRANS_TYPE_DONATE &&
- trans_type != GH_RM_TRANS_TYPE_LEND &&
- trans_type != GH_RM_TRANS_TYPE_SHARE) ||
- (flags & ~GH_RM_MEM_ACCEPT_VALID_FLAGS))
- return ERR_PTR(-EINVAL);
- if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS &&
- (!acl_desc || !acl_desc->n_acl_entries) &&
- (!mem_attr_desc || !mem_attr_desc->n_mem_attr_entries))
- return ERR_PTR(-EINVAL);
- if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS) {
- if (acl_desc)
- req_acl_entries = acl_desc->n_acl_entries;
- if (mem_attr_desc)
- req_mem_attr_entries =
- mem_attr_desc->n_mem_attr_entries;
- }
- if (sgl_desc)
- req_sgl_entries = sgl_desc->n_sgl_entries;
- req_buf = gh_rm_alloc_mem_request_buf(fn_id, req_acl_entries,
- req_sgl_entries,
- req_mem_attr_entries,
- &req_payload_size);
- if (IS_ERR(req_buf))
- return req_buf;
- req_payload_hdr = req_buf;
- req_payload_hdr->memparcel_handle = handle;
- req_payload_hdr->mem_type = mem_type;
- req_payload_hdr->trans_type = trans_type;
- req_payload_hdr->flags = flags;
- if (flags & GH_RM_MEM_ACCEPT_VALIDATE_LABEL)
- req_payload_hdr->validate_label = label;
- gh_rm_populate_mem_request(req_buf, fn_id, acl_desc, sgl_desc, map_vmid,
- mem_attr_desc);
- resp_payload = gh_rm_call(fn_id, req_buf, req_payload_size,
trace_gh_rm_mem_accept(mem_type, flags, label, acl_desc, sgl_desc,
mem_attr_desc, &handle, map_vmid, trans_type);
req_payload = gh_rm_mem_accept_prepare_request(handle, mem_type, trans_type, flags,
label, acl_desc, sgl_desc, mem_attr_desc,
map_vmid, &req_payload_size);
if (IS_ERR(req_payload))
return ERR_CAST(req_payload);
resp_payload = gh_rm_call(fn_id, req_payload, req_payload_size,
&resp_payload_size, &gh_ret);
if (gh_ret || IS_ERR(resp_payload)) {
ret_sgl = ERR_CAST(resp_payload);
@@ -1880,7 +1924,9 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
}
err_rm_call:
- kfree(req_buf);
kfree(req_payload);
trace_gh_rm_mem_accept_reply(ret_sgl);
return ret_sgl;
}
EXPORT_SYMBOL(gh_rm_mem_accept);
@@ -2067,9 +2113,18 @@ int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
- return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_SHARE,
- mem_type, flags, label, acl_desc,
- sgl_desc, mem_attr_desc, handle);
int ret;
trace_gh_rm_mem_share(mem_type, flags, label, acl_desc, sgl_desc,
mem_attr_desc, handle, 0, SHARE);
ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_SHARE,
mem_type, flags, label, acl_desc,
sgl_desc, mem_attr_desc, handle);
trace_gh_rm_mem_call_return(*handle, ret);
return ret;
}
EXPORT_SYMBOL(gh_rm_mem_share);
@@ -2098,9 +2153,18 @@ int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
- return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_LEND,
- mem_type, flags, label, acl_desc,
- sgl_desc, mem_attr_desc, handle);
int ret;
trace_gh_rm_mem_lend(mem_type, flags, label, acl_desc, sgl_desc,
mem_attr_desc, handle, 0, LEND);
ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_LEND,
mem_type, flags, label, acl_desc,
sgl_desc, mem_attr_desc, handle);
trace_gh_rm_mem_call_return(*handle, ret);
return ret;
}
EXPORT_SYMBOL(gh_rm_mem_lend);
@@ -2138,6 +2202,11 @@ int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
int ret;
trace_gh_rm_mem_donate(mem_type, flags, label, acl_desc, sgl_desc,
mem_attr_desc, handle, 0, DONATE);
if (sgl_desc->n_sgl_entries != 1) {
pr_err("%s: Physically contiguous memory required\n", __func__);
return -EINVAL;
@@ -2153,9 +2222,13 @@ int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label,
return -EINVAL;
}
- return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_DONATE,
- mem_type, flags, label, acl_desc,
- sgl_desc, mem_attr_desc, handle);
ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_DONATE,
mem_type, flags, label, acl_desc,
sgl_desc, mem_attr_desc, handle);
trace_gh_rm_mem_call_return(*handle, ret);
return ret;
}
EXPORT_SYMBOL(gh_rm_mem_donate);
@@ -2189,6 +2262,8 @@ int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags,
unsigned int i;
int ret = 0, gh_ret;
trace_gh_rm_mem_notify(handle, flags, mem_info_tag, vmid_desc);
if ((flags & ~GH_RM_MEM_NOTIFY_VALID_FLAGS) ||
((flags & GH_RM_MEM_NOTIFY_RECIPIENT_SHARED) && (!vmid_desc ||
(vmid_desc &&


@@ -10,6 +10,9 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/xarray.h>
#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
@@ -267,6 +270,7 @@ struct virtio_mem {
/* For now, only allow one virtio-mem device */
static struct virtio_mem *virtio_mem_dev;
static DEFINE_XARRAY(xa_membuf);
/*
* We have to share a single online_page callback among all virtio-mem
@@ -283,6 +287,8 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
static void virtio_mem_retry(struct virtio_mem *vm);
static int virtio_mem_create_resource(struct virtio_mem *vm);
static void virtio_mem_delete_resource(struct virtio_mem *vm);
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size);
/*
* Register a virtio-mem device so it will be considered for the online_page
@@ -1325,23 +1331,110 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
generic_online_page(page, order);
}
/* Default error values to -ENOMEM - virtio_mem_run_wq expects certain rc only */
static int virtio_mem_convert_error_code(int rc)
{
if (rc == -ENOSPC || rc == -ETXTBSY || rc == -EBUSY || rc == -EAGAIN)
return rc;
return -ENOMEM;
}
/*
* mem-buf currently is handle based. This means we must break up requests into
* the common unit size (device_block_size). GH_RM_MEM_DONATE does not actually require
* tracking the handle, so this could be optimized further.
*
* This function must return one of ENOSPC, ETXTBSY, EBUSY, ENOMEM, EAGAIN
*/
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
void *membuf;
struct mem_buf_allocation_data alloc_data;
u32 vmids[1];
u32 perms[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
struct gh_sgl_desc *gh_sgl;
uint64_t orig_addr = addr;
int ret;
u64 block_size = vm->device_block_size;
dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr, dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1); addr + size - 1);
WARN_ON(1);
return -EINVAL; vmids[0] = mem_buf_current_vmid();
alloc_data.size = block_size;
alloc_data.nr_acl_entries = ARRAY_SIZE(vmids);
alloc_data.vmids = vmids;
alloc_data.perms = perms;
alloc_data.trans_type = GH_RM_TRANS_TYPE_DONATE;
gh_sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
if (!gh_sgl)
return -ENOMEM;
/* ipa_base/size configured below */
gh_sgl->n_sgl_entries = 1;
alloc_data.sgl_desc = gh_sgl;
alloc_data.src_mem_type = MEM_BUF_BUDDY_MEM_TYPE;
alloc_data.src_data = NULL;
alloc_data.dst_mem_type = MEM_BUF_BUDDY_MEM_TYPE;
alloc_data.dst_data = NULL;
while (size) {
gh_sgl->sgl_entries[0].ipa_base = addr;
gh_sgl->sgl_entries[0].size = block_size;
membuf = mem_buf_alloc(&alloc_data);
if (IS_ERR(membuf)) {
dev_err(&vm->vdev->dev, "mem_buf_alloc failed with %d\n", PTR_ERR(membuf));
ret = virtio_mem_convert_error_code(PTR_ERR(membuf));
goto err_mem_buf_alloc;
}
xa_store(&xa_membuf, addr, membuf, GFP_KERNEL);
vm->plugged_size += block_size;
size -= block_size;
addr += block_size;
}
kfree(gh_sgl);
return 0;
err_mem_buf_alloc:
if (addr > orig_addr)
virtio_mem_send_unplug_request(vm, orig_addr, addr - orig_addr);
kfree(gh_sgl);
return ret;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
uint64_t size)
{
void *membuf;
u64 block_size = vm->device_block_size;
uint64_t saved_size = size;
dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr, dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1); addr + size - 1);
WARN_ON(1);
return -EINVAL; while (size) {
membuf = xa_load(&xa_membuf, addr);
if (WARN(!membuf, "No membuf for %llx\n", addr))
return -EINVAL;
mem_buf_free(membuf);
size -= block_size;
addr += block_size;
}
/*
* Only update if all successful to be in-line with how errors
* are handled by this function's callers
*/
vm->plugged_size -= saved_size;
return 0;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
@@ -2784,7 +2877,7 @@ static int virtio_mem_remove(struct platform_device *vdev)
return 0;
}
- static void __maybe_unused virtio_mem_config_changed(struct platform_device *vdev)
static void virtio_mem_config_changed(struct platform_device *vdev)
{
struct virtio_mem *vm = platform_get_drvdata(vdev);
@@ -2819,9 +2912,16 @@ int virtio_mem_update_config_size(s64 size, bool sync)
virtio_mem_config_changed(vm->vdev);
- if (sync)
if (sync) {
flush_work(&vm->wq);
if (vm->requested_size != vm->plugged_size) {
dev_err(&vm->vdev->dev, "Request failed: 0x%llx, plugged: 0x%llx\n",
vm->requested_size, vm->plugged_size);
return -ENOMEM;
}
}
return 0;
}


@@ -14,6 +14,9 @@
#include <linux/dma-buf.h>
#include <uapi/linux/mem-buf.h>
/* For in-kernel use only, not allowed for userspace ioctl */
#define MEM_BUF_BUDDY_MEM_TYPE (MEM_BUF_ION_MEM_TYPE + 2)
/* Used to obtain the underlying vmperm struct of a DMA-BUF */
struct mem_buf_vmperm *to_mem_buf_vmperm(struct dma_buf *dmabuf);
@@ -101,6 +104,7 @@ int mem_buf_reclaim(struct dma_buf *dmabuf);
void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data);
void mem_buf_free(void *membuf);
struct gh_sgl_desc *mem_buf_get_sgl(void *membuf);
int mem_buf_current_vmid(void);
#else
static inline void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)
@@ -114,6 +118,10 @@ static inline struct gh_sgl_desc *mem_buf_get_sgl(void *membuf)
{
return ERR_PTR(-EINVAL);
}
static inline int mem_buf_current_vmid(void)
{
return -EINVAL;
}
#endif /* CONFIG_QCOM_MEM_BUF */
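For orientation, a condensed sketch of the in-kernel allocation pattern these declarations enable, modeled on the virtio_mem caller above. The function name and the fixed 2 MB size are illustrative assumptions, not part of the commit; assumed includes are those already added in the virtio_mem hunk (<linux/mem-buf.h>, <soc/qcom/secure_buffer.h>) plus <linux/slab.h> and <linux/sizes.h>.

/* Illustrative only: donate one 2 MB chunk of buddy memory at a given IPA. */
static int example_donate_buddy_chunk(u64 ipa_base)
{
	struct mem_buf_allocation_data alloc_data = {0};
	struct gh_sgl_desc *sgl;
	u32 vmids[] = { mem_buf_current_vmid() };
	u32 perms[] = { PERM_READ | PERM_WRITE | PERM_EXEC };
	void *membuf;

	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;
	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = ipa_base;
	sgl->sgl_entries[0].size = SZ_2M;

	alloc_data.size = SZ_2M;
	alloc_data.nr_acl_entries = ARRAY_SIZE(vmids);
	alloc_data.vmids = vmids;
	alloc_data.perms = perms;
	alloc_data.trans_type = GH_RM_TRANS_TYPE_DONATE;
	alloc_data.sgl_desc = sgl;
	alloc_data.src_mem_type = MEM_BUF_BUDDY_MEM_TYPE;	/* kernel-only type */
	alloc_data.dst_mem_type = MEM_BUF_BUDDY_MEM_TYPE;

	membuf = mem_buf_alloc(&alloc_data);
	kfree(sgl);	/* caller's sgl copy is no longer needed */
	if (IS_ERR(membuf))
		return PTR_ERR(membuf);
	/* keep membuf; mem_buf_free(membuf) later reclaims the chunk */
	return 0;
}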

View File

@@ -0,0 +1,462 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gunyah
#if !defined(_TRACE_GUNYAH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GUNYAH_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include <soc/qcom/secure_buffer.h>
#ifndef __GUNYAH_HELPER_FUNCTIONS
#define __GUNYAH_HELPER_FUNCTIONS
#define MAX_ENTRIES_TO_PRINT 4
enum {
DONATE = 0,
LEND = 1,
SHARE = 2
};
static const char *__print_acl_arr(struct trace_seq *p, u8 *acl_perms, u16 *acl_vmids,
int count)
{
const char *ret;
int i = 0;
u8 *perms = acl_perms;
u16 *vmids = acl_vmids;
ret = trace_seq_buffer_ptr(p);
trace_seq_putc(p, '{');
for (i = 0; i < count; i++) {
trace_seq_printf(p, "(0x%x,", *vmids);
trace_seq_printf(p, "%s%s%s)",
((*perms & 0x4) ? "R" : ""),
((*perms & 0x2) ? "W" : ""),
((*perms & 0x1) ? "X" : "")
);
perms++;
vmids++;
if (i != count-1)
trace_seq_printf(p, ", ");
}
trace_seq_putc(p, '}');
trace_seq_putc(p, 0);
return ret;
}
#endif
DECLARE_EVENT_CLASS(gh_rm_mem_accept_donate_lend_share,
TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),
TP_ARGS(mem_type, flags, label,
acl_desc, sgl_desc,
mem_attr_desc,
handle, map_vmid, trans_type),
TP_STRUCT__entry(
__field(u8, mem_type)
__field(u8, flags)
__field(gh_label_t, label)
/* gh_acl_desc */
__field(u32, n_acl_entries)
__dynamic_array(u16, acl_vmid_arr,
((acl_desc != NULL) ? acl_desc->n_acl_entries : 0))
__dynamic_array(u8, acl_perm_arr,
((acl_desc != NULL) ? acl_desc->n_acl_entries : 0))
/* gh_sgl_desc */
__field(u16, n_sgl_entries)
__dynamic_array(u64, sgl_ipa_base_arr,
((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: sgl_desc->n_sgl_entries)
: 0))
__dynamic_array(u64, sgl_size_arr,
((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: sgl_desc->n_sgl_entries)
: 0))
/* mem_attr_desc */
__field(u16, n_mem_attr_entries)
__dynamic_array(u16, mem_attr_attr_arr,
((mem_attr_desc != NULL)
? mem_attr_desc->n_mem_attr_entries : 0))
__dynamic_array(u16, mem_attr_vmid_arr,
((mem_attr_desc != NULL)
? mem_attr_desc->n_mem_attr_entries : 0))
__field(gh_memparcel_handle_t, handle)
__field(u16, map_vmid)
__field(u8, trans_type)
__field(int, sgl_entries_to_print)
),
TP_fast_assign(
unsigned int i;
/* gh_acl_desc */
u16 *acl_vmids_arr_ptr = __get_dynamic_array(acl_vmid_arr);
u8 *acl_perms_arr_ptr = __get_dynamic_array(acl_perm_arr);
/* gh_sgl_desc */
u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr);
u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr);
/* mem_attr_desc */
u16 *mem_attr_attr_arr_ptr = __get_dynamic_array(mem_attr_attr_arr);
u16 *mem_attr_vmid_arr_ptr = __get_dynamic_array(mem_attr_vmid_arr);
__entry->mem_type = mem_type;
__entry->flags = flags;
__entry->label = label;
/* gh_acl_desc */
if (acl_desc != NULL) {
__entry->n_acl_entries = acl_desc->n_acl_entries;
for (i = 0; i < __entry->n_acl_entries; i++) {
acl_vmids_arr_ptr[i] = acl_desc->acl_entries[i].vmid;
acl_perms_arr_ptr[i] = acl_desc->acl_entries[i].perms;
}
} else {
__entry->n_acl_entries = 0;
}
/* gh_sgl_desc */
if (sgl_desc != NULL) {
__entry->n_sgl_entries = sgl_desc->n_sgl_entries;
__entry->sgl_entries_to_print =
__entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: __entry->n_sgl_entries;
for (i = 0; i < __entry->sgl_entries_to_print; i++) {
sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base;
sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size;
}
} else {
__entry->n_sgl_entries = 0;
__entry->sgl_entries_to_print = 0;
}
/* mem_attr_desc */
if (mem_attr_desc != NULL) {
__entry->n_mem_attr_entries = mem_attr_desc->n_mem_attr_entries;
for (i = 0; i < __entry->n_mem_attr_entries; i++) {
mem_attr_attr_arr_ptr[i] = mem_attr_desc->attr_entries[i].attr;
mem_attr_vmid_arr_ptr[i] = mem_attr_desc->attr_entries[i].vmid;
}
} else {
__entry->n_mem_attr_entries = 0;
}
__entry->handle = *handle;
__entry->map_vmid = map_vmid;
__entry->trans_type = trans_type;
),
TP_printk("mem_type = %s flags = 0x%x label = %u\t\t"
"acl_entries = %u acl_arr = %s\t\t"
"sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t"
"mem_attr_entries = %u mem_attr_attr = %s mem_attr_vmid = %s\t\t"
"handle = %u map_vmid = 0x%x trans_type = %s",
__print_symbolic(__entry->mem_type,
{ 0, "Normal Memory" },
{ 1, "IO Memory" }),
__entry->flags,
__entry->label,
__entry->n_acl_entries,
(__entry->n_acl_entries
? __print_acl_arr(p, __get_dynamic_array(acl_perm_arr),
__get_dynamic_array(acl_vmid_arr), __entry->n_acl_entries)
: "N/A"),
__entry->n_sgl_entries,
(__entry->n_sgl_entries
? __print_array(__get_dynamic_array(sgl_ipa_base_arr),
__entry->sgl_entries_to_print, sizeof(u64))
: "N/A"),
(__entry->n_sgl_entries
? __print_array(__get_dynamic_array(sgl_size_arr),
__entry->sgl_entries_to_print, sizeof(u64))
: "N/A"),
__entry->n_mem_attr_entries,
(__entry->n_mem_attr_entries
? __print_array(__get_dynamic_array(mem_attr_attr_arr),
__entry->n_mem_attr_entries, sizeof(u16))
: "N/A"),
(__entry->n_mem_attr_entries
? __print_array(__get_dynamic_array(mem_attr_vmid_arr),
__entry->n_mem_attr_entries, sizeof(u16))
: "N/A"),
__entry->handle, __entry->map_vmid,
__print_symbolic(__entry->trans_type,
{ 0, "Donate" },
{ 1, "Lend" },
{ 2, "Share" })
)
);
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_accept,
TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),
TP_ARGS(mem_type, flags, label,
acl_desc, sgl_desc,
mem_attr_desc,
handle, map_vmid, trans_type)
);
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_donate,
TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),
TP_ARGS(mem_type, flags, label,
acl_desc, sgl_desc,
mem_attr_desc,
handle, map_vmid, trans_type)
);
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_lend,
TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),
TP_ARGS(mem_type, flags, label,
acl_desc, sgl_desc,
mem_attr_desc,
handle, map_vmid, trans_type)
);
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_share,
TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),
TP_ARGS(mem_type, flags, label,
acl_desc, sgl_desc,
mem_attr_desc,
handle, map_vmid, trans_type)
);
TRACE_EVENT(gh_rm_mem_accept_reply,
TP_PROTO(struct gh_sgl_desc *sgl_desc),
TP_ARGS(sgl_desc),
TP_STRUCT__entry(
__field(u16, n_sgl_entries)
__dynamic_array(u64, sgl_ipa_base_arr,
((sgl_desc != NULL)
? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: sgl_desc->n_sgl_entries)
: 0))
__dynamic_array(u64, sgl_size_arr,
((sgl_desc != NULL)
? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: sgl_desc->n_sgl_entries)
: 0))
__field(int, sgl_entries_to_print)
__field(bool, is_error)
),
TP_fast_assign(
unsigned int i;
u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr);
u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr);
__entry->is_error = IS_ERR(sgl_desc);
if (sgl_desc != NULL && __entry->is_error == false) {
__entry->n_sgl_entries = sgl_desc->n_sgl_entries;
__entry->sgl_entries_to_print =
__entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT
? MAX_ENTRIES_TO_PRINT
: __entry->n_sgl_entries;
for (i = 0; i < __entry->sgl_entries_to_print; i++) {
sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base;
sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size;
}
} else {
__entry->n_sgl_entries = 0;
__entry->sgl_entries_to_print = 0;
}
),
TP_printk("sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t",
__entry->n_sgl_entries,
((__entry->n_sgl_entries && __entry->is_error == false)
? __print_array(__get_dynamic_array(sgl_ipa_base_arr),
__entry->sgl_entries_to_print, sizeof(u64))
: "N/A"),
((__entry->n_sgl_entries && __entry->is_error == false)
? __print_array(__get_dynamic_array(sgl_size_arr),
__entry->sgl_entries_to_print, sizeof(u64))
: "N/A")
)
);
DECLARE_EVENT_CLASS(gh_rm_mem_release_reclaim,
TP_PROTO(gh_memparcel_handle_t handle, u8 flags),
TP_ARGS(handle, flags),
TP_STRUCT__entry(
__field(gh_memparcel_handle_t, handle)
__field(u8, flags)
),
TP_fast_assign(
__entry->handle = handle;
__entry->flags = flags;
),
TP_printk("handle_s = %u flags = 0x%x",
__entry->handle,
__entry->flags
)
);
DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_release,
TP_PROTO(gh_memparcel_handle_t handle, u8 flags),
TP_ARGS(handle, flags)
);
DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_reclaim,
TP_PROTO(gh_memparcel_handle_t handle, u8 flags),
TP_ARGS(handle, flags)
);
TRACE_EVENT(gh_rm_mem_call_return,
TP_PROTO(gh_memparcel_handle_t handle, int return_val),
TP_ARGS(handle, return_val),
TP_STRUCT__entry(
__field(gh_memparcel_handle_t, handle)
__field(int, return_val)
),
TP_fast_assign(
__entry->handle = handle;
__entry->return_val = return_val;
),
TP_printk("handle = %u, return_value = %d", __entry->handle, __entry->return_val)
);
TRACE_EVENT(gh_rm_mem_notify,
TP_PROTO(gh_memparcel_handle_t handle, u8 flags, gh_label_t mem_info_tag,
struct gh_notify_vmid_desc *vmid_desc),
TP_ARGS(handle, flags, mem_info_tag, vmid_desc),
TP_STRUCT__entry(
__field(gh_memparcel_handle_t, handle)
__field(u8, flags)
__field(gh_label_t, mem_info_tag)
__field(u16, n_vmid_entries)
__dynamic_array(u16, entry_vmid_arr,
((vmid_desc != NULL) ? vmid_desc->n_vmid_entries : 0))
),
TP_fast_assign(
unsigned int i;
/* vmid_desc */
u16 *entry_vmid_arr_ptr = __get_dynamic_array(entry_vmid_arr);
__entry->handle = handle;
__entry->flags = flags;
__entry->mem_info_tag = mem_info_tag;
if (vmid_desc != NULL) {
__entry->n_vmid_entries = vmid_desc->n_vmid_entries;
for (i = 0; i < __entry->n_vmid_entries; i++)
entry_vmid_arr_ptr[i] = vmid_desc->vmid_entries[i].vmid;
} else {
__entry->n_vmid_entries = 0;
}
),
TP_printk("handle = %u flags = 0x%x mem_info_tag = %u\t\t"
"vmid_entries = %u entry_vmid_arr = %s",
__entry->handle,
__entry->flags,
__entry->mem_info_tag,
__entry->n_vmid_entries,
(__entry->n_vmid_entries
? __print_array(__get_dynamic_array(entry_vmid_arr),
__entry->n_vmid_entries, sizeof(u16))
: "N/A")
)
);
#endif /* _TRACE_GUNYAH_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
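Usage note: as with any kernel tracepoint header, this file only declares the events; exactly one compilation unit must instantiate them by defining CREATE_TRACE_POINTS before the include, which is what the resource-manager file patched earlier in this commit does. A minimal sketch of the pattern:

/* In the single .c file that owns the tracepoints (the RM driver file patched above): */
#define CREATE_TRACE_POINTS
#include <trace/events/gunyah.h>

/* Every other user includes the header without defining CREATE_TRACE_POINTS. */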


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_MEM_BUF_H
@@ -20,6 +21,7 @@ enum mem_buf_mem_type {
MEM_BUF_MAX_MEM_TYPE,
};
#define MEM_BUF_DMAHEAP_MEM_TYPE (MEM_BUF_ION_MEM_TYPE + 1)
/* RESERVED for MEM_BUF_BUDDY_MEM_TYPE: MEM_BUF_ION_MEM_TYPE + 2 */
/* The mem-buf values that represent VMIDs for an ACL. */
#define MEM_BUF_VMID_PRIMARY_VM 0