From a375851b6a05b07b3516f4b4f20a921bc8e21361 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Fri, 15 Apr 2022 20:09:58 -0700 Subject: [PATCH 1/7] mem-buf: Define MEM_BUF_MEM_TYPE_BUDDY Define a new remote alloc type which tries to allocate contiguous memory from cma. This memory will be transferred to a guest VM and added to the buddy allocator using memory hotplug. Change-Id: I04a4e734d24b6d8decac4e936d12d5e43b7f750b Signed-off-by: Patrick Daly --- drivers/soc/qcom/mem_buf/mem-buf-dev.c | 16 +++- drivers/soc/qcom/mem_buf/mem-buf-gh.c | 97 ++++++++++++++++++++++++- drivers/soc/qcom/mem_buf/mem-buf-msgq.c | 2 + include/linux/mem-buf.h | 3 + include/uapi/linux/mem-buf.h | 2 + 5 files changed, 116 insertions(+), 4 deletions(-) diff --git a/drivers/soc/qcom/mem_buf/mem-buf-dev.c b/drivers/soc/qcom/mem_buf/mem-buf-dev.c index 30cb429adca3..2f8a769e211f 100644 --- a/drivers/soc/qcom/mem_buf/mem-buf-dev.c +++ b/drivers/soc/qcom/mem_buf/mem-buf-dev.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -102,6 +103,7 @@ static int mem_buf_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; u64 dma_mask = IS_ENABLED(CONFIG_ARM64) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32); + int unused; if (of_property_match_string(dev->of_node, "qcom,mem-buf-capabilities", "supplier") >= 0) @@ -123,14 +125,26 @@ static int mem_buf_probe(struct platform_device *pdev) return ret; } + if (of_find_property(dev->of_node, "memory-region", &unused)) { + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0); + if (ret) { + dev_err(dev, "Failed to get memory-region property %d\n", ret); + return ret; + } + } + ret = mem_buf_vm_init(dev); if (ret) { dev_err(dev, "mem_buf_vm_init failed %d\n", ret); - return ret; + goto err_vm_init; } mem_buf_dev = dev; return 0; + +err_vm_init: + of_reserved_mem_device_release(dev); + return ret; } static int mem_buf_remove(struct platform_device *pdev) diff --git a/drivers/soc/qcom/mem_buf/mem-buf-gh.c b/drivers/soc/qcom/mem_buf/mem-buf-gh.c index 119a54ffcd96..545d18bdf737 100644 --- a/drivers/soc/qcom/mem_buf/mem-buf-gh.c +++ b/drivers/soc/qcom/mem_buf/mem-buf-gh.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include "../../../../drivers/dma-buf/heaps/qcom_sg_ops.h" #include "mem-buf-gh.h" @@ -202,12 +204,75 @@ static int mem_buf_rmt_alloc_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem) return 0; } +/* In future, try allocating from buddy if cma not available */ +static int mem_buf_rmt_alloc_buddy_mem(struct mem_buf_xfer_mem *xfer_mem) +{ + struct cma *cma; + struct sg_table *table; + struct page *page; + int ret; + u32 align; + size_t nr_pages; + + pr_debug("%s: Starting DMAHEAP-BUDDY allocation\n", __func__); + + /* + * For the common case of 4Mb transfer, we want it to be nicely aligned + * to allow for 2Mb block mappings in S2 pagetable. + */ + align = min(get_order(xfer_mem->size), get_order(SZ_2M)); + nr_pages = xfer_mem->size >> PAGE_SHIFT; + + /* + * Don't use dev_get_cma_area() as we don't want to fall back to + * dma_contiguous_default_area. 
+ */ + cma = mem_buf_dev->cma_area; + if (!cma) + return -ENOMEM; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto err_alloc_table; + } + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto err_sg_init; + + page = cma_alloc(cma, nr_pages, align, false); + if (!page) { + ret = -ENOMEM; + goto err_cma_alloc; + } + + sg_set_page(table->sgl, page, nr_pages << PAGE_SHIFT, 0); + + /* Zero memory before transferring to Guest VM */ + memset(page_address(page), 0, nr_pages << PAGE_SHIFT); + + xfer_mem->mem_sgt = table; + xfer_mem->secure_alloc = false; + pr_debug("%s: DMAHEAP-BUDDY allocation complete\n", __func__); + return 0; + +err_cma_alloc: + sg_free_table(table); +err_sg_init: + kfree(table); +err_alloc_table: + return ret; +} + static int mem_buf_rmt_alloc_mem(struct mem_buf_xfer_mem *xfer_mem) { int ret = -EINVAL; if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) ret = mem_buf_rmt_alloc_dmaheap_mem(xfer_mem); + else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE) + ret = mem_buf_rmt_alloc_buddy_mem(xfer_mem); return ret; } @@ -233,10 +298,22 @@ static void mem_buf_rmt_free_dmaheap_mem(struct mem_buf_xfer_mem *xfer_mem) pr_debug("%s: DMAHEAP memory freed\n", __func__); } +static void mem_buf_rmt_free_buddy_mem(struct mem_buf_xfer_mem *xfer_mem) +{ + struct sg_table *table = xfer_mem->mem_sgt; + + pr_debug("%s: Freeing DMAHEAP-BUDDY memory\n", __func__); + cma_release(dev_get_cma_area(mem_buf_dev), sg_page(table->sgl), + table->sgl->length >> PAGE_SHIFT); + pr_debug("%s: DMAHEAP-BUDDY memory freed\n", __func__); +} + static void mem_buf_rmt_free_mem(struct mem_buf_xfer_mem *xfer_mem) { if (xfer_mem->mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) mem_buf_rmt_free_dmaheap_mem(xfer_mem); + else if (xfer_mem->mem_type == MEM_BUF_BUDDY_MEM_TYPE) + mem_buf_rmt_free_buddy_mem(xfer_mem); } static @@ -263,6 +340,8 @@ static void *mem_buf_alloc_xfer_mem_type_data(enum mem_buf_mem_type type, if (type == MEM_BUF_DMAHEAP_MEM_TYPE) 
data = mem_buf_alloc_dmaheap_xfer_mem_type_data(rmt_data); + else if (type == MEM_BUF_BUDDY_MEM_TYPE) + data = NULL; return data; } @@ -278,6 +357,7 @@ static void mem_buf_free_xfer_mem_type_data(enum mem_buf_mem_type type, { if (type == MEM_BUF_DMAHEAP_MEM_TYPE) mem_buf_free_dmaheap_xfer_mem_type_data(data); + /* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */ } static @@ -784,6 +864,8 @@ static void *mem_buf_retrieve_mem_type_data_user(enum mem_buf_mem_type mem_type, if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) data = mem_buf_retrieve_dmaheap_mem_type_data_user(mem_type_data); + else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE) + data = NULL; return data; } @@ -800,6 +882,8 @@ static void *mem_buf_retrieve_mem_type_data(enum mem_buf_mem_type mem_type, if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) data = mem_buf_retrieve_dmaheap_mem_type_data(mem_type_data); + else if (mem_type == MEM_BUF_BUDDY_MEM_TYPE) + data = NULL; return data; } @@ -814,11 +898,18 @@ static void mem_buf_free_mem_type_data(enum mem_buf_mem_type mem_type, { if (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) mem_buf_free_dmaheap_mem_type_data(mem_type_data); + /* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */ } static bool is_valid_mem_type(enum mem_buf_mem_type mem_type) { - return mem_type == MEM_BUF_DMAHEAP_MEM_TYPE; + return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE) || + (mem_type == MEM_BUF_BUDDY_MEM_TYPE); +} + +static bool is_valid_ioctl_mem_type(enum mem_buf_mem_type mem_type) +{ + return (mem_type == MEM_BUF_DMAHEAP_MEM_TYPE); } void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data) @@ -1098,8 +1189,8 @@ int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args) if (!allocation_args->size || !allocation_args->nr_acl_entries || !allocation_args->acl_list || (allocation_args->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS) || - !is_valid_mem_type(allocation_args->src_mem_type) || - !is_valid_mem_type(allocation_args->dst_mem_type) || + !is_valid_ioctl_mem_type(allocation_args->src_mem_type) || + 
!is_valid_ioctl_mem_type(allocation_args->dst_mem_type) || allocation_args->reserved0 || allocation_args->reserved1 || allocation_args->reserved2) return -EINVAL; diff --git a/drivers/soc/qcom/mem_buf/mem-buf-msgq.c b/drivers/soc/qcom/mem_buf/mem-buf-msgq.c index 3473be63465c..953c40d12add 100644 --- a/drivers/soc/qcom/mem_buf/mem-buf-msgq.c +++ b/drivers/soc/qcom/mem_buf/mem-buf-msgq.c @@ -69,6 +69,7 @@ static size_t mem_buf_get_mem_type_alloc_req_size(enum mem_buf_mem_type type) { if (type == MEM_BUF_DMAHEAP_MEM_TYPE) return MEM_BUF_MAX_DMAHEAP_NAME_LEN; + /* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */ return 0; } @@ -78,6 +79,7 @@ static void mem_buf_populate_alloc_req_arb_payload(void *dst, void *src, { if (type == MEM_BUF_DMAHEAP_MEM_TYPE) strscpy(dst, src, MEM_BUF_MAX_DMAHEAP_NAME_LEN); + /* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */ } /* diff --git a/include/linux/mem-buf.h b/include/linux/mem-buf.h index ba62641ad8a4..8c08679abd52 100644 --- a/include/linux/mem-buf.h +++ b/include/linux/mem-buf.h @@ -14,6 +14,9 @@ #include #include +/* For in-kernel use only, not allowed for userspace ioctl */ +#define MEM_BUF_BUDDY_MEM_TYPE (MEM_BUF_ION_MEM_TYPE + 2) + /* Used to obtain the underlying vmperm struct of a DMA-BUF */ struct mem_buf_vmperm *to_mem_buf_vmperm(struct dma_buf *dmabuf); diff --git a/include/uapi/linux/mem-buf.h b/include/uapi/linux/mem-buf.h index 3bfd4505d757..b3f0ae5a195e 100644 --- a/include/uapi/linux/mem-buf.h +++ b/include/uapi/linux/mem-buf.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _UAPI_LINUX_MEM_BUF_H @@ -20,6 +21,7 @@ enum mem_buf_mem_type { MEM_BUF_MAX_MEM_TYPE, }; #define MEM_BUF_DMAHEAP_MEM_TYPE (MEM_BUF_ION_MEM_TYPE + 1) +/* RESERVED for MEM_BUF_BUDDY_MEM_TYPE: MEM_BUF_ION_MEM_TYPE + 2 */ /* The mem-buf values that represent VMIDs for an ACL. */ #define MEM_BUF_VMID_PRIMARY_VM 0 From 12058013a7d15ec5a19648e0d50a2517614713ff Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Wed, 8 Jun 2022 19:41:33 -0700 Subject: [PATCH 2/7] mem-buf: Add accessor for current_vmid Allow other modules to access this variable. Change-Id: I4eb92e6ea7bed0fb854a7aafe355f33a09120389 Signed-off-by: Patrick Daly --- drivers/soc/qcom/mem_buf/mem-buf-ids.c | 6 ++++++ include/linux/mem-buf.h | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/drivers/soc/qcom/mem_buf/mem-buf-ids.c b/drivers/soc/qcom/mem_buf/mem-buf-ids.c index a81f7f7bf347..846c7fb038ff 100644 --- a/drivers/soc/qcom/mem_buf/mem-buf-ids.c +++ b/drivers/soc/qcom/mem_buf/mem-buf-ids.c @@ -82,6 +82,12 @@ struct mem_buf_vm *pdata_array[] = { NULL, }; +int mem_buf_current_vmid(void) +{ + return current_vmid; +} +EXPORT_SYMBOL(mem_buf_current_vmid); + /* * Opening this file acquires a refcount on vm->dev's kobject - see * chrdev_open(). 
So private data won't be free'd out from

diff --git a/include/linux/mem-buf.h b/include/linux/mem-buf.h
index 8c08679abd52..43823d1304ae 100644
--- a/include/linux/mem-buf.h
+++ b/include/linux/mem-buf.h
@@ -104,6 +104,7 @@ int mem_buf_reclaim(struct dma_buf *dmabuf);
 void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data);
 void mem_buf_free(void *membuf);
 struct gh_sgl_desc *mem_buf_get_sgl(void *membuf);
+int mem_buf_current_vmid(void);
 
 #else
 static inline void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)
@@ -117,6 +118,10 @@ static inline struct gh_sgl_desc *mem_buf_get_sgl(void *membuf)
 {
 	return ERR_PTR(-EINVAL);
 }
+static inline int mem_buf_current_vmid(void)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_QCOM_MEM_BUF */

From 1b828c9bb82c2666cf68d2da984b9e4b5b8df22f Mon Sep 17 00:00:00 2001
From: Patrick Daly
Date: Fri, 24 Jun 2022 16:21:47 -0700
Subject: [PATCH 3/7] virtio-mem: Add mem-buf interfaces

Add IPC communication via mem-buf-msgq driver for requesting memory
from the Host VM.

Change-Id: I9b8429e71803fdf9e2f82145d850a685a2b3549e
Signed-off-by: Patrick Daly
---
 drivers/soc/qcom/mem_buf/mem-buf-gh.c |   1 +
 drivers/virtio/virtio_mem.c           | 105 ++++++++++++++++++++++++--
 2 files changed, 100 insertions(+), 6 deletions(-)

diff --git a/drivers/soc/qcom/mem_buf/mem-buf-gh.c b/drivers/soc/qcom/mem_buf/mem-buf-gh.c
index 545d18bdf737..1542b27415e7 100644
--- a/drivers/soc/qcom/mem_buf/mem-buf-gh.c
+++ b/drivers/soc/qcom/mem_buf/mem-buf-gh.c
@@ -1181,6 +1181,7 @@ static void mem_buf_free_alloc_data(struct mem_buf_allocation_data *alloc_data)
 	kfree(alloc_data->perms);
 }
 
+/* FIXME - remove is_valid_ioctl_mem_type.
Its already handled */ int mem_buf_alloc_fd(struct mem_buf_alloc_ioctl_arg *allocation_args) { struct mem_buf_allocation_data alloc_data; diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 01a8f20fb0ce..b7765456e591 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -10,6 +10,9 @@ #include #include +#include +#include +#include #include #include #include @@ -267,6 +270,7 @@ struct virtio_mem { /* For now, only allow one virtio-mem device */ static struct virtio_mem *virtio_mem_dev; +static DEFINE_XARRAY(xa_membuf); /* * We have to share a single online_page callback among all virtio-mem @@ -283,6 +287,8 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn, static void virtio_mem_retry(struct virtio_mem *vm); static int virtio_mem_create_resource(struct virtio_mem *vm); static void virtio_mem_delete_resource(struct virtio_mem *vm); +static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, + uint64_t size); /* * Register a virtio-mem device so it will be considered for the online_page @@ -1325,23 +1331,103 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order) generic_online_page(page, order); } +/* Default error values to -ENOMEM - virtio_mem_run_wq expects certain rc only */ +static int virtio_mem_convert_error_code(int rc) +{ + if (rc == -ENOSPC || rc == -ETXTBSY || rc == -EBUSY || rc == -EAGAIN) + return rc; + return -ENOMEM; +} + +/* + * mem-buf currently is handle based. This means we must break up requests into + * the common unit size(device_block_size). GH_RM_MEM_DONATE does not actually require + * tracking the handle, so this could be optimized further. 
+ *
+ * This function must return one of ENOSPC, ETXTBSY, EBUSY, ENOMEM, EAGAIN
+ */
 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
 					uint64_t size)
 {
+	void *membuf;
+	struct mem_buf_allocation_data alloc_data;
+	u32 vmids[1];
+	u32 perms[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+	struct gh_sgl_desc *gh_sgl;
+	uint64_t orig_addr = addr;
+	int ret;
+	u64 block_size = vm->in_sbm ? vm->sbm.sb_size : vm->bbm.bb_size;
 
 	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
 		addr + size - 1);
-	WARN_ON(1);
-	return -EINVAL;
+
+	vmids[0] = mem_buf_current_vmid();
+
+	alloc_data.size = block_size;
+	alloc_data.nr_acl_entries = ARRAY_SIZE(vmids);
+	alloc_data.vmids = vmids;
+	alloc_data.perms = perms;
+	alloc_data.trans_type = GH_RM_TRANS_TYPE_DONATE;
+	gh_sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
+	if (!gh_sgl)
+		return -ENOMEM;
+	/* ipa_base/size configured below */
+	gh_sgl->n_sgl_entries = 1;
+
+	alloc_data.sgl_desc = gh_sgl;
+	alloc_data.src_mem_type = MEM_BUF_BUDDY_MEM_TYPE;
+	alloc_data.src_data = NULL;
+	alloc_data.dst_mem_type = MEM_BUF_BUDDY_MEM_TYPE;
+	alloc_data.dst_data = NULL;
+
+	while (size) {
+		gh_sgl->sgl_entries[0].ipa_base = addr;
+		gh_sgl->sgl_entries[0].size = block_size;
+
+		membuf = mem_buf_alloc(&alloc_data);
+		if (IS_ERR(membuf)) {
+			dev_err(&vm->vdev->dev, "mem_buf_alloc failed with %ld\n", PTR_ERR(membuf));
+			ret = virtio_mem_convert_error_code(PTR_ERR(membuf));
+			goto err_mem_buf_alloc;
+		}
+
+		xa_store(&xa_membuf, addr, membuf, GFP_KERNEL);
+		vm->plugged_size += block_size;
+
+		size -= block_size;
+		addr += block_size;
+	}
+
+	kfree(gh_sgl);
+	return 0;
+
+err_mem_buf_alloc:
+	if (addr > orig_addr)
+		virtio_mem_send_unplug_request(vm, orig_addr, addr - orig_addr);
+	kfree(gh_sgl);
+	return ret;
 }
 
 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
 					  uint64_t size)
 {
+	void *membuf;
+	u64 block_size = vm->in_sbm ?
vm->sbm.sb_size : vm->bbm.bb_size; + dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr, addr + size - 1); - WARN_ON(1); - return -EINVAL; + + while (size) { + membuf = xa_load(&xa_membuf, addr); + if (!WARN(membuf, "No membuf for %llx\n", addr)) { + mem_buf_free(membuf); + vm->plugged_size -= block_size; + + size -= block_size; + addr += block_size; + } + } + return 0; } static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm) @@ -2784,7 +2870,7 @@ static int virtio_mem_remove(struct platform_device *vdev) return 0; } -static void __maybe_unused virtio_mem_config_changed(struct platform_device *vdev) +static void virtio_mem_config_changed(struct platform_device *vdev) { struct virtio_mem *vm = platform_get_drvdata(vdev); @@ -2819,9 +2905,16 @@ int virtio_mem_update_config_size(s64 size, bool sync) virtio_mem_config_changed(vm->vdev); - if (sync) + if (sync) { flush_work(&vm->wq); + if (vm->requested_size != vm->plugged_size) { + dev_err(&vm->vdev->dev, "Request failed: 0x%llx, plugged: 0x%llx\n", + vm->requested_size, vm->plugged_size); + return -ENOMEM; + } + } + return 0; } From 912f04fb721ef1f8a1ba93b2666d9ecd9fda74fb Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Thu, 7 Jul 2022 13:04:33 -0700 Subject: [PATCH 4/7] virtio-mem: Fix missing logical not operation Log an error if membuf is not found in xarray, rather than if it is found. Do not reduce vm->plugged_size in case of error on unplug operations, in order to keep the value consistent with vm->sbm.sb_states bitmap or its bbm equivalent. 
Change-Id: Ieaf37f80ab6c7d0f9d416aba3daa9cde8f7c2a53 Signed-off-by: Patrick Daly --- drivers/virtio/virtio_mem.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index b7765456e591..3807b569e801 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1413,20 +1413,27 @@ static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, { void *membuf; u64 block_size = vm->in_sbm ? vm->sbm.sb_size : vm->bbm.bb_size; + uint64_t saved_size = size; dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr, addr + size - 1); while (size) { membuf = xa_load(&xa_membuf, addr); - if (!WARN(membuf, "No membuf for %llx\n", addr)) { - mem_buf_free(membuf); - vm->plugged_size -= block_size; + if (WARN(!membuf, "No membuf for %llx\n", addr)) + return -EINVAL; - size -= block_size; - addr += block_size; - } + mem_buf_free(membuf); + + size -= block_size; + addr += block_size; } + + /* + * Only update if all successful to be in-line with how errors + * are handled by this function's callers + */ + vm->plugged_size -= saved_size; return 0; } From d688e5ca962796bd6938be4c5fc5d9cf2003e11b Mon Sep 17 00:00:00 2001 From: Sanish Sanjay Kharade Date: Fri, 10 Jun 2022 14:38:52 -0700 Subject: [PATCH 5/7] gunyah: msm-kernel: Add ftrace debugging to gunyah communication Add debug prints to the communication between the android and the guest VM. They can be enabled dynamically at run time using ftrace. Example of a trace message: gh_rm_mem_share: mem_type = Normal Memory flags = 0x0 label = 3 acl_entries = 2 acl_arr = {(0x03,RW), (0x2d,RW)} sgl_entries = 1 sgl_ipa_base = {0xf80ef000} sgl_size = {0x9000} mem_attr_entries = 0 mem_attr_attr = N/A mem_attr_vmid = N/A handle = 0 map_vmid = 0 trans_type = Share. 
Change-Id: I5732ea586ae7bd488131f4d174be8bf181f40b0a Signed-off-by: Sanish Sanjay Kharade --- drivers/virt/gunyah/gh_rm_iface.c | 80 +++++- include/trace/events/gunyah.h | 462 ++++++++++++++++++++++++++++++ 2 files changed, 529 insertions(+), 13 deletions(-) create mode 100644 include/trace/events/gunyah.h diff --git a/drivers/virt/gunyah/gh_rm_iface.c b/drivers/virt/gunyah/gh_rm_iface.c index c62772d8c8b5..93940f03c62f 100644 --- a/drivers/virt/gunyah/gh_rm_iface.c +++ b/drivers/virt/gunyah/gh_rm_iface.c @@ -13,7 +13,10 @@ #include #include +#define CREATE_TRACE_POINTS + #include "gh_rm_drv_private.h" +#include #define GH_RM_MEM_RELEASE_VALID_FLAGS GH_RM_MEM_RELEASE_CLEAR #define GH_RM_MEM_RECLAIM_VALID_FLAGS GH_RM_MEM_RECLAIM_CLEAR @@ -1742,8 +1745,16 @@ static int gh_rm_mem_release_helper(u32 fn_id, gh_memparcel_handle_t handle, */ int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags) { - return gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RELEASE, - handle, flags); + int ret; + + trace_gh_rm_mem_release(handle, flags); + + ret = gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RELEASE, + handle, flags); + + trace_gh_rm_mem_call_return(handle, ret); + + return ret; } EXPORT_SYMBOL(gh_rm_mem_release); @@ -1760,8 +1771,16 @@ EXPORT_SYMBOL(gh_rm_mem_release); */ int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags) { - return gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM, - handle, flags); + int ret; + + trace_gh_rm_mem_reclaim(handle, flags); + + ret = gh_rm_mem_release_helper(GH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM, + handle, flags); + + trace_gh_rm_mem_call_return(handle, ret); + + return ret; } EXPORT_SYMBOL(gh_rm_mem_reclaim); @@ -1814,6 +1833,9 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, int gh_ret; u32 fn_id = GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT; + trace_gh_rm_mem_accept(mem_type, flags, label, acl_desc, sgl_desc, + mem_attr_desc, &handle, map_vmid, trans_type); + if ((mem_type != 
GH_RM_MEM_TYPE_NORMAL && mem_type != GH_RM_MEM_TYPE_IO) || (trans_type != GH_RM_TRANS_TYPE_DONATE && @@ -1881,6 +1903,9 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, err_rm_call: kfree(req_buf); + + trace_gh_rm_mem_accept_reply(ret_sgl); + return ret_sgl; } EXPORT_SYMBOL(gh_rm_mem_accept); @@ -2067,9 +2092,18 @@ int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label, struct gh_mem_attr_desc *mem_attr_desc, gh_memparcel_handle_t *handle) { - return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_SHARE, - mem_type, flags, label, acl_desc, - sgl_desc, mem_attr_desc, handle); + int ret; + + trace_gh_rm_mem_share(mem_type, flags, label, acl_desc, sgl_desc, + mem_attr_desc, handle, 0, SHARE); + + ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_SHARE, + mem_type, flags, label, acl_desc, + sgl_desc, mem_attr_desc, handle); + + trace_gh_rm_mem_call_return(*handle, ret); + + return ret; } EXPORT_SYMBOL(gh_rm_mem_share); @@ -2098,9 +2132,18 @@ int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label, struct gh_mem_attr_desc *mem_attr_desc, gh_memparcel_handle_t *handle) { - return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_LEND, - mem_type, flags, label, acl_desc, - sgl_desc, mem_attr_desc, handle); + int ret; + + trace_gh_rm_mem_lend(mem_type, flags, label, acl_desc, sgl_desc, + mem_attr_desc, handle, 0, LEND); + + ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_LEND, + mem_type, flags, label, acl_desc, + sgl_desc, mem_attr_desc, handle); + + trace_gh_rm_mem_call_return(*handle, ret); + + return ret; } EXPORT_SYMBOL(gh_rm_mem_lend); @@ -2138,6 +2181,11 @@ int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label, struct gh_mem_attr_desc *mem_attr_desc, gh_memparcel_handle_t *handle) { + int ret; + + trace_gh_rm_mem_donate(mem_type, flags, label, acl_desc, sgl_desc, + mem_attr_desc, handle, 0, DONATE); + if (sgl_desc->n_sgl_entries != 1) { pr_err("%s: Physically contiguous memory 
required\n", __func__); return -EINVAL; @@ -2153,9 +2201,13 @@ int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label, return -EINVAL; } - return gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_DONATE, - mem_type, flags, label, acl_desc, - sgl_desc, mem_attr_desc, handle); + ret = gh_rm_mem_share_lend_helper(GH_RM_RPC_MSG_ID_CALL_MEM_DONATE, + mem_type, flags, label, acl_desc, + sgl_desc, mem_attr_desc, handle); + + trace_gh_rm_mem_call_return(*handle, ret); + + return ret; } EXPORT_SYMBOL(gh_rm_mem_donate); @@ -2189,6 +2241,8 @@ int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags, unsigned int i; int ret = 0, gh_ret; + trace_gh_rm_mem_notify(handle, flags, mem_info_tag, vmid_desc); + if ((flags & ~GH_RM_MEM_NOTIFY_VALID_FLAGS) || ((flags & GH_RM_MEM_NOTIFY_RECIPIENT_SHARED) && (!vmid_desc || (vmid_desc && diff --git a/include/trace/events/gunyah.h b/include/trace/events/gunyah.h new file mode 100644 index 000000000000..87baba71202d --- /dev/null +++ b/include/trace/events/gunyah.h @@ -0,0 +1,462 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gunyah + +#if !defined(_TRACE_GUNYAH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GUNYAH_H + +#include +#include +#include +#include + +#ifndef __GUNYAH_HELPER_FUNCTIONS +#define __GUNYAH_HELPER_FUNCTIONS + +#define MAX_ENTRIES_TO_PRINT 4 + +enum { + DONATE = 0, + LEND = 1, + SHARE = 2 +}; + +static const char *__print_acl_arr(struct trace_seq *p, u8 *acl_perms, u16 *acl_vmids, + int count) +{ + const char *ret; + int i = 0; + + u8 *perms = acl_perms; + u16 *vmids = acl_vmids; + + ret = trace_seq_buffer_ptr(p); + + trace_seq_putc(p, '{'); + + for (i = 0; i < count; i++) { + + trace_seq_printf(p, "(0x%x,", *vmids); + trace_seq_printf(p, "%s%s%s)", + ((*perms & 0x4) ? "R" : ""), + ((*perms & 0x2) ? "W" : ""), + ((*perms & 0x1) ? 
"X" : "") + ); + + perms++; + vmids++; + + if (i != count-1) + trace_seq_printf(p, ", "); + } + + trace_seq_putc(p, '}'); + trace_seq_putc(p, 0); + + return ret; +} +#endif + +DECLARE_EVENT_CLASS(gh_rm_mem_accept_donate_lend_share, + + TP_PROTO(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type), + + TP_ARGS(mem_type, flags, label, + acl_desc, sgl_desc, + mem_attr_desc, + handle, map_vmid, trans_type), + + TP_STRUCT__entry( + __field(u8, mem_type) + __field(u8, flags) + __field(gh_label_t, label) + + /* gh_acl_desc */ + __field(u32, n_acl_entries) + + __dynamic_array(u16, acl_vmid_arr, + ((acl_desc != NULL) ? acl_desc->n_acl_entries : 0)) + __dynamic_array(u8, acl_perm_arr, + ((acl_desc != NULL) ? acl_desc->n_acl_entries : 0)) + + /* gh_sgl_desc */ + __field(u16, n_sgl_entries) + __dynamic_array(u64, sgl_ipa_base_arr, + ((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? MAX_ENTRIES_TO_PRINT + : sgl_desc->n_sgl_entries) + : 0)) + __dynamic_array(u64, sgl_size_arr, + ((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? MAX_ENTRIES_TO_PRINT + : sgl_desc->n_sgl_entries) + : 0)) + + /* mem_attr_desc */ + __field(u16, n_mem_attr_entries) + __dynamic_array(u16, mem_attr_attr_arr, + ((mem_attr_desc != NULL) + ? mem_attr_desc->n_mem_attr_entries : 0)) + __dynamic_array(u16, mem_attr_vmid_arr, + ((mem_attr_desc != NULL) + ? 
mem_attr_desc->n_mem_attr_entries : 0)) + + __field(gh_memparcel_handle_t, handle) + __field(u16, map_vmid) + __field(u8, trans_type) + + __field(int, sgl_entries_to_print) + ), + + TP_fast_assign( + + unsigned int i; + + /* gh_acl_desc */ + u16 *acl_vmids_arr_ptr = __get_dynamic_array(acl_vmid_arr); + u8 *acl_perms_arr_ptr = __get_dynamic_array(acl_perm_arr); + + /* gh_sgl_desc */ + u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr); + u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr); + + /* mem_attr_desc */ + u16 *mem_attr_attr_arr_ptr = __get_dynamic_array(mem_attr_attr_arr); + u16 *mem_attr_vmid_arr_ptr = __get_dynamic_array(mem_attr_vmid_arr); + + __entry->mem_type = mem_type; + __entry->flags = flags; + __entry->label = label; + + /* gh_acl_desc */ + if (acl_desc != NULL) { + __entry->n_acl_entries = acl_desc->n_acl_entries; + + for (i = 0; i < __entry->n_acl_entries; i++) { + acl_vmids_arr_ptr[i] = acl_desc->acl_entries[i].vmid; + acl_perms_arr_ptr[i] = acl_desc->acl_entries[i].perms; + } + } else { + __entry->n_acl_entries = 0; + } + + /* gh_sgl_desc */ + if (sgl_desc != NULL) { + __entry->n_sgl_entries = sgl_desc->n_sgl_entries; + + __entry->sgl_entries_to_print = + __entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? 
MAX_ENTRIES_TO_PRINT + : __entry->n_sgl_entries; + + for (i = 0; i < __entry->sgl_entries_to_print; i++) { + sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base; + sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size; + } + + } else { + __entry->n_sgl_entries = 0; + __entry->sgl_entries_to_print = 0; + } + + /* mem_attr_desc */ + if (mem_attr_desc != NULL) { + __entry->n_mem_attr_entries = mem_attr_desc->n_mem_attr_entries; + + for (i = 0; i < __entry->n_mem_attr_entries; i++) { + mem_attr_attr_arr_ptr[i] = mem_attr_desc->attr_entries[i].attr; + mem_attr_vmid_arr_ptr[i] = mem_attr_desc->attr_entries[i].vmid; + } + } else { + __entry->n_mem_attr_entries = 0; + } + + __entry->handle = *handle; + + __entry->map_vmid = map_vmid; + __entry->trans_type = trans_type; + + ), + + TP_printk("mem_type = %s flags = 0x%x label = %u\t\t" + "acl_entries = %u acl_arr = %s\t\t" + "sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t" + "mem_attr_entries = %u mem_attr_attr = %s mem_attr_vmid = %s\t\t" + "handle = %u map_vmid = 0x%x trans_type = %s", + __print_symbolic(__entry->mem_type, + { 0, "Normal Memory" }, + { 1, "IO Memory" }), + __entry->flags, + __entry->label, + __entry->n_acl_entries, + (__entry->n_acl_entries + ? __print_acl_arr(p, __get_dynamic_array(acl_perm_arr), + __get_dynamic_array(acl_vmid_arr), __entry->n_acl_entries) + : "N/A"), + __entry->n_sgl_entries, + (__entry->n_sgl_entries + ? __print_array(__get_dynamic_array(sgl_ipa_base_arr), + __entry->sgl_entries_to_print, sizeof(u64)) + : "N/A"), + (__entry->n_sgl_entries + ? __print_array(__get_dynamic_array(sgl_size_arr), + __entry->sgl_entries_to_print, sizeof(u64)) + : "N/A"), + __entry->n_mem_attr_entries, + (__entry->n_mem_attr_entries + ? __print_array(__get_dynamic_array(mem_attr_attr_arr), + __entry->n_mem_attr_entries, sizeof(u16)) + : "N/A"), + (__entry->n_mem_attr_entries + ? 
__print_array(__get_dynamic_array(mem_attr_vmid_arr), + __entry->n_mem_attr_entries, sizeof(u16)) + : "N/A"), + __entry->handle, __entry->map_vmid, + __print_symbolic(__entry->trans_type, + { 0, "Donate" }, + { 1, "Lend" }, + { 2, "Share" }) + ) +); + +DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_accept, + + TP_PROTO(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type), + + TP_ARGS(mem_type, flags, label, + acl_desc, sgl_desc, + mem_attr_desc, + handle, map_vmid, trans_type) +); + +DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_donate, + + TP_PROTO(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type), + + TP_ARGS(mem_type, flags, label, + acl_desc, sgl_desc, + mem_attr_desc, + handle, map_vmid, trans_type) +); + +DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_lend, + + TP_PROTO(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type), + + TP_ARGS(mem_type, flags, label, + acl_desc, sgl_desc, + mem_attr_desc, + handle, map_vmid, trans_type) +); + +DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_share, + + TP_PROTO(u8 mem_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type), + + TP_ARGS(mem_type, flags, label, + acl_desc, sgl_desc, + mem_attr_desc, + handle, map_vmid, trans_type) +); + +TRACE_EVENT(gh_rm_mem_accept_reply, + + TP_PROTO(struct gh_sgl_desc *sgl_desc), + + TP_ARGS(sgl_desc), + + TP_STRUCT__entry( + + __field(u16, 
n_sgl_entries) + + __dynamic_array(u64, sgl_ipa_base_arr, + ((sgl_desc != NULL) + ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? MAX_ENTRIES_TO_PRINT + : sgl_desc->n_sgl_entries) + : 0)) + __dynamic_array(u64, sgl_size_arr, + ((sgl_desc != NULL) + ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? MAX_ENTRIES_TO_PRINT + : sgl_desc->n_sgl_entries) + : 0)) + __field(int, sgl_entries_to_print) + __field(bool, is_error) + ), + + TP_fast_assign( + + unsigned int i; + + u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr); + u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr); + + __entry->is_error = IS_ERR(sgl_desc); + + if (sgl_desc != NULL && __entry->is_error == false) { + __entry->n_sgl_entries = sgl_desc->n_sgl_entries; + + __entry->sgl_entries_to_print = + __entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT + ? MAX_ENTRIES_TO_PRINT + : __entry->n_sgl_entries; + + for (i = 0; i < __entry->sgl_entries_to_print; i++) { + sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base; + sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size; + } + + } else { + __entry->n_sgl_entries = 0; + __entry->sgl_entries_to_print = 0; + } + + ), + + TP_printk("sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t", + __entry->n_sgl_entries, + ((__entry->n_sgl_entries && __entry->is_error == false) + ? __print_array(__get_dynamic_array(sgl_ipa_base_arr), + __entry->sgl_entries_to_print, sizeof(u64)) + : "N/A"), + ((__entry->n_sgl_entries && __entry->is_error == false) + ? 
__print_array(__get_dynamic_array(sgl_size_arr), + __entry->sgl_entries_to_print, sizeof(u64)) + : "N/A") + ) +); + +DECLARE_EVENT_CLASS(gh_rm_mem_release_reclaim, + + TP_PROTO(gh_memparcel_handle_t handle, u8 flags), + + TP_ARGS(handle, flags), + + TP_STRUCT__entry( + __field(gh_memparcel_handle_t, handle) + __field(u8, flags) + ), + + TP_fast_assign( + __entry->handle = handle; + __entry->flags = flags; + ), + + TP_printk("handle_s = %u flags = 0x%x", + __entry->handle, + __entry->flags + ) +); + +DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_release, + + TP_PROTO(gh_memparcel_handle_t handle, u8 flags), + + TP_ARGS(handle, flags) +); + + +DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_reclaim, + + TP_PROTO(gh_memparcel_handle_t handle, u8 flags), + + TP_ARGS(handle, flags) +); + +TRACE_EVENT(gh_rm_mem_call_return, + + TP_PROTO(gh_memparcel_handle_t handle, int return_val), + + TP_ARGS(handle, return_val), + + TP_STRUCT__entry( + __field(gh_memparcel_handle_t, handle) + __field(int, return_val) + ), + + TP_fast_assign( + __entry->handle = handle; + __entry->return_val = return_val; + + ), + + TP_printk("handle = %u, return_value = %d", __entry->handle, __entry->return_val) +); + +TRACE_EVENT(gh_rm_mem_notify, + + TP_PROTO(gh_memparcel_handle_t handle, u8 flags, gh_label_t mem_info_tag, + struct gh_notify_vmid_desc *vmid_desc), + + TP_ARGS(handle, flags, mem_info_tag, vmid_desc), + + TP_STRUCT__entry( + __field(gh_memparcel_handle_t, handle) + __field(u8, flags) + __field(gh_label_t, mem_info_tag) + + __field(u16, n_vmid_entries) + __dynamic_array(u16, entry_vmid_arr, + ((vmid_desc != NULL) ? 
vmid_desc->n_vmid_entries : 0)) + ), + + TP_fast_assign( + + unsigned int i; + + /* vmid_desc */ + u16 *entry_vmid_arr_ptr = __get_dynamic_array(entry_vmid_arr); + + __entry->handle = handle; + __entry->flags = flags; + __entry->mem_info_tag = mem_info_tag; + + if (vmid_desc != NULL) { + __entry->n_vmid_entries = vmid_desc->n_vmid_entries; + + for (i = 0; i < __entry->n_vmid_entries; i++) + entry_vmid_arr_ptr[i] = vmid_desc->vmid_entries[i].vmid; + + } else { + __entry->n_vmid_entries = 0; + } + + ), + + TP_printk("handle = %u flags = 0x%x mem_info_tag = %u\t\t" + "vmid_entries = %u entry_vmid_arr = %s", + __entry->handle, + __entry->flags, + __entry->mem_info_tag, + __entry->n_vmid_entries, + (__entry->n_vmid_entries + ? __print_array(__get_dynamic_array(entry_vmid_arr), + __entry->n_vmid_entries, sizeof(u16)) + : "N/A") + ) +); + + +#endif /* _TRACE_GUNYAH_H */ + +/* This part must be outside protection */ +#include From 592f2703ecc0443e2aacf0f037fc90301668a120 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Tue, 5 Jul 2022 17:49:39 -0700 Subject: [PATCH 6/7] virtio-mem: Use device_block_size in plug/unplug requests Switch back to using device_block_size for plug/unplug requests since this is the intended purpose of this configuration option. Fixes: a0d27d0bdb39 ("virtio-mem: Add mem-buf interfaces") Change-Id: I9068856337b9ce90c76ca9949f8275023e9e7c17 Signed-off-by: Patrick Daly --- drivers/virtio/virtio_mem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 3807b569e801..4eb90f177979 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1356,7 +1356,7 @@ static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr, struct gh_sgl_desc *gh_sgl; uint64_t orig_addr = addr; int ret; - u64 block_size = vm->in_sbm ? 
vm->sbm.sb_size : vm->bbm.bb_size; + u64 block_size = vm->device_block_size; dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr, addr + size - 1); @@ -1412,7 +1412,7 @@ static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, uint64_t size) { void *membuf; - u64 block_size = vm->in_sbm ? vm->sbm.sb_size : vm->bbm.bb_size; + u64 block_size = vm->device_block_size; uint64_t saved_size = size; dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr, From 266b3dd7c2dc78a430ec5844833e242f2cc2c847 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Fri, 15 Jul 2022 14:52:26 -0700 Subject: [PATCH 7/7] gunyah: Split gh_rm_mem_accept Reduce function complexity per static analysis tools. Change-Id: I9bd6f42e566f4c1932730b53bcbd5dee7f3e600c Signed-off-by: Patrick Daly --- drivers/virt/gunyah/gh_rm_iface.c | 115 ++++++++++++++++++------------ 1 file changed, 68 insertions(+), 47 deletions(-) diff --git a/drivers/virt/gunyah/gh_rm_iface.c b/drivers/virt/gunyah/gh_rm_iface.c index 93940f03c62f..b31512d33bfe 100644 --- a/drivers/virt/gunyah/gh_rm_iface.c +++ b/drivers/virt/gunyah/gh_rm_iface.c @@ -1784,6 +1784,66 @@ int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags) } EXPORT_SYMBOL(gh_rm_mem_reclaim); + +static struct gh_mem_accept_req_payload_hdr * +gh_rm_mem_accept_prepare_request(gh_memparcel_handle_t handle, u8 mem_type, + u8 trans_type, u8 flags, gh_label_t label, + struct gh_acl_desc *acl_desc, + struct gh_sgl_desc *sgl_desc, + struct gh_mem_attr_desc *mem_attr_desc, + u16 map_vmid, size_t *req_payload_size) +{ + void *req_buf; + struct gh_mem_accept_req_payload_hdr *req_payload_hdr; + u16 req_sgl_entries = 0, req_mem_attr_entries = 0; + u32 req_acl_entries = 0; + u32 fn_id = GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT; + + if ((mem_type != GH_RM_MEM_TYPE_NORMAL && + mem_type != GH_RM_MEM_TYPE_IO) || + (trans_type != GH_RM_TRANS_TYPE_DONATE && + trans_type != GH_RM_TRANS_TYPE_LEND && + trans_type != GH_RM_TRANS_TYPE_SHARE) 
|| + (flags & ~GH_RM_MEM_ACCEPT_VALID_FLAGS) || + (sgl_desc && sgl_desc->n_sgl_entries > GH_RM_MEM_MAX_SGL_ENTRIES)) + return ERR_PTR(-EINVAL); + + if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS && + (!acl_desc || !acl_desc->n_acl_entries) && + (!mem_attr_desc || !mem_attr_desc->n_mem_attr_entries)) + return ERR_PTR(-EINVAL); + + if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS) { + if (acl_desc) + req_acl_entries = acl_desc->n_acl_entries; + if (mem_attr_desc) + req_mem_attr_entries = + mem_attr_desc->n_mem_attr_entries; + } + + if (sgl_desc) + req_sgl_entries = sgl_desc->n_sgl_entries; + + req_buf = gh_rm_alloc_mem_request_buf(fn_id, req_acl_entries, + req_sgl_entries, + req_mem_attr_entries, + req_payload_size); + if (IS_ERR(req_buf)) + return req_buf; + + req_payload_hdr = req_buf; + req_payload_hdr->memparcel_handle = handle; + req_payload_hdr->mem_type = mem_type; + req_payload_hdr->trans_type = trans_type; + req_payload_hdr->flags = flags; + if (flags & GH_RM_MEM_ACCEPT_VALIDATE_LABEL) + req_payload_hdr->validate_label = label; + gh_rm_populate_mem_request(req_buf, fn_id, acl_desc, sgl_desc, map_vmid, + mem_attr_desc); + + return req_payload_hdr; +} + /** * gh_rm_mem_accept: Accept a handle representing memory. 
This results in * the RM mapping the associated memory from the stage-2 @@ -1823,61 +1883,23 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, struct gh_mem_attr_desc *mem_attr_desc, u16 map_vmid) { - struct gh_mem_accept_req_payload_hdr *req_payload_hdr; + struct gh_mem_accept_req_payload_hdr *req_payload; struct gh_sgl_desc *ret_sgl; struct gh_mem_accept_resp_payload *resp_payload; - void *req_buf; size_t req_payload_size, resp_payload_size; - u16 req_sgl_entries = 0, req_mem_attr_entries = 0; - u32 req_acl_entries = 0; int gh_ret; u32 fn_id = GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT; trace_gh_rm_mem_accept(mem_type, flags, label, acl_desc, sgl_desc, mem_attr_desc, &handle, map_vmid, trans_type); - if ((mem_type != GH_RM_MEM_TYPE_NORMAL && - mem_type != GH_RM_MEM_TYPE_IO) || - (trans_type != GH_RM_TRANS_TYPE_DONATE && - trans_type != GH_RM_TRANS_TYPE_LEND && - trans_type != GH_RM_TRANS_TYPE_SHARE) || - (flags & ~GH_RM_MEM_ACCEPT_VALID_FLAGS)) - return ERR_PTR(-EINVAL); + req_payload = gh_rm_mem_accept_prepare_request(handle, mem_type, trans_type, flags, + label, acl_desc, sgl_desc, mem_attr_desc, + map_vmid, &req_payload_size); + if (IS_ERR(req_payload)) + return ERR_CAST(req_payload); - if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS && - (!acl_desc || !acl_desc->n_acl_entries) && - (!mem_attr_desc || !mem_attr_desc->n_mem_attr_entries)) - return ERR_PTR(-EINVAL); - - if (flags & GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS) { - if (acl_desc) - req_acl_entries = acl_desc->n_acl_entries; - if (mem_attr_desc) - req_mem_attr_entries = - mem_attr_desc->n_mem_attr_entries; - } - - if (sgl_desc) - req_sgl_entries = sgl_desc->n_sgl_entries; - - req_buf = gh_rm_alloc_mem_request_buf(fn_id, req_acl_entries, - req_sgl_entries, - req_mem_attr_entries, - &req_payload_size); - if (IS_ERR(req_buf)) - return req_buf; - - req_payload_hdr = req_buf; - req_payload_hdr->memparcel_handle = handle; - req_payload_hdr->mem_type = mem_type; - req_payload_hdr->trans_type 
= trans_type; - req_payload_hdr->flags = flags; - if (flags & GH_RM_MEM_ACCEPT_VALIDATE_LABEL) - req_payload_hdr->validate_label = label; - gh_rm_populate_mem_request(req_buf, fn_id, acl_desc, sgl_desc, map_vmid, - mem_attr_desc); - - resp_payload = gh_rm_call(fn_id, req_buf, req_payload_size, + resp_payload = gh_rm_call(fn_id, req_payload, req_payload_size, &resp_payload_size, &gh_ret); if (gh_ret || IS_ERR(resp_payload)) { ret_sgl = ERR_CAST(resp_payload); @@ -1902,10 +1924,9 @@ struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type, } err_rm_call: - kfree(req_buf); + kfree(req_payload); trace_gh_rm_mem_accept_reply(ret_sgl); - return ret_sgl; } EXPORT_SYMBOL(gh_rm_mem_accept);