mem-buf: Split into multiple modules

"mem_buf_retrieve() [mem_buf.ko]" exports a dma_buf, and wants to use
"qcom_sg_ops [qcom_dma_heaps.ko]". But "qcom_sg_map_dma_buf() [qcom_sg_op]"
calls "mem_buf_vmperm_can_cmo [mem_buf.ko]".

Resolve this by splitting mem-buf into two separate modules:
1) mem_buf.ko
Contains the ioctl interface, msgq handling, and mem_buf_retrieve().
2) mem_buf_dev.ko
Contains the hypervisor interface, as well as all vmperm functionality.
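For reference, a hedged sketch of how an in-kernel consumer might call the
retained mem_buf.ko entry point after the split; example_accept() is a
hypothetical caller, and the ACL, permission and fd-flag values are
illustrative rather than taken from this patch:

    #include <linux/dma-buf.h>
    #include <linux/fcntl.h>
    #include <linux/kernel.h>
    #include <linux/mem-buf.h>
    #include <linux/haven/hh_rm_drv.h>
    #include <soc/qcom/secure_buffer.h>

    /* Hypothetical consumer: accept a memparcel lent by another VM and wrap
     * it in a dma-buf via mem_buf_retrieve(), which stays in mem_buf.ko.
     */
    static struct dma_buf *example_accept(hh_memparcel_handle_t hdl)
    {
            int vmids[] = { VMID_HLOS };             /* illustrative ACL entry */
            int perms[] = { PERM_READ | PERM_WRITE };
            struct mem_buf_retrieve_kernel_arg arg = {
                    .memparcel_hdl = hdl,
                    .nr_acl_entries = ARRAY_SIZE(vmids),
                    .vmids = vmids,
                    .perms = perms,
                    .fd_flags = O_CLOEXEC,           /* illustrative */
            };

            /* Returns ERR_PTR(-EOPNOTSUPP) unless this VM has
             * MEM_BUF_CAP_CONSUMER.
             */
            return mem_buf_retrieve(&arg);
    }

Under the hood mem_buf_retrieve() now calls the stage-2 mapping and vmperm
helpers exported by mem_buf_dev.ko, so loading a consumer pulls in both
modules.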

Change-Id: Ifc41f12e62858d865b880c7b40676607016d36a0
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Author: Patrick Daly
Date:   2021-01-29 18:54:55 -08:00
commit 7acf8182d6
parent 94922f6ea1
10 changed files with 429 additions and 355 deletions


@@ -3,6 +3,7 @@
config QCOM_MEM_BUF
tristate "Qualcomm Technologies, Inc. Memory Buffer Sharing Driver"
depends on HH_MSGQ && HH_RM_DRV
select QCOM_MEM_BUF_DEV
help
Add support for lending memory from one virtual machine to another.
This driver communicates with the hypervisor, as well as other
@@ -10,4 +11,5 @@ config QCOM_MEM_BUF
respectively.
If unsure, say N
config QCOM_MEM_BUF_DEV
tristate


@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_QCOM_MEM_BUF) += mem_buf.o
mem_buf-y += mem-buf.o mem_buf_dma_buf.o mem-buf-ids.o
mem_buf-y += mem-buf.o
obj-$(CONFIG_QCOM_MEM_BUF_DEV) += mem_buf_dev.o
mem_buf_dev-y += mem-buf-dev.o mem_buf_dma_buf.o mem-buf-ids.o


@@ -0,0 +1,279 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/haven/hh_rm_drv.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/mem-buf.h>
#include "mem-buf-dev.h"
#define CREATE_TRACE_POINTS
#include "trace-mem-buf.h"
EXPORT_TRACEPOINT_SYMBOL(send_alloc_req);
EXPORT_TRACEPOINT_SYMBOL(receive_alloc_req);
EXPORT_TRACEPOINT_SYMBOL(send_relinquish_msg);
EXPORT_TRACEPOINT_SYMBOL(receive_relinquish_msg);
EXPORT_TRACEPOINT_SYMBOL(send_alloc_resp_msg);
EXPORT_TRACEPOINT_SYMBOL(receive_alloc_resp_msg);
EXPORT_TRACEPOINT_SYMBOL(mem_buf_alloc_info);
struct device *mem_buf_dev;
EXPORT_SYMBOL(mem_buf_dev);
unsigned char mem_buf_capability;
EXPORT_SYMBOL(mem_buf_capability);
int mem_buf_assign_mem(struct sg_table *sgt, int *dst_vmids,
int *dst_perms, unsigned int nr_acl_entries)
{
u32 src_vmid = VMID_HLOS;
int ret;
if (!sgt || !dst_vmids || !dst_perms || !nr_acl_entries)
return -EINVAL;
pr_debug("%s: Assigning memory to target VMIDs\n", __func__);
ret = hyp_assign_table(sgt, &src_vmid, 1, dst_vmids, dst_perms,
nr_acl_entries);
if (ret < 0)
pr_err("%s: failed to assign memory for rmt allocation rc:%d\n",
__func__, ret);
else
pr_debug("%s: Memory assigned to target VMIDs\n", __func__);
return ret;
}
EXPORT_SYMBOL(mem_buf_assign_mem);
int mem_buf_unassign_mem(struct sg_table *sgt, int *src_vmids,
unsigned int nr_acl_entries)
{
int dst_vmid = VMID_HLOS;
int dst_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
int ret;
if (!sgt || !src_vmids || !nr_acl_entries)
return -EINVAL;
pr_debug("%s: Unassigning memory to HLOS\n", __func__);
ret = hyp_assign_table(sgt, src_vmids, nr_acl_entries, &dst_vmid,
&dst_perms, 1);
if (ret < 0)
pr_err("%s: failed to assign memory from rmt allocation rc: %d\n",
__func__, ret);
else
pr_debug("%s: Unassigned memory to HLOS\n", __func__);
return ret;
}
EXPORT_SYMBOL(mem_buf_unassign_mem);
int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
int *dst_vmids, int *dst_perms,
u32 nr_acl_entries,
hh_memparcel_handle_t *memparcel_hdl)
{
struct hh_sgl_desc *sgl_desc;
struct hh_acl_desc *acl_desc;
unsigned int i, nr_sg_entries;
struct scatterlist *sg;
int ret;
size_t sgl_desc_size, acl_desc_size;
if (!sgt || !dst_vmids || !dst_perms || !nr_acl_entries ||
!memparcel_hdl)
return -EINVAL;
nr_sg_entries = sgt->nents;
sgl_desc_size = offsetof(struct hh_sgl_desc,
sgl_entries[nr_sg_entries]);
sgl_desc = kzalloc(sgl_desc_size, GFP_KERNEL);
if (!sgl_desc)
return -ENOMEM;
acl_desc_size = offsetof(struct hh_acl_desc,
acl_entries[nr_acl_entries]);
acl_desc = kzalloc(acl_desc_size, GFP_KERNEL);
if (!acl_desc) {
kfree(sgl_desc);
return -ENOMEM;
}
sgl_desc->n_sgl_entries = nr_sg_entries;
for_each_sg(sgt->sgl, sg, nr_sg_entries, i) {
sgl_desc->sgl_entries[i].ipa_base = page_to_phys(sg_page(sg));
sgl_desc->sgl_entries[i].size = sg->length;
}
acl_desc->n_acl_entries = nr_acl_entries;
for (i = 0; i < nr_acl_entries; i++) {
acl_desc->acl_entries[i].vmid = dst_vmids[i];
acl_desc->acl_entries[i].perms = dst_perms[i];
}
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL, 0, acl_desc,
sgl_desc, NULL, memparcel_hdl);
trace_lookup_sgl(sgl_desc, ret, *memparcel_hdl);
if (ret < 0)
pr_err("%s: hh_rm_mem_qcom_lookup_sgl failure rc: %d\n",
__func__, ret);
kfree(acl_desc);
kfree(sgl_desc);
return ret;
}
EXPORT_SYMBOL(mem_buf_retrieve_memparcel_hdl);
static int mem_buf_get_mem_xfer_type(struct hh_acl_desc *acl_desc)
{
u32 i, nr_acl_entries = acl_desc->n_acl_entries;
for (i = 0; i < nr_acl_entries; i++)
if (acl_desc->acl_entries[i].vmid == VMID_HLOS &&
acl_desc->acl_entries[i].perms != 0)
return HH_RM_TRANS_TYPE_SHARE;
return HH_RM_TRANS_TYPE_LEND;
}
/*
* FIXME: hh_rm_mem_accept uses kmemdup, which isn't right for large buffers.
*/
struct hh_sgl_desc *mem_buf_map_mem_s2(hh_memparcel_handle_t memparcel_hdl,
struct hh_acl_desc *acl_desc)
{
struct hh_sgl_desc *sgl_desc;
if (!acl_desc)
return ERR_PTR(-EINVAL);
pr_debug("%s: adding CPU MMU stage 2 mappings\n", __func__);
sgl_desc = hh_rm_mem_accept(memparcel_hdl, HH_RM_MEM_TYPE_NORMAL,
mem_buf_get_mem_xfer_type(acl_desc),
HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
HH_RM_MEM_ACCEPT_DONE, 0, acl_desc, NULL,
NULL, 0);
if (IS_ERR(sgl_desc)) {
pr_err("%s failed to map memory in stage 2 rc: %d\n", __func__,
PTR_ERR(sgl_desc));
return sgl_desc;
}
trace_map_mem_s2(memparcel_hdl, sgl_desc);
return sgl_desc;
}
EXPORT_SYMBOL(mem_buf_map_mem_s2);
int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl)
{
int ret;
pr_debug("%s: removing CPU MMU stage 2 mappings\n", __func__);
ret = hh_rm_mem_release(memparcel_hdl, 0);
if (ret < 0)
pr_err("%s: Failed to release memparcel hdl: 0x%lx rc: %d\n",
__func__, memparcel_hdl, ret);
else
pr_debug("%s: CPU MMU stage 2 mappings removed\n", __func__);
return ret;
}
EXPORT_SYMBOL(mem_buf_unmap_mem_s2);
int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc)
{
return -EINVAL;
}
EXPORT_SYMBOL(mem_buf_map_mem_s1);
int mem_buf_unmap_mem_s1(struct hh_sgl_desc *sgl_desc)
{
return -EINVAL;
}
EXPORT_SYMBOL(mem_buf_unmap_mem_s1);
static int mem_buf_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
u64 dma_mask = IS_ENABLED(CONFIG_ARM64) ? DMA_BIT_MASK(64) :
DMA_BIT_MASK(32);
if (of_property_match_string(dev->of_node, "qcom,mem-buf-capabilities",
"supplier") >= 0) {
mem_buf_capability = MEM_BUF_CAP_SUPPLIER;
} else if (of_property_match_string(dev->of_node,
"qcom,mem-buf-capabilities",
"consumer") >= 0) {
mem_buf_capability = MEM_BUF_CAP_CONSUMER;
} else if (of_property_match_string(dev->of_node,
"qcom,mem-buf-capabilities",
"dual") >= 0) {
mem_buf_capability = MEM_BUF_CAP_DUAL;
} else {
dev_err(dev, "Transfer direction property not present or not valid\n");
return -EINVAL;
}
ret = dma_set_mask_and_coherent(dev, dma_mask);
if (ret) {
dev_err(dev, "Unable to set dma mask: %d\n", ret);
return ret;
}
ret = mem_buf_vm_init(dev);
if (ret) {
dev_err(dev, "mem_buf_vm_init failed %d\n", ret);
return ret;
}
mem_buf_dev = dev;
return 0;
}
static int mem_buf_remove(struct platform_device *pdev)
{
mem_buf_dev = NULL;
return 0;
}
static const struct of_device_id mem_buf_match_tbl[] = {
{.compatible = "qcom,mem-buf"},
{},
};
static struct platform_driver mem_buf_driver = {
.probe = mem_buf_probe,
.remove = mem_buf_remove,
.driver = {
.name = "mem-buf",
.of_match_table = of_match_ptr(mem_buf_match_tbl),
},
};
static int __init mem_buf_dev_init(void)
{
return platform_driver_register(&mem_buf_driver);
}
module_init(mem_buf_dev_init);
static void __exit mem_buf_dev_exit(void)
{
mem_buf_vm_exit();
platform_driver_unregister(&mem_buf_driver);
}
module_exit(mem_buf_dev_exit);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Memory Buffer Sharing driver");
MODULE_LICENSE("GPL v2");


@@ -20,6 +20,7 @@
extern unsigned char mem_buf_capability;
extern struct device *mem_buf_dev;
/* Hypervisor Interface */
int mem_buf_assign_mem(struct sg_table *sgt, int *dst_vmids,
int *dst_perms, unsigned int nr_acl_entries);
int mem_buf_unassign_mem(struct sg_table *sgt, int *src_vmids,
@@ -30,15 +31,11 @@ int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
hh_memparcel_handle_t *memparcel_hdl);
struct hh_sgl_desc *mem_buf_map_mem_s2(hh_memparcel_handle_t memparcel_hdl,
struct hh_acl_desc *acl_desc);
int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc);
int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl);
/* Memory Hotplug */
int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc);
int mem_buf_unmap_mem_s1(struct hh_sgl_desc *sgl_desc);
size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc);
struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc);
struct hh_sgl_desc *dup_sgt_to_hh_sgl_desc(struct sg_table *sgt);
struct hh_acl_desc *mem_buf_vmid_perm_list_to_hh_acl(int *vmids, int *perms,
unsigned int nr_acl_entries);
/*
* Deltas from original qcom_sg_buffer:
@@ -99,5 +96,6 @@ int mem_buf_fd_to_vmid(int fd);
int mem_buf_lend_internal(struct dma_buf *dmabuf,
struct mem_buf_lend_kernel_arg *arg,
bool is_lend);
void mem_buf_retrieve_release(struct qcom_sg_buffer *buffer);
#endif


@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/xarray.h>
#include <soc/qcom/secure_buffer.h>
#include "mem-buf-private.h"
#include "mem-buf-dev.h"
#define DEVNAME "mem_buf_vm"
#define NUM_MEM_BUF_VM_MINORS 128
@@ -184,6 +184,7 @@ int mem_buf_fd_to_vmid(int fd)
fput(file);
return ret ? ret : vmid;
}
EXPORT_SYMBOL(mem_buf_fd_to_vmid);
static void mem_buf_vm_device_release(struct device *dev)
{


@@ -23,13 +23,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/mem-buf-exporter.h>
#include <soc/qcom/secure_buffer.h>
#include <uapi/linux/mem-buf.h>
#include "mem-buf-private.h"
#define CREATE_TRACE_POINTS
#include "mem-buf-dev.h"
#include "trace-mem-buf.h"
#define MEM_BUF_MAX_DEVS 1
@@ -54,9 +53,10 @@ static DEFINE_MUTEX(mem_buf_idr_mutex);
static DEFINE_IDR(mem_buf_txn_idr);
static struct task_struct *mem_buf_msgq_recv_thr;
static void *mem_buf_hh_msgq_hdl;
unsigned char mem_buf_capability;
static struct workqueue_struct *mem_buf_wq;
static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc);
static struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc);
/**
* struct mem_buf_txn: Represents a transaction (request/response pair) in the
* membuf driver.
@@ -73,9 +73,6 @@ struct mem_buf_txn {
void *resp_buf;
};
/* Data structures for maintaining memory shared to other VMs */
struct device *mem_buf_dev;
/* Maintains a list of memory buffers lent out to other VMs */
static DEFINE_MUTEX(mem_buf_xfer_mem_list_lock);
static LIST_HEAD(mem_buf_xfer_mem_list);
@@ -349,7 +346,7 @@ static int mem_buf_hh_acl_desc_to_vmid_perm_list(struct hh_acl_desc *acl_desc,
return 0;
}
struct hh_acl_desc *mem_buf_vmid_perm_list_to_hh_acl(int *vmids, int *perms,
static struct hh_acl_desc *mem_buf_vmid_perm_list_to_hh_acl(int *vmids, int *perms,
unsigned int nr_acl_entries)
{
struct hh_acl_desc *hh_acl;
@@ -370,105 +367,6 @@ struct hh_acl_desc *mem_buf_vmid_perm_list_to_hh_acl(int *vmids, int *perms,
return hh_acl;
}
int mem_buf_assign_mem(struct sg_table *sgt, int *dst_vmids,
int *dst_perms, unsigned int nr_acl_entries)
{
u32 src_vmid = VMID_HLOS;
int ret;
if (!sgt || !dst_vmids || !dst_perms || !nr_acl_entries)
return -EINVAL;
pr_debug("%s: Assigning memory to target VMIDs\n", __func__);
ret = hyp_assign_table(sgt, &src_vmid, 1, dst_vmids, dst_perms,
nr_acl_entries);
if (ret < 0)
pr_err("%s: failed to assign memory for rmt allocation rc:%d\n",
__func__, ret);
else
pr_debug("%s: Memory assigned to target VMIDs\n", __func__);
return ret;
}
int mem_buf_unassign_mem(struct sg_table *sgt, int *src_vmids,
unsigned int nr_acl_entries)
{
int dst_vmid = VMID_HLOS;
int dst_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
int ret;
if (!sgt || !src_vmids || !nr_acl_entries)
return -EINVAL;
pr_debug("%s: Unassigning memory to HLOS\n", __func__);
ret = hyp_assign_table(sgt, src_vmids, nr_acl_entries, &dst_vmid,
&dst_perms, 1);
if (ret < 0)
pr_err("%s: failed to assign memory from rmt allocation rc: %d\n",
__func__, ret);
else
pr_debug("%s: Unassigned memory to HLOS\n", __func__);
return ret;
}
int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
int *dst_vmids, int *dst_perms,
u32 nr_acl_entries,
hh_memparcel_handle_t *memparcel_hdl)
{
struct hh_sgl_desc *sgl_desc;
struct hh_acl_desc *acl_desc;
unsigned int i, nr_sg_entries;
struct scatterlist *sg;
int ret;
size_t sgl_desc_size, acl_desc_size;
if (!sgt || !dst_vmids || !dst_perms || !nr_acl_entries ||
!memparcel_hdl)
return -EINVAL;
nr_sg_entries = sgt->nents;
sgl_desc_size = offsetof(struct hh_sgl_desc,
sgl_entries[nr_sg_entries]);
sgl_desc = kzalloc(sgl_desc_size, GFP_KERNEL);
if (!sgl_desc)
return -ENOMEM;
acl_desc_size = offsetof(struct hh_acl_desc,
acl_entries[nr_acl_entries]);
acl_desc = kzalloc(acl_desc_size, GFP_KERNEL);
if (!acl_desc) {
kfree(sgl_desc);
return -ENOMEM;
}
sgl_desc->n_sgl_entries = nr_sg_entries;
for_each_sg(sgt->sgl, sg, nr_sg_entries, i) {
sgl_desc->sgl_entries[i].ipa_base = page_to_phys(sg_page(sg));
sgl_desc->sgl_entries[i].size = sg->length;
}
acl_desc->n_acl_entries = nr_acl_entries;
for (i = 0; i < nr_acl_entries; i++) {
acl_desc->acl_entries[i].vmid = dst_vmids[i];
acl_desc->acl_entries[i].perms = dst_perms[i];
}
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL, 0, acl_desc,
sgl_desc, NULL, memparcel_hdl);
trace_lookup_sgl(sgl_desc, ret, *memparcel_hdl);
if (ret < 0)
pr_err("%s: hh_rm_mem_qcom_lookup_sgl failure rc: %d\n",
__func__, ret);
kfree(acl_desc);
kfree(sgl_desc);
return ret;
}
static
struct mem_buf_xfer_ion_mem *mem_buf_alloc_ion_xfer_mem_type_data(
void *rmt_data)
@@ -626,18 +524,6 @@ static void mem_buf_cleanup_alloc_req(struct mem_buf_xfer_mem *xfer_mem)
mem_buf_free_xfer_mem(xfer_mem);
}
static int mem_buf_get_mem_xfer_type(struct hh_acl_desc *acl_desc)
{
u32 i, nr_acl_entries = acl_desc->n_acl_entries;
for (i = 0; i < nr_acl_entries; i++)
if (acl_desc->acl_entries[i].vmid == VMID_HLOS &&
acl_desc->acl_entries[i].perms != 0)
return HH_RM_TRANS_TYPE_SHARE;
return HH_RM_TRANS_TYPE_LEND;
}
static void mem_buf_alloc_req_work(struct work_struct *work)
{
struct mem_buf_rmt_msg *rmt_msg = to_rmt_msg(work);
@@ -957,60 +843,6 @@ static void mem_buf_relinquish_mem(u32 memparcel_hdl)
pr_debug("%s: allocation relinquish message sent\n", __func__);
}
/*
* FIXME: hh_rm_mem_accept uses kmemdup, which isn't right for large buffers.
*/
struct hh_sgl_desc *mem_buf_map_mem_s2(
hh_memparcel_handle_t memparcel_hdl,
struct hh_acl_desc *acl_desc)
{
struct hh_sgl_desc *sgl_desc;
if (!acl_desc)
return ERR_PTR(-EINVAL);
pr_debug("%s: adding CPU MMU stage 2 mappings\n", __func__);
sgl_desc = hh_rm_mem_accept(memparcel_hdl, HH_RM_MEM_TYPE_NORMAL,
mem_buf_get_mem_xfer_type(acl_desc),
HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
HH_RM_MEM_ACCEPT_DONE, 0, acl_desc, NULL,
NULL, 0);
if (IS_ERR(sgl_desc)) {
pr_err("%s failed to map memory in stage 2 rc: %d\n", __func__,
PTR_ERR(sgl_desc));
return sgl_desc;
}
trace_map_mem_s2(memparcel_hdl, sgl_desc);
return sgl_desc;
}
int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl)
{
int ret;
pr_debug("%s: removing CPU MMU stage 2 mappings\n", __func__);
ret = hh_rm_mem_release(memparcel_hdl, 0);
if (ret < 0)
pr_err("%s: Failed to release memparcel hdl: 0x%lx rc: %d\n",
__func__, memparcel_hdl, ret);
else
pr_debug("%s: CPU MMU stage 2 mappings removed\n", __func__);
return ret;
}
int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc)
{
return -EINVAL;
}
int mem_buf_unmap_mem_s1(struct hh_sgl_desc *sgl_desc)
{
return -EINVAL;
}
static int mem_buf_add_ion_mem(struct sg_table *sgt, void *dst_data)
{
struct mem_buf_ion_data *dst_ion_data = dst_data;
@@ -1199,26 +1031,6 @@ static struct hh_acl_desc *mem_buf_acl_to_hh_acl(unsigned int nr_acl_entries,
return ERR_PTR(ret);
}
struct hh_acl_desc *mem_buf_vmids_to_hh_acl(int *vmids, int *perms, unsigned int nr_acl_entries)
{
unsigned int i;
struct hh_acl_desc *acl_desc = kzalloc(offsetof(struct hh_acl_desc,
acl_entries[nr_acl_entries]),
GFP_KERNEL);
if (!acl_desc)
return ERR_PTR(-ENOMEM);
acl_desc->n_acl_entries = nr_acl_entries;
for (i = 0; i < nr_acl_entries; i++) {
acl_desc->acl_entries[i].vmid = vmids[i];
acl_desc->acl_entries[i].perms = perms[i];
}
return acl_desc;
}
static void *mem_buf_retrieve_ion_mem_type_data_user(
struct mem_buf_ion_data __user *mem_type_data)
{
@@ -1482,6 +1294,89 @@ void *mem_buf_get(int fd)
}
EXPORT_SYMBOL(mem_buf_get);
struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
{
int ret;
struct qcom_sg_buffer *buffer;
struct hh_acl_desc *acl_desc;
struct hh_sgl_desc *sgl_desc;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
struct sg_table *sgt;
if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
return ERR_PTR(-EOPNOTSUPP);
if (arg->fd_flags & ~MEM_BUF_VALID_FD_FLAGS)
return ERR_PTR(-EINVAL);
if (!arg->nr_acl_entries || !arg->vmids || !arg->perms)
return ERR_PTR(-EINVAL);
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
acl_desc = mem_buf_vmid_perm_list_to_hh_acl(arg->vmids, arg->perms,
arg->nr_acl_entries);
if (IS_ERR(acl_desc)) {
ret = PTR_ERR(acl_desc);
goto err_hh_acl;
}
sgl_desc = mem_buf_map_mem_s2(arg->memparcel_hdl, acl_desc);
if (IS_ERR(sgl_desc)) {
ret = PTR_ERR(sgl_desc);
goto err_map_s2;
}
ret = mem_buf_map_mem_s1(sgl_desc);
if (ret < 0)
goto err_map_mem_s1;
sgt = dup_hh_sgl_desc_to_sgt(sgl_desc);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_dup_sgt;
}
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->len = mem_buf_get_sgl_buf_size(sgl_desc);
buffer->sg_table = sgt;
buffer->free = mem_buf_retrieve_release;
buffer->vmperm = mem_buf_vmperm_alloc_accept(sgt, arg->memparcel_hdl);
exp_info.ops = &mem_buf_dma_buf_ops.dma_ops;
exp_info.size = buffer->len;
exp_info.flags = arg->fd_flags;
exp_info.priv = buffer;
dmabuf = mem_buf_dma_buf_export(&exp_info);
if (IS_ERR(dmabuf))
goto err_export_dma_buf;
/* sgt & qcom_sg_buffer will be freed by mem_buf_retrieve_release */
kfree(sgl_desc);
kfree(acl_desc);
return dmabuf;
err_export_dma_buf:
sg_free_table(sgt);
kfree(sgt);
err_dup_sgt:
mem_buf_unmap_mem_s1(sgl_desc);
err_map_mem_s1:
kfree(sgl_desc);
mem_buf_unmap_mem_s2(arg->memparcel_hdl);
err_map_s2:
kfree(acl_desc);
err_hh_acl:
kfree(buffer);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(mem_buf_retrieve);
/* Userspace machinery */
static int mem_buf_prep_alloc_data(struct mem_buf_allocation_data *alloc_data,
struct mem_buf_alloc_ioctl_arg *allocation_args)
@@ -1630,7 +1525,7 @@ static int mem_buf_acl_to_vmid_perms_list(unsigned int nr_acl_entries,
return ret;
}
size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
{
size_t size = 0;
unsigned int i;
@@ -1641,7 +1536,7 @@ size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
return size;
}
struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc)
static struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc)
{
struct sg_table *new_table;
int ret, i;
@@ -1670,27 +1565,6 @@ struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc)
return new_table;
}
struct hh_sgl_desc *dup_sgt_to_hh_sgl_desc(struct sg_table *sgt)
{
struct hh_sgl_desc *hh_sgl;
size_t size;
int i;
struct scatterlist *sg;
size = offsetof(struct hh_sgl_desc, sgl_entries[sgt->orig_nents]);
hh_sgl = kvmalloc(size, GFP_KERNEL);
if (!hh_sgl)
return ERR_PTR(-ENOMEM);
hh_sgl->n_sgl_entries = sgt->orig_nents;
for_each_sgtable_sg(sgt, sg, i) {
hh_sgl->sgl_entries[i].ipa_base = sg_phys(sg);
hh_sgl->sgl_entries[i].size = sg->length;
}
return hh_sgl;
}
static int mem_buf_lend_user(struct mem_buf_lend_ioctl_arg *uarg, bool is_lend)
{
int *vmids, *perms;
@@ -1900,35 +1774,20 @@ static const struct file_operations mem_buf_dev_fops = {
.unlocked_ioctl = mem_buf_dev_ioctl,
};
static int mem_buf_probe(struct platform_device *pdev)
/*
* The msgq needs to live in the same module as the ioctl handling code because it
* directly calls into mem_buf_process_alloc_resp without using a function
* pointer. Ideally msgq would support a client registration API which would
associate a 'struct mem_buf_msg_hdr->msg_type' with a handler callback.
*/
static int mem_buf_msgq_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct device *class_dev;
u64 dma_mask = IS_ENABLED(CONFIG_ARM64) ? DMA_BIT_MASK(64) :
DMA_BIT_MASK(32);
if (of_property_match_string(dev->of_node, "qcom,mem-buf-capabilities",
"supplier") >= 0) {
mem_buf_capability = MEM_BUF_CAP_SUPPLIER;
} else if (of_property_match_string(dev->of_node,
"qcom,mem-buf-capabilities",
"consumer") >= 0) {
mem_buf_capability = MEM_BUF_CAP_CONSUMER;
} else if (of_property_match_string(dev->of_node,
"qcom,mem-buf-capabilities",
"dual") >= 0) {
mem_buf_capability = MEM_BUF_CAP_DUAL;
} else {
dev_err(dev, "Transfer direction property not present or not valid\n");
return -EINVAL;
}
ret = dma_set_mask_and_coherent(dev, dma_mask);
if (ret) {
dev_err(dev, "Unable to set dma mask: %d\n", ret);
return ret;
}
if (!mem_buf_dev)
return -EPROBE_DEFER;
mem_buf_wq = alloc_workqueue("mem_buf_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!mem_buf_wq) {
@@ -1960,7 +1819,6 @@ static int mem_buf_probe(struct platform_device *pdev)
if (ret < 0)
goto err_cdev_add;
mem_buf_dev = dev;
class_dev = device_create(mem_buf_class, NULL, mem_buf_dev_no, NULL,
"membuf");
if (IS_ERR(class_dev)) {
@@ -1968,17 +1826,10 @@ static int mem_buf_probe(struct platform_device *pdev)
goto err_dev_create;
}
ret = mem_buf_vm_init(dev);
if (ret)
goto err_vm_init;
wake_up_process(mem_buf_msgq_recv_thr);
return 0;
err_vm_init:
put_device(class_dev);
err_dev_create:
mem_buf_dev = NULL;
cdev_del(&mem_buf_char_dev);
err_cdev_add:
hh_msgq_unregister(mem_buf_hh_msgq_hdl);
@@ -1992,7 +1843,7 @@ static int mem_buf_probe(struct platform_device *pdev)
return ret;
}
static int mem_buf_remove(struct platform_device *pdev)
static int mem_buf_msgq_remove(struct platform_device *pdev)
{
mutex_lock(&mem_buf_list_lock);
if (!list_empty(&mem_buf_list))
@@ -2007,7 +1858,6 @@ static int mem_buf_remove(struct platform_device *pdev)
mutex_unlock(&mem_buf_xfer_mem_list_lock);
device_destroy(mem_buf_class, mem_buf_dev_no);
mem_buf_dev = NULL;
cdev_del(&mem_buf_char_dev);
hh_msgq_unregister(mem_buf_hh_msgq_hdl);
mem_buf_hh_msgq_hdl = NULL;
@@ -2018,17 +1868,17 @@ static int mem_buf_remove(struct platform_device *pdev)
return 0;
}
static const struct of_device_id mem_buf_match_tbl[] = {
{.compatible = "qcom,mem-buf"},
static const struct of_device_id mem_buf_msgq_match_tbl[] = {
{.compatible = "qcom,mem-buf-msgq"},
{},
};
static struct platform_driver mem_buf_driver = {
.probe = mem_buf_probe,
.remove = mem_buf_remove,
static struct platform_driver mem_buf_msgq_driver = {
.probe = mem_buf_msgq_probe,
.remove = mem_buf_msgq_remove,
.driver = {
.name = "mem-buf",
.of_match_table = of_match_ptr(mem_buf_match_tbl),
.name = "mem-buf-msgq",
.of_match_table = of_match_ptr(mem_buf_msgq_match_tbl),
},
};
@@ -2047,7 +1897,7 @@ static int __init mem_buf_init(void)
goto err_class_create;
}
ret = platform_driver_register(&mem_buf_driver);
ret = platform_driver_register(&mem_buf_msgq_driver);
if (ret < 0)
goto err_platform_drvr_register;
@@ -2064,8 +1914,7 @@ module_init(mem_buf_init);
static void __exit mem_buf_exit(void)
{
mem_buf_vm_exit();
platform_driver_unregister(&mem_buf_driver);
platform_driver_unregister(&mem_buf_msgq_driver);
class_destroy(mem_buf_class);
unregister_chrdev_region(mem_buf_dev_no, MEM_BUF_MAX_DEVS);
}


@@ -7,7 +7,7 @@
#include <linux/highmem.h>
#include <linux/mem-buf-exporter.h>
#include "mem-buf-private.h"
#include "mem-buf-dev.h"
struct mem_buf_vmperm {
u32 flags;
@@ -193,6 +193,27 @@ static int __mem_buf_vmperm_reclaim(struct mem_buf_vmperm *vmperm)
return 0;
}
static struct hh_sgl_desc *dup_sgt_to_hh_sgl_desc(struct sg_table *sgt)
{
struct hh_sgl_desc *hh_sgl;
size_t size;
int i;
struct scatterlist *sg;
size = offsetof(struct hh_sgl_desc, sgl_entries[sgt->orig_nents]);
hh_sgl = kvmalloc(size, GFP_KERNEL);
if (!hh_sgl)
return ERR_PTR(-ENOMEM);
hh_sgl->n_sgl_entries = sgt->orig_nents;
for_each_sgtable_sg(sgt, sg, i) {
hh_sgl->sgl_entries[i].ipa_base = sg_phys(sg);
hh_sgl->sgl_entries[i].size = sg->length;
}
return hh_sgl;
}
static int mem_buf_vmperm_relinquish(struct mem_buf_vmperm *vmperm)
{
int ret;
@@ -813,7 +834,7 @@ int mem_buf_lend_internal(struct dma_buf *dmabuf,
mutex_unlock(&vmperm->lock);
return ret;
}
EXPORT_SYMBOL(mem_buf_lend);
EXPORT_SYMBOL(mem_buf_lend_internal);
/*
* Kernel API for Sharing, Lending, Receiving or Reclaiming
@@ -824,6 +845,7 @@ int mem_buf_lend(struct dma_buf *dmabuf,
{
return mem_buf_lend_internal(dmabuf, arg, true);
}
EXPORT_SYMBOL(mem_buf_lend);
int mem_buf_share(struct dma_buf *dmabuf,
struct mem_buf_lend_kernel_arg *arg)
@@ -838,89 +860,7 @@ void mem_buf_retrieve_release(struct qcom_sg_buffer *buffer)
kfree(buffer->sg_table);
kfree(buffer);
}
struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
{
int ret;
struct qcom_sg_buffer *buffer;
struct hh_acl_desc *acl_desc;
struct hh_sgl_desc *sgl_desc;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
struct sg_table *sgt;
if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
return ERR_PTR(-EOPNOTSUPP);
if (arg->fd_flags & ~MEM_BUF_VALID_FD_FLAGS)
return ERR_PTR(-EINVAL);
if (!arg->nr_acl_entries || !arg->vmids || !arg->perms)
return ERR_PTR(-EINVAL);
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
acl_desc = mem_buf_vmid_perm_list_to_hh_acl(arg->vmids, arg->perms,
arg->nr_acl_entries);
if (IS_ERR(acl_desc)) {
ret = PTR_ERR(acl_desc);
goto err_hh_acl;
}
sgl_desc = mem_buf_map_mem_s2(arg->memparcel_hdl, acl_desc);
if (IS_ERR(sgl_desc)) {
ret = PTR_ERR(sgl_desc);
goto err_map_s2;
}
ret = mem_buf_map_mem_s1(sgl_desc);
if (ret < 0)
goto err_map_mem_s1;
sgt = dup_hh_sgl_desc_to_sgt(sgl_desc);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_dup_sgt;
}
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->len = mem_buf_get_sgl_buf_size(sgl_desc);
buffer->sg_table = sgt;
buffer->free = mem_buf_retrieve_release;
buffer->vmperm = mem_buf_vmperm_alloc_accept(sgt, arg->memparcel_hdl);
exp_info.ops = &mem_buf_dma_buf_ops.dma_ops;
exp_info.size = buffer->len;
exp_info.flags = arg->fd_flags;
exp_info.priv = buffer;
dmabuf = mem_buf_dma_buf_export(&exp_info);
if (IS_ERR(dmabuf))
goto err_export_dma_buf;
/* sgt & qcom_sg_buffer will be freed by mem_buf_retrieve_release */
kfree(sgl_desc);
kfree(acl_desc);
return dmabuf;
err_export_dma_buf:
sg_free_table(sgt);
kfree(sgt);
err_dup_sgt:
mem_buf_unmap_mem_s1(sgl_desc);
err_map_mem_s1:
kfree(sgl_desc);
mem_buf_unmap_mem_s2(arg->memparcel_hdl);
err_map_s2:
kfree(acl_desc);
err_hh_acl:
kfree(buffer);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(mem_buf_retrieve);
EXPORT_SYMBOL(mem_buf_retrieve_release);
int mem_buf_reclaim(struct dma_buf *dmabuf)
{


@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <uapi/linux/mem-buf.h>
/**


@@ -31,6 +31,7 @@ iommu-logger.ko
msm_dma_iommu_mapping.ko
qcom_dma_heaps.ko
mem_buf.ko
mem_buf_dev.ko
qcom-arm-smmu-mod.ko
msm-geni-se.ko
msm_geni_serial.ko


@@ -94,6 +94,7 @@ msm_dma_iommu_mapping.ko
system_heap.ko
qcom_dma_heaps.ko
mem_buf.ko
mem_buf_dev.ko
qcom_smem.ko
qcom_smd.ko
mdt_loader.ko