rpmsg: glink: Add an interface for memshare
In the initial version of zero copy, the memshare driver was expected to allocate buffers using the rproc device, and GLINK would access the DMA buffer through the rproc helpers. This approach starts to fall apart when reserved memory regions are needed for larger allocations: the q6v5_pas rproc can specify a reserved region to allocate DMA memory from, and when such a region is present there is no facility in rproc to point at a different DMA region when allocating from memshare.

This patch adds an interface for the memshare driver to register regions directly with GLINK. When GLINK receives a zero copy packet, it can query and prepare CPU access through this interface instead of relying on remoteproc. This is a temporary interface until a scheme for sharing buffers between remoteproc, memshare and GLINK is accepted.

Change-Id: I1438cea517fad55d996852cb17cb2f591190cf04
Signed-off-by: Chris Lew <quic_clew@quicinc.com>
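As a rough illustration (not part of the patch), a memshare-style provider could register a buffer with GLINK along these lines. The memshare_* names are hypothetical; the sketch assumes the declarations come from the updated <linux/rpmsg/qcom_glink.h> header below and that the remote-visible device address equals the DMA address.

/*
 * Hypothetical provider-side registration. Assumes the buffer is
 * allocated against the memshare device and that the 32-bit device
 * address (da) seen by the remote matches the DMA address.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/rpmsg/qcom_glink.h>

static struct qcom_glink_mem_entry *memshare_entry;    /* hypothetical */

static int memshare_share_buffer(struct device *dev, size_t len)
{
        dma_addr_t dma;
        void *va;

        va = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
        if (!va)
                return -ENOMEM;

        /* Make the region visible to GLINK's zero-copy lookup. */
        memshare_entry = qcom_glink_mem_entry_init(dev, va, dma, len, (u32)dma);
        if (!memshare_entry) {
                dma_free_coherent(dev, len, va, dma);
                return -ENOMEM;
        }

        return 0;
}

static void memshare_unshare_buffer(struct device *dev, size_t len,
                                    void *va, dma_addr_t dma)
{
        /* Unregister from GLINK before the backing memory goes away. */
        qcom_glink_mem_entry_free(memshare_entry);
        dma_free_coherent(dev, len, va, dma);
}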
commit 052dc8dd1f (parent 7e077bd977)
@@ -5,6 +5,7 @@ obj-$(CONFIG_RPMSG_CTRL) += rpmsg_ctrl.o
 obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o
 obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
 qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
+qcom_glink-objs += qcom_glink_memshare.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
 obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
drivers/rpmsg/qcom_glink_memshare.c (new file, 95 lines)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/slab.h>

struct qcom_glink_mem_entry {
        struct device *dev;
        void *va;
        dma_addr_t dma;
        size_t len;
        u32 da;
        struct list_head node;
};

static DEFINE_SPINLOCK(qcom_glink_mem_entry_lock);
static LIST_HEAD(qcom_glink_mem_entries);

struct qcom_glink_mem_entry *
qcom_glink_mem_entry_init(struct device *dev, void *va, dma_addr_t dma, size_t len, u32 da)
{
        struct qcom_glink_mem_entry *mem = NULL;
        unsigned long flags;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return mem;

        mem->dev = dev;
        mem->va = va;
        mem->dma = dma;
        mem->da = da;
        mem->len = len;
        INIT_LIST_HEAD(&mem->node);

        spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
        list_add_tail(&mem->node, &qcom_glink_mem_entries);
        spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

        return mem;
}
EXPORT_SYMBOL(qcom_glink_mem_entry_init);

void qcom_glink_mem_entry_free(struct qcom_glink_mem_entry *mem)
{
        struct qcom_glink_mem_entry *entry, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
        list_for_each_entry_safe(entry, tmp, &qcom_glink_mem_entries, node) {
                if (entry == mem) {
                        list_del(&mem->node);
                        break;
                }
        }
        spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

        kfree(mem);
}
EXPORT_SYMBOL(qcom_glink_mem_entry_free);

void *qcom_glink_prepare_da_for_cpu(u64 da, size_t len)
{
        struct qcom_glink_mem_entry *mem;
        unsigned long flags;
        void *ptr = NULL;

        spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
        list_for_each_entry(mem, &qcom_glink_mem_entries, node) {
                int offset = da - mem->da;

                if (!mem->va)
                        continue;

                if (offset < 0)
                        continue;

                if (offset + len > mem->len)
                        continue;

                ptr = mem->va + offset;
                dma_sync_single_for_cpu(mem->dev, da, len, DMA_FROM_DEVICE);

                break;
        }
        spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

        return ptr;
}
EXPORT_SYMBOL(qcom_glink_prepare_da_for_cpu);
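For context, a minimal sketch of how a receive path could consume the new lookup helper; handle_zero_copy_rx, rx_da and rx_len are illustrative stand-ins for values parsed from a zero-copy rx command, not code from this patch.

/*
 * Illustrative only: translate the device address carried by a
 * zero-copy rx command into a CPU-synced pointer via the new helper,
 * instead of asking remoteproc to do the translation.
 */
#include <linux/errno.h>
#include <linux/types.h>

static int handle_zero_copy_rx(u64 rx_da, size_t rx_len)
{
        void *vaddr;

        vaddr = qcom_glink_prepare_da_for_cpu(rx_da, rx_len);
        if (!vaddr)
                return -EINVAL; /* no registered memshare region covers this range */

        /* vaddr now points at rx_len bytes synced for CPU access. */
        return 0;
}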
@@ -63,4 +63,6 @@ int qcom_glink_register_signals_cb(struct rpmsg_endpoint *ept,
 bool qcom_glink_rx_done_supported(struct rpmsg_endpoint *ept);
 int qcom_glink_rx_done(struct rpmsg_endpoint *ept, void *data);
 
+void *qcom_glink_prepare_da_for_cpu(u64 da, size_t len);
+
 #endif
@@ -6,11 +6,21 @@
 #include <linux/device.h>
 
 struct qcom_glink;
+struct qcom_glink_mem_entry;
 
 #if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
 void qcom_glink_ssr_notify(const char *ssr_name);
+struct qcom_glink_mem_entry *
+qcom_glink_mem_entry_init(struct device *dev, void *va, dma_addr_t dma, size_t len, u32 da);
+void qcom_glink_mem_entry_free(struct qcom_glink_mem_entry *mem);
 #else
 static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+static inline struct qcom_glink_mem_entry *
+qcom_glink_mem_entry_init(struct device *dev, void *va, dma_addr_t dma, size_t len, u32 da)
+{
+	return NULL;
+}
+static inline void qcom_glink_mem_entry_free(struct qcom_glink_mem_entry *mem) {}
 #endif
 
 #if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)