dma-heap: qcom: Add the UBWC-P heap

Add the UBWC-P heap.

Change-Id: I62cc0c31313cd95fd1569c550504aeb47d0f089a
Signed-off-by: Chris Goldsworthy <quic_cgoldswo@quicinc.com>
parent 3a0a1539ac
commit 2a87fc7c56
drivers/dma-buf/heaps/Kconfig

@@ -75,3 +75,12 @@ config QCOM_DMABUF_HEAPS_CARVEOUT
 	help
 	  Choose this option to build the QCOM DMA-BUF Carveout heap.
 	  If in doubt, say Y here.
+
+config QCOM_DMABUF_HEAPS_UBWCP
+	bool "QCOM dma-buf UBWC-P Heap"
+	depends on QCOM_DMABUF_HEAPS_SYSTEM
+	help
+	  Choose this option to build the QCOM DMA-BUF UBWC-P heap.
+	  The heap itself is backed by system heap memory and works
+	  in tandem with the UBWC-P driver.
+	  If in doubt, say Y here.
drivers/dma-buf/heaps/Makefile

@@ -12,3 +12,4 @@ qcom_dma_heaps-$(CONFIG_QCOM_DMABUF_HEAPS_CMA) += qcom_cma_heap.o
 qcom_dma_heaps-$(CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT) += qcom_carveout_heap.o \
 						       qcom_dma_heap_secure_utils.o
 qcom_dma_heaps-$(CONFIG_QCOM_DMABUF_HEAPS_BITSTREAM_CONTIG) += qcom_bitstream_contig_heap.o
+qcom_dma_heaps-$(CONFIG_QCOM_DMABUF_HEAPS_UBWCP) += qcom_ubwcp_heap.o
drivers/dma-buf/heaps/qcom_sg_ops.c

@@ -57,8 +57,8 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	return new_table;
 }
 
-static int qcom_sg_attach(struct dma_buf *dmabuf,
-			  struct dma_buf_attachment *attachment)
+int qcom_sg_attach(struct dma_buf *dmabuf,
+		   struct dma_buf_attachment *attachment)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
@@ -88,8 +88,8 @@ static int qcom_sg_attach(struct dma_buf *dmabuf,
 	return 0;
 }
 
-static void qcom_sg_detach(struct dma_buf *dmabuf,
-			   struct dma_buf_attachment *attachment)
+void qcom_sg_detach(struct dma_buf *dmabuf,
+		    struct dma_buf_attachment *attachment)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a = attachment->priv;
@@ -103,8 +103,8 @@ static void qcom_sg_detach(struct dma_buf *dmabuf,
 	kfree(a);
 }
 
-static struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
-					    enum dma_data_direction direction)
+struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
+				     enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = a->table;
@@ -145,9 +145,9 @@ static struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
 	return table;
 }
 
-static void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
-				  struct sg_table *table,
-				  enum dma_data_direction direction)
+void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			   struct sg_table *table,
+			   enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct qcom_sg_buffer *buffer;
@@ -173,8 +173,8 @@ static void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	mutex_unlock(&buffer->lock);
 }
 
-static int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					    enum dma_data_direction direction)
+int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
@@ -204,8 +204,8 @@ static int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	return 0;
 }
 
-static int qcom_sg_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					  enum dma_data_direction direction)
+int qcom_sg_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				   enum dma_data_direction direction)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
@@ -290,10 +290,10 @@ static int sgl_sync_range(struct device *dev, struct scatterlist *sgl,
 	return 0;
 }
 
-static int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
-						    enum dma_data_direction dir,
-						    unsigned int offset,
-						    unsigned int len)
+int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+					     enum dma_data_direction dir,
+					     unsigned int offset,
+					     unsigned int len)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
@@ -325,10 +325,10 @@ static int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 	return ret;
 }
 
-static int qcom_sg_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
-						  enum dma_data_direction direction,
-						  unsigned int offset,
-						  unsigned int len)
+int qcom_sg_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction,
+					   unsigned int offset,
+					   unsigned int len)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
@@ -379,7 +379,7 @@ static const struct vm_operations_struct qcom_sg_vm_ops = {
 	.close = qcom_sg_vm_ops_close,
 };
 
-static int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	struct sg_table *table = &buffer->sg_table;
@@ -427,7 +427,7 @@ static int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer)
+void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer)
 {
 	struct sg_table *table = &buffer->sg_table;
 	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
@@ -457,7 +457,7 @@ static void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer)
 	return vaddr;
 }
 
-static int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 	void *vaddr;
@@ -492,7 +492,7 @@ static int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 	return ret;
 }
 
-static void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 
@@ -506,7 +506,7 @@ static void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 	iosys_map_clear(map);
 }
 
-static void qcom_sg_release(struct dma_buf *dmabuf)
+void qcom_sg_release(struct dma_buf *dmabuf)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 
@@ -517,7 +517,7 @@ static void qcom_sg_release(struct dma_buf *dmabuf)
 	buffer->free(buffer);
 }
 
-static struct mem_buf_vmperm *qcom_sg_lookup_vmperm(struct dma_buf *dmabuf)
+struct mem_buf_vmperm *qcom_sg_lookup_vmperm(struct dma_buf *dmabuf)
 {
 	struct qcom_sg_buffer *buffer = dmabuf->priv;
 
drivers/dma-buf/heaps/qcom_sg_ops.h

@@ -8,6 +8,7 @@
  * Andrew F. Davis <afd@ti.com>
  *
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _QCOM_SG_OPS_H
@@ -38,6 +39,47 @@ struct dma_heap_attachment {
 	bool mapped;
 };
 
+int qcom_sg_attach(struct dma_buf *dmabuf,
+		   struct dma_buf_attachment *attachment);
+
+void qcom_sg_detach(struct dma_buf *dmabuf,
+		    struct dma_buf_attachment *attachment);
+
+struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
+				     enum dma_data_direction direction);
+
+void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			   struct sg_table *table,
+			   enum dma_data_direction direction);
+
+int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction);
+
+int qcom_sg_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				   enum dma_data_direction direction);
+
+int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+					     enum dma_data_direction dir,
+					     unsigned int offset,
+					     unsigned int len);
+
+int qcom_sg_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction,
+					   unsigned int offset,
+					   unsigned int len);
+
+int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma);
+
+void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer);
+
+int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+
+void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
+
+void qcom_sg_release(struct dma_buf *dmabuf);
+
+struct mem_buf_vmperm *qcom_sg_lookup_vmperm(struct dma_buf *dmabuf);
+
 extern struct mem_buf_dma_buf_ops qcom_sg_buf_ops;
 
 #endif /* _QCOM_SG_OPS_H */
drivers/dma-buf/heaps/qcom_system_heap.c

@@ -340,7 +340,7 @@ static int system_heap_zero_buffer(struct qcom_sg_buffer *buffer)
 	return ret;
 }
 
-static void system_heap_free(struct qcom_sg_buffer *buffer)
+void qcom_system_heap_free(struct qcom_sg_buffer *buffer)
 {
 	struct qcom_system_heap *sys_heap;
 	struct sg_table *table;
@@ -367,7 +367,7 @@ static void system_heap_free(struct qcom_sg_buffer *buffer)
 
 static void system_qcom_sg_buffer_free(struct qcom_sg_buffer *buffer)
 {
-	system_heap_free(buffer);
+	qcom_system_heap_free(buffer);
 	kfree(buffer);
 }
 
@@ -519,7 +519,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 free_vmperm:
 	mem_buf_vmperm_release(buffer->vmperm);
 free_sys_heap_mem:
-	system_heap_free(buffer);
+	qcom_system_heap_free(buffer);
 free_buf_struct:
 	kfree(buffer);
drivers/dma-buf/heaps/qcom_system_heap.h

@@ -11,6 +11,7 @@
 #include <linux/err.h>
+#include "qcom_sg_ops.h"
 #include "qcom_dynamic_page_pool.h"
 #include "qcom_sg_ops.h"
 
 struct qcom_system_heap {
 	int uncached;
@@ -19,6 +20,7 @@ struct qcom_system_heap {
 
 #ifdef CONFIG_QCOM_DMABUF_HEAPS_SYSTEM
 void qcom_system_heap_create(const char *name, const char *system_alias, bool uncached);
+void qcom_system_heap_free(struct qcom_sg_buffer *buffer);
 struct page *qcom_sys_heap_alloc_largest_available(struct dynamic_page_pool **pools,
 						   unsigned long size,
 						   unsigned int max_order);
@@ -30,6 +32,10 @@ static inline void qcom_system_heap_create(const char *name, const char *system_alias,
 						 bool uncached)
 {
 
 }
+static inline void qcom_system_heap_free(struct qcom_sg_buffer *buffer)
+{
+
+}
 static inline struct page *qcom_sys_heap_alloc_largest_available(struct dynamic_page_pool **pools,
 								 unsigned long size,
new file: drivers/dma-buf/heaps/qcom_ubwcp_heap.c (453 lines)

@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/ubwcp_dma_heap.h>
+#include <trace/hooks/dma_buf.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <linux/qcom-dma-mapping.h>
+#include "qcom_system_heap.h"
+
+static struct dma_heap *sys_heap;
+
+struct ubwcp_driver_ops {
+	init_buffer init_buffer;
+	free_buffer free_buffer;
+	lock_buffer lock_buffer;
+	unlock_buffer unlock_buffer;
+} ubwcp_driver_ops;
+
+struct ubwcp_buffer {
+	struct qcom_sg_buffer qcom_sg_buf;
+	bool ubwcp_init_complete;
+
+	struct rw_semaphore linear_mode_sem;
+	bool mmap_configured;
+	bool is_linear;
+	atomic_t cpu_map_count;
+	phys_addr_t ula_pa_addr;
+	size_t ula_pa_size;
+};
+
+static int ubwcp_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					  enum dma_data_direction direction)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	int ret;
+
+	down_read(&buffer->linear_mode_sem);
+
+	if (!buffer->is_linear)
+		ret = ubwcp_driver_ops.lock_buffer(dmabuf, direction);
+	else
+		ret = qcom_sg_dma_buf_begin_cpu_access(dmabuf, direction);
+
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static int ubwcp_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	int ret;
+
+	down_read(&buffer->linear_mode_sem);
+
+	if (!buffer->is_linear)
+		ret = ubwcp_driver_ops.unlock_buffer(dmabuf, direction);
+	else
+		ret = qcom_sg_dma_buf_end_cpu_access(dmabuf, direction);
+
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static int ubwcp_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+						  enum dma_data_direction direction,
+						  unsigned int offset,
+						  unsigned int len)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	int ret;
+
+	down_read(&buffer->linear_mode_sem);
+	if (!buffer->is_linear) {
+		pr_err("%s: isn't in linear mode, bailing\n", __func__);
+		ret = -EINVAL;
+	} else {
+		ret = qcom_sg_dma_buf_begin_cpu_access_partial(dmabuf, direction, offset,
+							       len);
+	}
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static int ubwcp_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+						enum dma_data_direction direction,
+						unsigned int offset,
+						unsigned int len)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	int ret;
+
+	down_read(&buffer->linear_mode_sem);
+	if (!buffer->is_linear) {
+		pr_err("%s: isn't in linear mode, bailing\n", __func__);
+		ret = -EINVAL;
+	} else {
+		ret = qcom_sg_dma_buf_end_cpu_access_partial(dmabuf, direction, offset,
+							     len);
+	}
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static void qcom_sg_vm_ops_open(struct vm_area_struct *vma)
+{
+	struct ubwcp_buffer *buffer = vma->vm_private_data;
+
+	atomic_inc(&buffer->cpu_map_count);
+	mem_buf_vmperm_pin(buffer->qcom_sg_buf.vmperm);
+}
+
+static void qcom_sg_vm_ops_close(struct vm_area_struct *vma)
+{
+	struct ubwcp_buffer *buffer = vma->vm_private_data;
+
+	atomic_dec(&buffer->cpu_map_count);
+	mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
+}
+
+static const struct vm_operations_struct qcom_sg_vm_ops = {
+	.open = qcom_sg_vm_ops_open,
+	.close = qcom_sg_vm_ops_close,
+};
+
+static int ubwcp_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	unsigned long vaddr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	unsigned long map_len = vma->vm_end - vma->vm_start;
+	int ret = 0;
+
+	down_read(&buffer->linear_mode_sem);
+	if (!buffer->mmap_configured) {
+		pr_err("Must call msm_ubwcp_dma_buf_configure_mmap() before calling %s()\n",
+		       __func__);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (buffer->is_linear) {
+		ret = qcom_sg_mmap(dmabuf, vma);
+		goto unlock;
+	}
+
+	if (map_len + offset > buffer->ula_pa_size) {
+		pr_err("mmap is too large!\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	mem_buf_vmperm_pin(buffer->qcom_sg_buf.vmperm);
+	if (!mem_buf_vmperm_can_mmap(buffer->qcom_sg_buf.vmperm, vma)) {
+		mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	vma->vm_ops = &qcom_sg_vm_ops;
+	vma->vm_private_data = buffer;
+
+	ret = remap_pfn_range(vma, vaddr,
+			      (buffer->ula_pa_addr + offset) >> PAGE_SHIFT,
+			      map_len, vma->vm_page_prot);
+	if (ret) {
+		mem_buf_vmperm_unpin(buffer->qcom_sg_buf.vmperm);
+		goto unlock;
+	}
+
+	atomic_inc(&buffer->cpu_map_count);
+unlock:
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static int ubwcp_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+	int ret;
+
+	down_read(&buffer->linear_mode_sem);
+	if (!buffer->mmap_configured) {
+		pr_err("Must call msm_ubwcp_dma_buf_configure_mmap() before calling %s()\n",
+		       __func__);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!buffer->is_linear) {
+		pr_err("%s: isn't in linear mode, bailing\n", __func__);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = qcom_sg_vmap(dmabuf, map);
+	if (ret)
+		goto unlock;
+
+	atomic_inc(&buffer->cpu_map_count);
+unlock:
+	up_read(&buffer->linear_mode_sem);
+
+	return ret;
+}
+
+static void ubwcp_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+
+	down_read(&buffer->linear_mode_sem);
+
+	if (!buffer->is_linear)
+		pr_err("%s: isn't in linear mode, bailing\n", __func__);
+	else
+		qcom_sg_vunmap(dmabuf, map);
+
+	WARN_ON(atomic_read(&buffer->cpu_map_count) <= 0);
+	atomic_dec(&buffer->cpu_map_count);
+	up_read(&buffer->linear_mode_sem);
+}
+
+static void ubwcp_release(struct dma_buf *dmabuf)
+{
+	int ret;
+	struct ubwcp_buffer *buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+
+	if (buffer->ubwcp_init_complete) {
+		ret = ubwcp_driver_ops.free_buffer(dmabuf);
+		if (ret) {
+			pr_err("%s: UBWC-P buffer not freed, err: %d\n", __func__, ret);
+			return;
+		}
+	}
+
+	ret = mem_buf_vmperm_release(buffer->qcom_sg_buf.vmperm);
+	if (ret) {
+		pr_err("%s: Failed to release vmperm, err: %d\n", __func__, ret);
+		return;
+	}
+
+	msm_dma_buf_freed(dmabuf->priv);
+	qcom_system_heap_free(&buffer->qcom_sg_buf);
+	kfree(buffer);
+}
+
+struct mem_buf_dma_buf_ops ubwcp_ops = {
+	.attach = qcom_sg_attach,
+	.lookup = qcom_sg_lookup_vmperm,
+	.dma_ops = {
+		.attach = NULL, /* Will be set by mem_buf_dma_buf_export */
+		.detach = qcom_sg_detach,
+		.map_dma_buf = qcom_sg_map_dma_buf,
+		.unmap_dma_buf = qcom_sg_unmap_dma_buf,
+		.begin_cpu_access = ubwcp_dma_buf_begin_cpu_access,
+		.end_cpu_access = ubwcp_dma_buf_end_cpu_access,
+		.begin_cpu_access_partial = ubwcp_dma_buf_begin_cpu_access_partial,
+		.end_cpu_access_partial = ubwcp_dma_buf_end_cpu_access_partial,
+		.mmap = ubwcp_mmap,
+		.vmap = ubwcp_vmap,
+		.vunmap = ubwcp_vunmap,
+		.release = ubwcp_release,
+	}
+};
+
+int msm_ubwcp_dma_buf_configure_mmap(struct dma_buf *dmabuf, bool linear,
+				     phys_addr_t ula_pa_addr,
+				     size_t ula_pa_size)
+{
+	struct ubwcp_buffer *buffer;
+	int ret = 0;
+
+	if (dmabuf->ops != &ubwcp_ops.dma_ops) {
+		pr_err("%s: User didn't pass in a UBWC-P DMA-BUF!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ula_pa_addr % PAGE_SIZE || ula_pa_size % PAGE_SIZE) {
+		pr_err("%s: ULA PA addr and ULA PA map size must be page-aligned!\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+
+	down_write(&buffer->linear_mode_sem);
+	if (atomic_read(&buffer->cpu_map_count)) {
+		pr_err("%s: Buffer already mapped!\n", __func__);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	buffer->is_linear = linear;
+	buffer->ula_pa_addr = ula_pa_addr;
+	buffer->ula_pa_size = ula_pa_size;
+	buffer->mmap_configured = true;
+unlock:
+	up_write(&buffer->linear_mode_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ubwcp_dma_buf_configure_mmap);
+
+static struct dma_buf *ubwcp_allocate(struct dma_heap *heap,
+				      unsigned long len,
+				      unsigned long fd_flags,
+				      unsigned long heap_flags)
+{
+	struct ubwcp_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dmabuf;
+	int ret = -ENOMEM;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+	init_rwsem(&buffer->linear_mode_sem);
+
+	ret = system_qcom_sg_buffer_alloc(sys_heap, &buffer->qcom_sg_buf, len);
+	if (ret)
+		goto free_buf_struct;
+
+	buffer->qcom_sg_buf.vmperm = mem_buf_vmperm_alloc(&buffer->qcom_sg_buf.sg_table);
+	if (IS_ERR(buffer->qcom_sg_buf.vmperm)) {
+		ret = PTR_ERR(buffer->qcom_sg_buf.vmperm);
+		goto free_sys_heap_mem;
+	}
+
+	/* create the dmabuf */
+	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.size = buffer->qcom_sg_buf.len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = &buffer->qcom_sg_buf;
+	dmabuf = mem_buf_dma_buf_export(&exp_info, &ubwcp_ops);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto free_vmperm;
+	}
+
+	ret = ubwcp_driver_ops.init_buffer(dmabuf);
+	if (ret)
+		goto buf_release;
+	buffer->ubwcp_init_complete = true;
+
+	return dmabuf;
+
+buf_release:
+	dma_buf_put(dmabuf);
+	return ERR_PTR(ret);
+
+free_vmperm:
+	mem_buf_vmperm_release(buffer->qcom_sg_buf.vmperm);
+free_sys_heap_mem:
+	qcom_system_heap_free(&buffer->qcom_sg_buf);
+free_buf_struct:
+	kfree(buffer);
+
+	return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops ubwcp_heap_ops = {
+	.allocate = ubwcp_allocate,
+};
+
+static void ignore_vmap_bounds_check(void *unused, struct dma_buf *dmabuf, bool *result)
+{
+	struct ubwcp_buffer *buffer;
+
+	if (dmabuf->ops != &ubwcp_ops.dma_ops) {
+		*result = false;
+		return;
+	}
+
+	buffer = container_of(dmabuf->priv, struct ubwcp_buffer, qcom_sg_buf);
+
+	if (buffer->is_linear)
+		*result = false;
+	else
+		*result = true;
+}
+
+int qcom_ubwcp_heap_create(void)
+{
+	struct dma_heap_export_info exp_info;
+	struct dma_heap *heap;
+	char *name = "qcom,ubwcp";
+	int ret;
+
+	ret = register_trace_android_vh_ignore_dmabuf_vmap_bounds(ignore_vmap_bounds_check,
+								  NULL);
+	if (ret) {
+		pr_err("%s: Unable to register vmap bounds tracehook\n", __func__);
+		goto out;
+	}
+
+	sys_heap = dma_heap_find("qcom,system");
+	if (!sys_heap) {
+		pr_err("%s: Unable to find 'qcom,system'\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	exp_info.name = name;
+	exp_info.ops = &ubwcp_heap_ops;
+	exp_info.priv = NULL;
+
+	heap = dma_heap_add(&exp_info);
+	if (IS_ERR(heap)) {
+		ret = PTR_ERR(heap);
+		goto out;
+	}
+
+	pr_info("%s: DMA-BUF Heap: Created '%s'\n", __func__, name);
+
+	return 0;
+out:
+	pr_err("%s: Failed to create '%s', error is %d\n", __func__, name, ret);
+
+	return ret;
+}
+
+int msm_ubwcp_set_ops(init_buffer init_buf_fn_ptr,
+		      free_buffer free_buf_fn_ptr,
+		      lock_buffer lock_buf_fn_ptr,
+		      unlock_buffer unlock_buf_fn_ptr)
+{
+	if (!init_buf_fn_ptr || !free_buf_fn_ptr || !lock_buf_fn_ptr ||
+	    !unlock_buf_fn_ptr) {
+		pr_err("%s: Missing function pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	ubwcp_driver_ops.init_buffer = init_buf_fn_ptr;
+	ubwcp_driver_ops.free_buffer = free_buf_fn_ptr;
+	ubwcp_driver_ops.lock_buffer = lock_buf_fn_ptr;
+	ubwcp_driver_ops.unlock_buffer = unlock_buf_fn_ptr;
+
+	return qcom_ubwcp_heap_create();
+}
+EXPORT_SYMBOL(msm_ubwcp_set_ops);
+
+MODULE_IMPORT_NS(DMA_BUF);
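For context, here is a minimal userspace sketch (not part of this patch) of allocating from the new heap once it is registered. It uses only the generic DMA-BUF heap UAPI from <linux/dma-heap.h>; the device node name follows from the "qcom,ubwcp" heap name created above, and alloc_ubwcp_buf() is a hypothetical helper name.

/*
 * Hypothetical userspace helper (illustration only, not part of this
 * patch). Allocates a buffer from the UBWC-P heap via the standard
 * dma-heap ioctl and returns the resulting dma-buf fd, or -1 on error.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int alloc_ubwcp_buf(unsigned long len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, ret;

	/* The node name matches the "qcom,ubwcp" heap registered above. */
	heap_fd = open("/dev/dma_heap/qcom,ubwcp", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	/* A dma-buf fd backed by qcom,system heap pages. */
	return data.fd;
}

Note that mmap() and vmap() on the returned fd fail with -EINVAL until the UBWC-P driver has called msm_ubwcp_dma_buf_configure_mmap() on the buffer, per the mmap_configured checks in ubwcp_mmap() and ubwcp_vmap() above.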
new file: drivers/dma-buf/heaps/qcom_ubwcp_heap.h (18 lines)

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QCOM_UBWCP_HEAP_H
+#define _QCOM_UBWCP_HEAP_H
+
+#ifdef CONFIG_QCOM_DMABUF_HEAPS_UBWCP
+int qcom_ubwcp_heap_create(void);
+#else
+static inline int qcom_ubwcp_heap_create(void)
+{
+	return 0;
+}
+#endif
+
+#endif /* _QCOM_UBWCP_HEAP_H */
include/linux/ubwcp_dma_heap.h

@@ -58,4 +58,36 @@ typedef int (*lock_buffer)(struct dma_buf *dma_buf, enum dma_data_direction direction);
  */
 typedef int (*unlock_buffer)(struct dma_buf *dmabuf, enum dma_data_direction direction);
 
+/**
+ * Set the callbacks that will allow the UBWC-P DMA-BUF heap driver
+ * to call back into the UBWC-P driver.
+ *
+ * @param init_buf_fn_ptr : Pointer to init_buffer function
+ * @param free_buf_fn_ptr : Pointer to free_buffer function
+ * @param lock_buf_fn_ptr : Pointer to lock_buffer function
+ * @param unlock_buf_fn_ptr : Pointer to unlock_buffer function
+ *
+ * @return int : 0 on success, otherwise error code
+ */
+int msm_ubwcp_set_ops(init_buffer init_buf_fn_ptr,
+		      free_buffer free_buf_fn_ptr,
+		      lock_buffer lock_buf_fn_ptr,
+		      unlock_buffer unlock_buf_fn_ptr);
+
+/**
+ * Configures whether a DMA-BUF allocated from the UBWC-P heap is in
+ * linear or UBWC-P mode.
+ *
+ * @param dmabuf : ptr to the dma buf
+ * @param linear : controls which mode this buffer will be placed into
+ * @param ula_pa_addr : ULA-PA "physical address" to mmap a buffer to
+ * @param ula_pa_size : size of the ULA-PA buffer mapping to be used
+ *			during mmap
+ *
+ * @return int : 0 on success, otherwise error code
+ */
+int msm_ubwcp_dma_buf_configure_mmap(struct dma_buf *dmabuf, bool linear,
+				     phys_addr_t ula_pa_addr,
+				     size_t ula_pa_size);
+
 #endif /* __UBWCP_DMA_HEAP_H_ */
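To make the registration contract concrete, here is a hedged kernel-side sketch of how the UBWC-P driver (which lives outside this patch) might wire itself up at probe time. The my_*() names are placeholders; the init_buffer and free_buffer signatures are inferred from their call sites in qcom_ubwcp_heap.c, since only the lock/unlock typedefs are visible in this hunk.

/* Hypothetical UBWC-P driver glue; the callback bodies are stubs. */
static int my_init_buffer(struct dma_buf *dmabuf)
{
	/* Set up per-buffer UBWC-P metadata here. */
	return 0;
}

static int my_free_buffer(struct dma_buf *dmabuf)
{
	/* Tear down per-buffer UBWC-P state here. */
	return 0;
}

static int my_lock_buffer(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	/* Grant the CPU access to the compressed buffer via ULA-PA. */
	return 0;
}

static int my_unlock_buffer(struct dma_buf *dmabuf, enum dma_data_direction dir)
{
	/* Revoke CPU access and hand the buffer back to the device. */
	return 0;
}

static int my_driver_register_heap(void)
{
	/* On success this also registers the "qcom,ubwcp" dma-heap. */
	return msm_ubwcp_set_ops(my_init_buffer, my_free_buffer,
				 my_lock_buffer, my_unlock_buffer);
}

Once init_buffer() has run for a freshly allocated buffer, the driver picks the buffer's mode with, for example, msm_ubwcp_dma_buf_configure_mmap(dmabuf, false, ula_pa_addr, ula_pa_size) before any CPU mapping exists; both ULA-PA arguments must be page-aligned, per the checks in the implementation.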