Merge "soc: qcom: hgsl: Enable hgsl driver"

qctecmdr 2023-05-08 14:43:10 -07:00 committed by Gerrit - the friendly Code Review server
commit 3070f95743
21 changed files with 10013 additions and 0 deletions


@ -48,6 +48,8 @@ CONFIG_QCOM_DMABUF_HEAPS_CMA=y
CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y
CONFIG_QCOM_HGSL=m
CONFIG_QCOM_HGSL_TCSR_SIGNAL=m
CONFIG_QCOM_IOMMU_DEBUG=m
CONFIG_QCOM_IOMMU_UTIL=m
CONFIG_QCOM_MEM_BUF=m


@ -53,6 +53,7 @@ def define_autogvm():
"drivers/rtc/rtc-pm8xxx.ko",
"drivers/soc/qcom/boot_stats.ko",
"drivers/soc/qcom/hab/msm_hab.ko",
"drivers/soc/qcom/hgsl/qcom_hgsl.ko",
"drivers/soc/qcom/mem_buf/mem_buf.ko",
"drivers/soc/qcom/mem_buf/mem_buf_dev.ko",
"drivers/soc/qcom/mem_buf/mem_buf_msgq.ko",


@ -249,6 +249,9 @@ config QCOM_QMI_HELPERS
source "drivers/soc/qcom/memshare/Kconfig"
source "drivers/soc/qcom/hab/Kconfig"
source "drivers/soc/qcom/hgsl/Kconfig"
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
depends on ARCH_QCOM


@ -65,6 +65,7 @@ crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER) += crypto-qti-hwkm.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o
obj-$(CONFIG_MSM_TMECOM_QMP) += tmecom/
obj-$(CONFIG_MSM_HAB) += hab/
obj-$(CONFIG_QCOM_HGSL) += hgsl/
obj-$(CONFIG_GIC_INTERRUPT_ROUTING) += gic_intr_routing.o
obj-$(CONFIG_QCOM_MINIDUMP) += minidump.o
minidump-y += msm_minidump.o minidump_log.o


@ -0,0 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Hypervisor graphics system layer configuration
#
config QCOM_HGSL
tristate "Graphics driver for Hypervisor"
depends on QTI_QUIN_GVM
depends on MSM_HAB
help
This driver provides command submission support for
hypervisor Linux. Using the HFI feature provided by A6x
GPUs, it can submit commands directly to the hardware
without passing them through the host system.
config QCOM_HGSL_TCSR_SIGNAL
tristate "TCSR signal for Hypervisor GSL"
depends on MFD_SYSCON && QCOM_HGSL
help
The TCSR compute signal module provides the hgsl driver
in hypervisor Linux with a way to send/receive signals
to/from the A6x GPU hardware directly, without going
through the host system.


@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
qcom_hgsl-objs = hgsl.o \
hgsl_sync.o \
hgsl_memory.o \
hgsl_hyp.o \
hgsl_hyp_socket.o
qcom_hgsl-$(CONFIG_QCOM_HGSL_TCSR_SIGNAL) += hgsl_tcsr.o
obj-$(CONFIG_QCOM_HGSL) += qcom_hgsl.o

drivers/soc/qcom/hgsl/hgsl.c (new file, 3515 lines)

File diff suppressed because it is too large.


@ -0,0 +1,266 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HGSL_H_
#define __HGSL_H_
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/dma-buf.h>
#include <linux/spinlock.h>
#include <linux/sync_file.h>
#include "hgsl_hyp.h"
#include "hgsl_memory.h"
#include "hgsl_tcsr.h"
#define HGSL_TIMELINE_NAME_LEN 64
#define HGSL_ISYNC_32BITS_TIMELINE 0
#define HGSL_ISYNC_64BITS_TIMELINE 1
/* Support up to 3 GVMs: 3 DBQs (Low/Medium/High priority) per GVM */
#define MAX_DB_QUEUE 9
#define HGSL_TCSR_NUM 2
struct qcom_hgsl;
struct hgsl_hsync_timeline;
#pragma pack(push, 4)
struct shadow_ts {
unsigned int sop;
unsigned int unused1;
unsigned int eop;
unsigned int unused2;
unsigned int reserved[6];
};
#pragma pack(pop)
struct reg {
unsigned long paddr;
unsigned long size;
void __iomem *vaddr;
};
struct hw_version {
unsigned int version;
unsigned int release;
};
struct db_buffer {
int32_t dwords;
void *vaddr;
};
struct doorbell_queue {
struct dma_buf *dma;
struct iosys_map map;
void *vbase;
struct db_buffer data;
uint32_t state;
int tcsr_idx;
uint32_t dbq_idx;
struct mutex lock;
atomic_t seq_num;
};
struct qcom_hgsl {
struct device *dev;
/* character device info */
struct cdev cdev;
dev_t device_no;
struct class *driver_class;
struct device *class_dev;
/* registers mapping */
struct reg reg_ver;
struct reg reg_dbidx;
struct doorbell_queue dbq[MAX_DB_QUEUE];
struct hgsl_dbq_info dbq_info[MAX_DB_QUEUE];
/* Could disable db and use isync only */
bool db_off;
/* global doorbell tcsr */
struct hgsl_tcsr *tcsr[HGSL_TCSR_NUM][HGSL_TCSR_ROLE_MAX];
int tcsr_idx;
struct hgsl_context **contexts;
rwlock_t ctxt_lock;
struct list_head active_wait_list;
spinlock_t active_wait_lock;
struct workqueue_struct *wq;
struct work_struct ts_retire_work;
struct hw_version *ver;
struct hgsl_hyp_priv_t global_hyp;
bool global_hyp_inited;
struct mutex mutex;
struct list_head release_list;
struct workqueue_struct *release_wq;
struct work_struct release_work;
struct idr isync_timeline_idr;
spinlock_t isync_timeline_lock;
atomic64_t total_mem_size;
};
/**
* HGSL context definition
*/
struct hgsl_context {
struct hgsl_priv *priv;
struct iosys_map map;
uint32_t context_id;
uint32_t devhandle;
uint32_t flags;
struct shadow_ts *shadow_ts;
wait_queue_head_t wait_q;
pid_t pid;
bool dbq_assigned;
uint32_t dbq_info;
struct doorbell_queue *dbq;
struct hgsl_mem_node shadow_ts_node;
uint32_t shadow_ts_flags;
bool in_destroy;
bool destroyed;
struct kref kref;
uint32_t last_ts;
struct hgsl_hsync_timeline *timeline;
uint32_t queued_ts;
bool is_killed;
};
struct hgsl_priv {
struct qcom_hgsl *dev;
pid_t pid;
struct list_head node;
struct hgsl_hyp_priv_t hyp_priv;
struct mutex lock;
struct list_head mem_mapped;
struct list_head mem_allocated;
atomic64_t total_mem_size;
};
static inline bool hgsl_ts32_ge(uint32_t a, uint32_t b)
{
static const uint32_t TIMESTAMP_WINDOW = 0x80000000;
return (a - b) < TIMESTAMP_WINDOW;
}
static inline bool hgsl_ts64_ge(uint64_t a, uint64_t b)
{
static const uint64_t TIMESTAMP_WINDOW = 0x8000000000000000LL;
return (a - b) < TIMESTAMP_WINDOW;
}
static inline bool hgsl_ts_ge(uint64_t a, uint64_t b, bool is64)
{
if (is64)
return hgsl_ts64_ge(a, b);
else
return hgsl_ts32_ge((uint32_t)a, (uint32_t)b);
}
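The three inline helpers above implement wraparound-safe ("serial number") timestamp comparison: a counts as greater than or equal to b whenever the unsigned difference a - b is less than half the value space. A small stand-alone illustration of the 32-bit case (not part of this change; the sample values are arbitrary):

/* Illustration only: wraparound-safe comparison, same logic as hgsl_ts32_ge(). */
#include <stdbool.h>
#include <stdint.h>

static bool demo_ts32_ge(uint32_t a, uint32_t b)
{
	/* (a - b) wraps modulo 2^32, so this means
	 * "a is at most 2^31 - 1 steps ahead of b".
	 */
	return (a - b) < 0x80000000u;
}

/*
 * demo_ts32_ge(5, 3)                   -> true  (5 >= 3)
 * demo_ts32_ge(3, 5)                   -> false
 * demo_ts32_ge(0x00000002, 0xFFFFFFF0) -> true  (counter wrapped past 0)
 * demo_ts32_ge(0xFFFFFFF0, 0x00000002) -> false
 */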
/**
* struct hgsl_hsync_timeline - A sync timeline attached under each hgsl context
* @kref: Refcount to keep the struct alive
* @context: Back pointer to the owning hgsl context
* @name: String to describe this timeline
* @fence_context: Used by the fence driver to identify fences belonging to
* this context
* @lock: Spinlock to protect this timeline
* @fence_list: List head for all fences on this timeline
* @last_ts: Last timestamp when signaling fences
*/
struct hgsl_hsync_timeline {
struct kref kref;
struct hgsl_context *context;
char name[HGSL_TIMELINE_NAME_LEN];
u64 fence_context;
spinlock_t lock;
struct list_head fence_list;
unsigned int last_ts;
};
/**
* struct hgsl_hsync_fence - A struct containing a fence and other data
* associated with it
* @fence: The fence struct
* @sync_file: Pointer to the sync file
* @timeline: Pointer to the hgsl sync timeline this fence is on
* @child_list: List of fences on the same timeline
* @context_id: hgsl context id
* @ts: Context timestamp that this fence is associated with
*/
struct hgsl_hsync_fence {
struct dma_fence fence;
struct sync_file *sync_file;
struct hgsl_hsync_timeline *timeline;
struct list_head child_list;
u32 context_id;
unsigned int ts;
};
struct hgsl_isync_timeline {
struct kref kref;
struct list_head free_list;
char name[HGSL_TIMELINE_NAME_LEN];
int id;
struct hgsl_priv *priv;
struct list_head fence_list;
u64 context;
spinlock_t lock;
u64 last_ts;
u32 flags;
bool is64bits;
};
struct hgsl_isync_fence {
struct dma_fence fence;
struct list_head free_list; /* For free in batch */
struct hgsl_isync_timeline *timeline;
struct list_head child_list;
u64 ts;
};
/* Fence for commands. */
struct hgsl_hsync_fence *hgsl_hsync_fence_create(
struct hgsl_context *context,
uint32_t ts);
int hgsl_hsync_fence_create_fd(struct hgsl_context *context,
uint32_t ts);
int hgsl_hsync_timeline_create(struct hgsl_context *context);
void hgsl_hsync_timeline_signal(struct hgsl_hsync_timeline *timeline,
unsigned int ts);
void hgsl_hsync_timeline_put(struct hgsl_hsync_timeline *timeline);
void hgsl_hsync_timeline_fini(struct hgsl_context *context);
/* Fence for process sync. */
int hgsl_isync_timeline_create(struct hgsl_priv *priv,
uint32_t *timeline_id,
uint32_t flags,
uint64_t initial_ts);
int hgsl_isync_timeline_destroy(struct hgsl_priv *priv, uint32_t id);
void hgsl_isync_fini(struct hgsl_priv *priv);
int hgsl_isync_fence_create(struct hgsl_priv *priv, uint32_t timeline_id,
uint32_t ts, bool ts_is_valid, int *fence_fd);
int hgsl_isync_fence_signal(struct hgsl_priv *priv, uint32_t timeline_id,
int fence_fd);
int hgsl_isync_forward(struct hgsl_priv *priv, uint32_t timeline_id,
uint64_t ts, bool check_owner);
int hgsl_isync_query(struct hgsl_priv *priv, uint32_t timeline_id,
uint64_t *ts);
int hgsl_isync_wait_multiple(struct hgsl_priv *priv, struct hgsl_timeline_wait *param);
#endif /* __HGSL_H_ */

File diff suppressed because it is too large.


@ -0,0 +1,518 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef GSL_HYP_INCLUDED
#define GSL_HYP_INCLUDED
#include "hgsl_utils.h"
#include "hgsl_memory.h"
/*
* 2-stage connection diagram
*
* GFX BE GFX FE
* | |
* | connect(main connection ID) |
* | <----------------------------------- |
* | |
* | handshake(remote name, pid) |
* | <----------------------------------- |
* | |
* | reply(result, client connection ID) |
* | -----------------------------------> |
* | |
* | close connection on main ID |
* | <----------------------------------- |
* | |
* | |
* | connect(client connection ID) |
* | <----------------------------------- |
* | |
* | sub handshake(client data) |
* | <----------------------------------- |
* | |
* | reply(result) |
* | -----------------------------------> |
* | |
* | |
* | RPC function call(params data) |
* | <----------------------------------- |
* | |
* | reply(result, return call data) |
* | -----------------------------------> |
* | |
* | ... |
*/
/*
* protocol data format
*
* RPC function call
*
* <call magic><function ID><version><size><N: number of arguments>
* <argument1>...<argument N><checksum>
*
*
* RPC function argument
*
* <argument magic><argument ID><version><size><data><checksum>
*
*/
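For illustration of the wire format above (not part of this change): a hedged caller-side sketch that packs one call with a single 32-bit argument into a parcel and ships it over the HAB socket, using the gsl_rpc_*/gsl_hab_* helpers declared in hgsl_hyp_socket.h. The opcode, call version and argument layout here are placeholders; the real per-opcode marshalling lives in hgsl_hyp.c (whose diff is suppressed above), and real callers go through the channel pool rather than a raw habfd.

/* Sketch only: build one RPC call parcel and send it. */
static int demo_send_one_call(int habfd, uint32_t arg)
{
	struct gsl_hab_payload p;
	void *data = NULL;
	uint32_t size = 0;
	int ret;

	ret = gsl_rpc_parcel_init(&p);			/* allocates the parcel buffer */
	if (ret)
		return ret;

	gsl_rpc_set_call_params(&p, RPC_LIBRARY_OPEN, 1);	/* <call magic><id><version> */
	ret = gsl_rpc_write_uint32(&p, arg);		/* one <argument> record */
	if (!ret)
		ret = gsl_rpc_finalize(&p);		/* patch call size, append checksum */
	if (!ret)
		ret = gsl_rpc_get_data_params(&p, &data, &size, NULL);
	if (!ret)
		ret = gsl_hab_send(habfd, data, size);

	gsl_rpc_parcel_free(&p);
	return ret;
}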
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <uapi/linux/hgsl.h>
#include "hgsl_types.h"
#include "hgsl_hyp_socket.h"
#define OFFSET_OF(type, member) ((int) &((type *)0)->member)
#define RPC_CLIENT_NAME_SIZE (64)
/* RPC opcodes */
/* WARNING: when inserting a new opcode, append it at the end, just before RPC_FUNC_LAST. */
/* Inserting a new opcode in the middle of the list will break the protocol! */
enum gsl_rpc_func_t {
RPC_LIBRARY_OPEN = 0,
RPC_LIBRARY_CLOSE,
RPC_LIBRARY_VERSION,
RPC_LIBRARY_SET_MEMNOTIFY_TYPE,
RPC_DEVICE_OPEN,
RPC_DEVICE_CLOSE,
RPC_DEVICE_GETINFO,
RPC_DEVICE_GETINFO_EXT,
RPC_DEVICE_SETPOWERSTATE,
RPC_DEVICE_WAITIRQ,
RPC_DEVICE_GETIRQCNTRBASE,
RPC_DEVICE_DUMPSTATE,
RPC_COMMAND_ISSUEIB,
RPC_COMMAND_INSERTFENCE,
RPC_COMMAND_READTIMESTAMP,
RPC_COMMAND_ISSUEIB_SYNC,
RPC_COMMAND_ISSUEIB_WITH_ALLOC_LIST,
RPC_COMMAND_CHECKTIMESTAMP,
RPC_COMMAND_WAITTIMESTAMP,
RPC_COMMAND_FREEMEMONTIMESTAMP,
RPC_COMMAND_RESETSTATUS_INTERNAL,
RPC_CONTEXT_CREATE,
RPC_CONTEXT_DESTROY,
RPC_CONTEXT_BINDGMEMSHADOW,
RPC_CONTEXT_SETBINBASEOFFSET,
RPC_MEMORY_READ,
RPC_MEMORY_WRITE,
RPC_MEMORY_COPY,
RPC_MEMORY_SET,
RPC_MEMORY_QUERYSTATS,
RPC_MEMORY_ALLOC_PURE,
RPC_MEMORY_PHYS_ALLOC_PURE,
RPC_MEMORY_VIRT_ALLOC_PURE,
RPC_MEMORY_FREE_PURE,
RPC_MEMORY_CACHEOPERATION,
RPC_MEMORY_NOTIFY,
RPC_MEMORY_BIND,
RPC_MEMORY_BIND_SYNC,
RPC_MEMORY_MMAP,
RPC_MEMORY_MUNMAP,
RPC_MEMORY_CREATE_PAGETABLE,
RPC_MEMORY_DESTROY_PAGETABLE,
RPC_MEMORY_SET_PAGETABLE,
RPC_COMMAND_FREEMEMONTIMESTAMP_PURE,
RPC_PERFCOUNTER_SELECT,
RPC_PERFCOUNTER_DESELECT,
RPC_PERFCOUNTER_QUERYSELECTIONS,
RPC_PERFCOUNTER_READ,
RPC_SYNCOBJ_CREATE,
RPC_SYNCOBJ_CREATE_FROM_BIND,
RPC_SYNCOBJ_DESTROY,
RPC_SYNCOBJ_WAIT,
RPC_TIMESTAMP_CMP,
RPC_SYNCOBJ_CLONE,
RPC_SYNCOBJ_MERGE,
RPC_SYNCOBJ_MERGE_MULTIPLE,
RPC_SYNCSOURCE_CREATE,
RPC_SYNCSOURCE_DESTROY,
RPC_SYNCOBJ_CREATE_FROM_SOURCE,
RPC_SYNCOBJ_SIGNAL,
RPC_SYNCOBJ_WAIT_MULTIPLE,
RPC_DEVICE_DEBUG,
RPC_CFFDUMP_WAITIRQ,
RPC_CFFDUMP_WRITEVERIFYFILE,
RPC_MEMORY_MAP_EXT_FD_PURE, /* Linux extension */
RPC_MEMORY_UNMAP_EXT_FD_PURE, /* Linux extension */
RPC_GET_SHADOWMEM,
RPC_PUT_SHADOWMEM,
RPC_BLIT,
RPC_HANDSHAKE,
RPC_SUB_HANDSHAKE,
RPC_DISCONNECT,
RPC_MEMORY_SET_METAINFO,
RPC_GET_SYSTEM_TIME,
RPC_GET_DBQ_INFO,
RPC_DBQ_CREATE,
RPC_PERFCOUNTERS_READ,
RPC_NOTIFY_CLEANUP,
RPC_COMMAND_RESETSTATUS,
RPC_FUNC_LAST /* insert new func BEFORE this line! */
};
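Because these opcode values form the wire ABI between FE and BE, a new function is only ever appended right before RPC_FUNC_LAST so that every existing value stays stable. A tiny stand-alone illustration (the enum names here are hypothetical):

/* Appending keeps every existing opcode's numeric value unchanged. */
enum demo_rpc_func_t {
	DEMO_RPC_OLD_A = 0,	/* stays 0 */
	DEMO_RPC_OLD_B,		/* stays 1 */
	DEMO_RPC_NEW_FUNC,	/* appended entry takes the next value, 2 */
	DEMO_RPC_FUNC_LAST	/* always last */
};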
struct hgsl_hyp_priv_t {
struct device *dev;
struct mutex lock;
struct list_head free_channels;
struct list_head busy_channels;
int conn_id;
unsigned char client_name[RPC_CLIENT_NAME_SIZE];
int client_pid;
struct idr channel_idr;
};
/* backend i.e. server type: depends on server's underlying platform */
enum gsl_rpc_server_type_t {
GSL_RPC_SERVER_TYPE_1 = 1,
GSL_RPC_SERVER_TYPE_2,
GSL_RPC_SERVER_TYPE_3,
GSL_RPC_SERVER_TYPE_LAST
};
/* frontend i.e. client type: depends on client's underlying platform */
enum gsl_rpc_client_type_t {
GSL_RPC_CLIENT_TYPE_1 = 1,
GSL_RPC_CLIENT_TYPE_2,
GSL_RPC_CLIENT_TYPE_3,
GSL_RPC_CLIENT_TYPE_LAST
};
/* backend i.e. server mode in regards to the way of handling new client */
enum gsl_rpc_server_mode_t {
GSL_RPC_SERVER_MODE_1 = 1,
GSL_RPC_SERVER_MODE_2,
GSL_RPC_SERVER_MODE_3,
GSL_RPC_SERVER_MODE_LAST
};
#pragma pack(push, 4)
struct handshake_params_t {
uint32_t size;
uint32_t client_type;
uint32_t client_version;
uint32_t pid;
char name[RPC_CLIENT_NAME_SIZE];
};
struct sub_handshake_params_t {
uint32_t size;
uint32_t pid;
uint32_t memdesc_size;
};
struct library_open_params_t {
uint32_t size;
uint32_t flags;
};
struct context_create_params_t {
uint32_t size;
uint32_t devhandle;
enum gsl_context_type_t type;
uint32_t flags;
};
struct context_destroy_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
};
struct get_shadowmem_params_v1_t {
uint32_t size;
enum gsl_deviceid_t device_id;
uint32_t ctxthandle;
};
struct put_shadowmem_params_t {
uint32_t size;
uint32_t export_id;
};
struct shadowprop_t {
uint32_t size;
uint64_t sizebytes;
uint32_t flags;
};
struct memory_alloc_params_t {
uint32_t size;
uint32_t sizebytes;
uint32_t flags;
};
struct memory_free_params_t {
uint32_t size;
uint32_t flags;
};
struct memory_map_ext_fd_params_t {
uint32_t size;
int fd;
uint64_t hostptr;
uint64_t len;
uint64_t offset;
uint32_t memtype;
uint32_t flags;
};
struct memory_unmap_ext_fd_params {
uint32_t size;
uint64_t gpuaddr;
uint64_t hostptr;
uint64_t len;
uint32_t memtype;
};
struct hyp_ibdesc_t {
uint32_t size;
uint64_t gpuaddr;
uint64_t server_priv_memdesc;
uint32_t sizedwords;
};
struct command_issueib_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
uint32_t numibs;
uint32_t timestamp;
uint32_t flags;
};
struct command_issueib_with_alloc_list_params {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
uint32_t numibs;
uint32_t numallocations;
uint32_t timestamp;
uint32_t flags;
uint64_t syncobj;
};
struct memory_set_metainfo_params_t {
uint64_t memdesc_priv;
uint32_t flags;
char metainfo[128];
uint64_t metainfo_len;
};
struct command_waittimestamp_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
uint32_t timestamp;
uint32_t timeout;
};
struct command_readtimestamp_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
enum gsl_timestamp_type_t type;
};
struct command_checktimestamp_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
uint32_t timestamp;
enum gsl_timestamp_type_t type;
};
struct get_system_time_params_t {
uint32_t size;
enum gsl_systemtime_usage_t usage;
};
struct get_dbq_info_params_t {
uint32_t size;
uint64_t gpuaddr;
uint64_t priv_memdesc;
uint32_t sizedwords;
int q_idx;
};
struct dbq_create_params_t {
uint32_t size;
uint32_t ctxthandle;
};
struct syncobj_wait_multiple_params_t {
uint32_t size;
uint64_t num_syncobjs;
uint32_t timeout_ms;
};
struct perfcounter_select_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
int num_counters;
};
struct perfcounter_deselect_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
uint32_t timestamp;
int num_counters;
};
struct perfcounter_query_selections_params_t {
uint32_t size;
uint32_t devhandle;
uint32_t ctxthandle;
enum gsl_perfcountergroupid_t group;
int num_counters;
};
struct perfcounter_read_params_t {
uint32_t size;
uint32_t devhandle;
enum gsl_perfcountergroupid_t group;
uint32_t counter;
};
struct notify_cleanup_params_t {
uint32_t size;
uint32_t timeout;
};
#pragma pack(pop)
struct hgsl_hab_channel_t {
struct list_head node;
int socket;
int id;
bool wait_retry;
bool busy;
struct hgsl_hyp_priv_t *priv;
struct gsl_hab_payload send_buf;
struct gsl_hab_payload recv_buf;
};
struct hgsl_dbq_info {
uint32_t export_id;
uint32_t size;
uint32_t head_dwords;
int32_t head_off_dwords;
uint32_t queue_dwords;
int32_t queue_off_dwords;
uint32_t db_signal;
struct dma_buf *dma_buf;
struct hgsl_hab_channel_t *hab_channel;
};
int hgsl_hyp_init(struct hgsl_hyp_priv_t *priv, struct device *dev,
int client_pid, const char * const client_name);
void hgsl_hyp_close(struct hgsl_hyp_priv_t *priv);
int hgsl_hyp_channel_pool_get(
struct hgsl_hyp_priv_t *priv, int id, struct hgsl_hab_channel_t **channel);
void hgsl_hyp_channel_pool_put(struct hgsl_hab_channel_t *hab_channel);
int hgsl_hyp_generic_transaction(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_hyp_generic_transaction_params *params,
void **pSend, void **pReply, void *pRval);
int hgsl_hyp_gsl_lib_open(struct hgsl_hyp_priv_t *priv,
uint32_t flags, int32_t *rval);
int hgsl_hyp_ctxt_create(struct hgsl_hab_channel_t *hab_channel,
struct hgsl_ioctl_ctxt_create_params *hgsl_params);
int hgsl_hyp_dbq_create(struct hgsl_hab_channel_t *hab_channel,
uint32_t ctxthandle, uint32_t *dbq_info);
int hgsl_hyp_ctxt_destroy(struct hgsl_hab_channel_t *hab_channel,
uint32_t devhandle, uint32_t context_id, uint32_t *rval);
int hgsl_hyp_get_shadowts_mem(struct hgsl_hab_channel_t *hab_channel,
uint32_t context_id, uint32_t *shadow_ts_flags,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_put_shadowts_mem(struct hgsl_hab_channel_t *hab_channel,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_mem_alloc(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_mem_alloc_params *hgsl_params,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_mem_free(struct hgsl_hyp_priv_t *priv,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_mem_map_smmu(struct hgsl_hab_channel_t *hab_channel,
struct hgsl_ioctl_mem_map_smmu_params *hgsl_params,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_mem_unmap_smmu(struct hgsl_hab_channel_t *hab_channel,
struct hgsl_mem_node *mem_node);
int hgsl_hyp_set_metainfo(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_set_metainfo_params *hgsl_param,
const char *metainfo);
int hgsl_hyp_issueib(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_issueib_params *hgsl_param,
struct hgsl_ibdesc *ib);
int hgsl_hyp_issueib_with_alloc_list(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_issueib_with_alloc_list_params *hgsl_param,
struct gsl_command_buffer_object_t *ib,
struct gsl_memory_object_t *allocations,
struct gsl_memdesc_t *be_descs,
uint64_t *be_offsets);
int hgsl_hyp_wait_timestamp(struct hgsl_hyp_priv_t *priv,
struct hgsl_wait_ts_info *hgsl_param);
int hgsl_hyp_check_timestamp(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_check_ts_params *hgsl_param);
int hgsl_hyp_read_timestamp(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_read_ts_params *hgsl_param);
int hgsl_hyp_get_system_time(struct hgsl_hyp_priv_t *priv,
uint64_t *hgsl_param);
int hgsl_hyp_syncobj_wait_multiple(struct hgsl_hyp_priv_t *priv,
uint64_t *rpc_syncobj, uint64_t num_syncobjs,
uint32_t timeout_ms, int32_t *status, int32_t *result);
int hgsl_hyp_perfcounter_select(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_perfcounter_select_params *hgsl_param,
uint32_t *groups, uint32_t *counter_ids,
uint32_t *counter_val_regs, uint32_t *counter_val_hi_regs);
int hgsl_hyp_perfcounter_deselect(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_perfcounter_deselect_params *hgsl_param,
uint32_t *groups, uint32_t *counter_ids);
int hgsl_hyp_perfcounter_query_selections(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_perfcounter_query_selections_params *hgsl_param,
int32_t *selections);
int hgsl_hyp_perfcounter_read(struct hgsl_hyp_priv_t *priv,
struct hgsl_ioctl_perfcounter_read_params *hgsl_param);
int hgsl_hyp_get_dbq_info(struct hgsl_hyp_priv_t *priv, uint32_t dbq_idx,
struct hgsl_dbq_info *dbq_info);
int hgsl_hyp_notify_cleanup(struct hgsl_hab_channel_t *hab_channel, uint32_t timeout);
#endif


@ -0,0 +1,432 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2018, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "linux/habmm.h"
#include "hgsl_hyp.h"
#include "hgsl_hyp_socket.h"
#include "hgsl_utils.h"
#define HAB_OPEN_WAIT_TIMEOUT_MS (3000)
#define HGSL_DUMP_PAYLOAD_STR_SIZE ((HGSL_MAX_DUMP_PAYLOAD_SIZE * (2 * sizeof(uint32_t) + 3)) + 1)
int gsl_hab_open(int *habfd)
{
int ret = 0;
ret = habmm_socket_open(habfd
, HAB_MMID_CREATE(MM_GFX, (int)*habfd)
, HAB_OPEN_WAIT_TIMEOUT_MS
, HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
LOGD("habmm_socket_open returned with %d, %x", ret, *habfd);
return ret;
}
int gsl_hab_recv(int habfd, unsigned char *p, size_t sz, int interruptible)
{
int ret = 0;
uint32_t size_bytes = 0;
uint32_t flags = 0;
if (!interruptible)
flags |= HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE;
size_bytes = (uint32_t)sz;
ret = habmm_socket_recv(habfd, p, &size_bytes, 0, flags);
if (ret && (ret != -EINTR))
LOGE("habmm_socket_recv failed, %d, socket %x. size_bytes %u, expects %u",
ret, habfd, size_bytes, sz);
return ret;
}
int gsl_hab_send(int habfd, unsigned char *p, size_t sz)
{
return habmm_socket_send(habfd, p, sz, 0);
}
int gsl_hab_close(int habfd)
{
int ret = habmm_socket_close(habfd);
if (ret)
LOGE("Unable to habmm_socket_close, ret %d", ret);
return ret;
}
enum _gsl_rpc_payload_type_t {
GSL_RPC_BLOB_DATA = 0,
GSL_RPC_32BIT_DATA,
GSL_RPC_64BIT_DATA
};
struct __packed gsl_rpc_header_t {
uint32_t magic;
uint32_t id;
uint32_t version;
uint32_t size;
uint8_t data;
};
struct __packed gsl_rpc_footer_t {
uint32_t checksum;
};
/* the actual header size is one byte less because
* of the data pointer in the gsl_rpc_header_t
* which is there for the ease of access to the data
* but which is actually not being sent
*/
static const uint32_t gsl_rpc_header_size
= sizeof(struct gsl_rpc_header_t) - 1;
uint32_t gsl_rpc_parcel_get_data_offset(void)
{
return gsl_rpc_header_size;
}
static inline uint32_t gsl_rpc_gen_checksum(void *p_data, size_t size)
{
/*TODO: Implement checksum generation here*/
OS_UNUSED(p_data);
OS_UNUSED(size);
return (uint32_t)-1;
}
static inline int gsl_rpc_get_arg_ptr(struct gsl_hab_payload *p,
uint32_t id, void **p_data, size_t size)
{
int ret = -EINVAL;
if ((p->data_pos + size + gsl_rpc_header_size) <= p->data_size) {
struct gsl_rpc_header_t *hdr
= (struct gsl_rpc_header_t *)(p->data + p->data_pos);
if ((hdr->magic == GSL_HAB_DATA_MAGIC) &&
(hdr->id == id) && (hdr->size == size)) {
struct gsl_rpc_footer_t *footer = NULL;
uint32_t checksum;
checksum = gsl_rpc_gen_checksum(&hdr->data, hdr->size);
*p_data = (void *)&hdr->data;
p->data_pos += size + gsl_rpc_header_size;
footer = (struct gsl_rpc_footer_t *)
(p->data + p->data_pos);
p->data_pos += sizeof(struct gsl_rpc_footer_t);
if (checksum == footer->checksum)
ret = 0;
else
LOGE("checksum mismatch %d != %d",
checksum, footer->checksum);
} else {
struct gsl_rpc_header_t *call_hdr
= (struct gsl_rpc_header_t *)p->data;
size_t dump_size
= call_hdr->size + gsl_rpc_header_size
+ sizeof(struct gsl_rpc_footer_t);
dump_size = (dump_size <= p->data_size) ?
dump_size : p->data_size;
LOGE("@%d: argument type or size mismatch: call id %d",
p->data_pos, call_hdr->id);
LOGE("size %d magic 0x%X/0x%X, id %d/%d, size %d/%d",
call_hdr->size, hdr->magic, GSL_HAB_DATA_MAGIC,
hdr->id, id, hdr->size, size);
gsl_hab_payload_dump(p, dump_size);
}
}
return ret;
}
#define GSL_RPC_READ_ARG(p, id, p_arg, type) \
({ \
void *p_arg_data = NULL; \
int ret = gsl_rpc_get_arg_ptr(p, id, &p_arg_data, sizeof(type)); \
if (ret == 0) { \
*p_arg = *((type *)p_arg_data); \
} \
\
ret; \
})
#define GSL_RPC_WRITE_DATA(p, type, data_ptr, len, action) \
({ \
int status = 0; \
\
if ((p->data_pos + gsl_rpc_header_size + len +\
sizeof(struct gsl_rpc_footer_t) > p->data_size)) { \
status = grow_data(p, len); \
} \
\
if (status == 0) { \
struct gsl_rpc_header_t *hdr = (struct gsl_rpc_header_t *) \
(p->data + p->data_pos); \
struct gsl_rpc_footer_t *ftr = (struct gsl_rpc_footer_t *) \
(p->data + p->data_pos + gsl_rpc_header_size + len); \
void *data_ptr = (void *)&hdr->data; \
uint32_t checksum = 0; \
\
action; \
checksum = gsl_rpc_gen_checksum(data_ptr, len); \
hdr->magic = GSL_HAB_DATA_MAGIC; \
hdr->id = type; \
hdr->version = 2; \
hdr->size = len; \
ftr->checksum = checksum; \
p->data_pos += len + gsl_rpc_header_size \
+ sizeof(struct gsl_rpc_footer_t); \
} \
\
status; \
})
#define GSL_RPC_WRITE_ARG(p, id, type, val) \
GSL_RPC_WRITE_DATA(p, id, p_data, sizeof(type), *((type *)p_data) = val)
int gsl_rpc_parcel_init(struct gsl_hab_payload *p)
{
size_t size = 4096;
if (p == NULL)
return -EINVAL;
p->data = hgsl_zalloc(size);
if (p->data == NULL) {
LOGE("No memory allocated\n");
return -ENOMEM;
}
p->version = 1;
p->data_size = size;
p->data_pos = gsl_rpc_parcel_get_data_offset();
memset(p->data, 0, size);
return 0;
}
void gsl_rpc_parcel_free(struct gsl_hab_payload *p)
{
if (p == NULL || p->data == NULL)
return;
hgsl_free(p->data);
p->data = NULL;
}
int gsl_rpc_parcel_reset(struct gsl_hab_payload *p)
{
int ret = -EINVAL;
if (p == NULL || p->data == NULL) {
LOGE("parcel isn't inited\n");
} else {
p->data_pos = gsl_rpc_parcel_get_data_offset();
memset(p->data, 0, p->data_size);
ret = 0;
}
return ret;
}
uint32_t gsl_rpc_parcel_get_version(struct gsl_hab_payload *p)
{
return p->version;
}
int gsl_rpc_read(struct gsl_hab_payload *p, void *outData, size_t len)
{
void *p_arg_data = NULL;
int ret = gsl_rpc_get_arg_ptr(p, GSL_RPC_BLOB_DATA, &p_arg_data, len);
if ((ret == 0) && outData && len)
memcpy(outData, p_arg_data, len);
return ret;
}
int gsl_rpc_read_l(struct gsl_hab_payload *p, void **pOutData, size_t len)
{
return gsl_rpc_get_arg_ptr(p, GSL_RPC_BLOB_DATA, pOutData, len);
}
int gsl_rpc_read_int32_l(struct gsl_hab_payload *p, int32_t *pArg)
{
return GSL_RPC_READ_ARG(p, GSL_RPC_32BIT_DATA, pArg, int32_t);
}
int gsl_rpc_read_uint32_l(struct gsl_hab_payload *p, uint32_t *pArg)
{
return GSL_RPC_READ_ARG(p, GSL_RPC_32BIT_DATA, pArg, uint32_t);
}
int gsl_rpc_read_int64_l(struct gsl_hab_payload *p, int64_t *pArg)
{
return GSL_RPC_READ_ARG(p, GSL_RPC_64BIT_DATA, pArg, int64_t);
}
int gsl_rpc_read_uint64_l(struct gsl_hab_payload *p, uint64_t *pArg)
{
return GSL_RPC_READ_ARG(p, GSL_RPC_64BIT_DATA, pArg, uint64_t);
}
int gsl_rpc_finalize(struct gsl_hab_payload *p)
{
int ret = 0;
if ((p->data_pos + sizeof(struct gsl_rpc_footer_t)) > p->data_size)
ret = grow_data(p, sizeof(struct gsl_rpc_footer_t));
if (!ret) {
struct gsl_rpc_header_t *hdr
= (struct gsl_rpc_header_t *)p->data;
struct gsl_rpc_footer_t *ftr
= (struct gsl_rpc_footer_t *)(p->data + p->data_pos);
uint32_t data_size = p->data_pos - gsl_rpc_header_size;
uint32_t checksum = gsl_rpc_gen_checksum(&hdr->data, data_size);
hdr->size = data_size;
ftr->checksum = checksum;
p->data_pos += sizeof(struct gsl_rpc_footer_t);
}
return ret;
}
int gsl_rpc_set_call_params(struct gsl_hab_payload *p,
uint32_t opcode, uint32_t version)
{
struct gsl_rpc_header_t *hdr = (struct gsl_rpc_header_t *)p->data;
hdr->magic = GSL_HAB_CALL_MAGIC;
hdr->id = opcode;
hdr->version = version;
return 0;
}
int gsl_rpc_get_call_params(struct gsl_hab_payload *p,
uint32_t *opcode, uint32_t *version)
{
int ret = -EINVAL;
struct gsl_rpc_header_t *hdr = (struct gsl_rpc_header_t *)(p->data);
if (opcode) {
*opcode = hdr->id;
ret = 0;
}
if (version) {
*version = hdr->version;
ret = 0;
}
return ret;
}
int gsl_rpc_get_data_params(struct gsl_hab_payload *p,
void **p_data, uint32_t *size, uint32_t *max_size)
{
int ret = -EINVAL;
if (p_data) {
*p_data = p->data;
ret = 0;
}
if (size) {
*size = p->data_pos;
ret = 0;
}
if (max_size) {
*max_size = p->data_size;
ret = 0;
}
return ret;
}
int gsl_rpc_write(struct gsl_hab_payload *p, const void *data, size_t len)
{
return GSL_RPC_WRITE_DATA(p, GSL_RPC_BLOB_DATA, p_data, len,
do {if (data && len) memcpy(p_data, data, len); } while (0));
}
int gsl_rpc_write_int32(struct gsl_hab_payload *p, int32_t val)
{
return GSL_RPC_WRITE_ARG(p, GSL_RPC_32BIT_DATA, int32_t, val);
}
int gsl_rpc_write_uint32(struct gsl_hab_payload *p, uint32_t val)
{
return GSL_RPC_WRITE_ARG(p, GSL_RPC_32BIT_DATA, uint32_t, val);
}
int gsl_rpc_write_int64(struct gsl_hab_payload *p, int64_t val)
{
return GSL_RPC_WRITE_ARG(p, GSL_RPC_64BIT_DATA, int64_t, val);
}
int gsl_rpc_write_uint64(struct gsl_hab_payload *p, uint64_t val)
{
return GSL_RPC_WRITE_ARG(p, GSL_RPC_64BIT_DATA, uint64_t, val);
}
int grow_data(struct gsl_hab_payload *p, size_t len)
{
if (p->data != NULL && p->data_size != 0) {
size_t newSize = ((p->data_size + len) * 3) / 2;
void *newData = hgsl_malloc(newSize);
if (newData == NULL) {
LOGE("No memory allocated\n");
return -ENOMEM;
}
memcpy(newData, p->data, p->data_size);
hgsl_free(p->data);
p->data = newData;
p->data_size = newSize;
} else {
size_t newSize = ((4096 + len) * 3) / 2;
p->data = hgsl_malloc(newSize);
if (p->data == NULL) {
LOGE("No memory allocated\n");
return -ENOMEM;
}
}
return 0;
}
void gsl_hab_payload_dump(struct gsl_hab_payload *p, size_t size_bytes)
{
char str[HGSL_DUMP_PAYLOAD_STR_SIZE];
size_t size_dwords = size_bytes / sizeof(uint32_t);
unsigned int i;
char *p_str = str;
int size_left = sizeof(str);
uint32_t *p_data = (uint32_t *)p->data;
if (size_dwords > HGSL_MAX_DUMP_PAYLOAD_SIZE)
size_dwords = HGSL_MAX_DUMP_PAYLOAD_SIZE;
LOGI("dumping first %d dwords:", size_dwords);
for (i = 0; i < size_dwords; i++) {
int c = snprintf(p_str, size_left, "0x%08X ", p_data[i]);
if ((c < 0) || (c >= size_left))
break;
p_str += c;
size_left -= c;
}
LOGI("%s", str);
}
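For the receive direction (not part of this change): a hedged sketch of draining a reply parcel with the typed read helpers after gsl_hab_recv(). The <int32 rval><uint32 handle> reply layout is illustrative only; each opcode's actual reply format is defined by the marshalling code in hgsl_hyp.c.

/* Sketch only: receive a reply into an already initialized parcel and
 * read back <int32 rval><uint32 handle>.
 */
static int demo_recv_reply(int habfd, struct gsl_hab_payload *p,
		int32_t *rval, uint32_t *handle)
{
	void *buf = NULL;
	uint32_t max_size = 0;
	int ret;

	ret = gsl_rpc_parcel_reset(p);		/* rewind data_pos past the call header */
	if (ret)
		return ret;
	ret = gsl_rpc_get_data_params(p, &buf, NULL, &max_size);
	if (ret)
		return ret;
	ret = gsl_hab_recv(habfd, buf, max_size, 0);	/* 0 = uninterruptible wait */
	if (ret)
		return ret;
	ret = gsl_rpc_read_int32_l(p, rval);	/* first <argument> record */
	if (!ret)
		ret = gsl_rpc_read_uint32_l(p, handle);	/* second record */
	return ret;
}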


@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2010-2013, 2015-2018, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef GSL_HYP_SOCKET_H
#define GSL_HYP_SOCKET_H
#include <linux/types.h>
#define GSL_HAB_CALL_MAGIC 0xfeedca11
#define GSL_HAB_DATA_MAGIC 0xfeedda7a
#define HAB_INVALID_HANDLE ((int)-1)
#define HGSL_MAX_DUMP_PAYLOAD_SIZE (32)
int gsl_hab_open(int *habfd);
int gsl_hab_recv(int habfd, unsigned char *p, size_t sz, int interruptible);
int gsl_hab_send(int habfd, unsigned char *p, size_t sz);
int gsl_hab_close(int habfd);
struct gsl_hab_payload {
uint32_t version;
size_t data_size;
size_t data_pos;
size_t send_size;
uint8_t *data;
};
int gsl_rpc_read(struct gsl_hab_payload *p, void *outData, size_t len);
int gsl_rpc_read_l(struct gsl_hab_payload *p, void **pOutData, size_t len);
int gsl_rpc_read_int32_l(struct gsl_hab_payload *p, int32_t *pArg);
int gsl_rpc_read_uint32_l(struct gsl_hab_payload *p, uint32_t *pArg);
int gsl_rpc_read_int64_l(struct gsl_hab_payload *p, int64_t *pArg);
int gsl_rpc_read_uint64_l(struct gsl_hab_payload *p, uint64_t *pArg);
int gsl_rpc_write(struct gsl_hab_payload *p, const void *data, size_t len);
int gsl_rpc_write_int32(struct gsl_hab_payload *p, int32_t val);
int gsl_rpc_write_uint32(struct gsl_hab_payload *p, uint32_t val);
int gsl_rpc_write_int64(struct gsl_hab_payload *p, int64_t val);
int gsl_rpc_write_uint64(struct gsl_hab_payload *p, uint64_t val);
int gsl_rpc_parcel_init(struct gsl_hab_payload *p);
void gsl_rpc_parcel_free(struct gsl_hab_payload *p);
int gsl_rpc_parcel_reset(struct gsl_hab_payload *p);
int gsl_rpc_parcel_rest_ext(struct gsl_hab_payload *p, uint32_t version);
int gsl_rpc_set_call_params(struct gsl_hab_payload *p,
uint32_t opcode, uint32_t version);
int gsl_rpc_finalize(struct gsl_hab_payload *p);
int gsl_rpc_get_call_params(struct gsl_hab_payload *p,
uint32_t *opcode, uint32_t *version);
int gsl_rpc_get_data_params(struct gsl_hab_payload *p,
void **p_data, uint32_t *size, uint32_t *max_size);
void gsl_hab_payload_dump(struct gsl_hab_payload *p, size_t size);
int grow_data(struct gsl_hab_payload *p, size_t len);
uint32_t gsl_rpc_parcel_get_version(struct gsl_hab_payload *p);
uint32_t gsl_rpc_parcel_get_data_offset(void);
#endif


@ -0,0 +1,617 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hgsl_memory.h"
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <soc/qcom/secure_buffer.h>
#ifndef pgprot_writebackcache
#define pgprot_writebackcache(_prot) (_prot)
#endif
#ifndef pgprot_writethroughcache
#define pgprot_writethroughcache(_prot) (_prot)
#endif
static DEFINE_MUTEX(hgsl_map_global_lock);
static struct sg_table *hgsl_get_sgt_internal(struct hgsl_mem_node *mem_node)
{
struct sg_table *sgt;
int ret = 0;
if (!mem_node) {
sgt = ERR_PTR(-EINVAL);
goto out;
}
mutex_lock(&hgsl_map_global_lock);
if (!IS_ERR_OR_NULL(mem_node->sgt)) {
sgt = mem_node->sgt;
mem_node->sgt_refcount++;
} else {
sgt = hgsl_zalloc(sizeof(struct sg_table));
if (!sgt) {
sgt = ERR_PTR(-ENOMEM);
} else {
ret = sg_alloc_table_from_pages(sgt, mem_node->pages,
mem_node->page_count, 0,
mem_node->page_count << PAGE_SHIFT, GFP_KERNEL);
if (ret) {
hgsl_free(sgt);
sgt = ERR_PTR(ret);
} else {
mem_node->sgt = sgt;
mem_node->sgt_refcount = 1;
if (mem_node->dma_buf)
get_dma_buf(mem_node->dma_buf);
}
}
}
mutex_unlock(&hgsl_map_global_lock);
out:
return sgt;
}
static void hgsl_put_sgt_internal(struct hgsl_mem_node *mem_node)
{
if (!mem_node)
return;
mutex_lock(&hgsl_map_global_lock);
if (!mem_node->sgt_refcount || !mem_node->sgt)
goto out;
if (--mem_node->sgt_refcount == 0) {
sg_free_table(mem_node->sgt);
hgsl_free(mem_node->sgt);
mem_node->sgt = NULL;
if (mem_node->dma_buf)
dma_buf_put(mem_node->dma_buf);
}
out:
mutex_unlock(&hgsl_map_global_lock);
}
static struct sg_table *hgsl_mem_map_dma_buf(
struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct hgsl_mem_node *mem_node = dmabuf->priv;
return hgsl_get_sgt_internal(mem_node);
}
static void hgsl_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct hgsl_mem_node *mem_node = dmabuf->priv;
if (!mem_node)
return;
if (sgt != mem_node->sgt) {
LOGE("invalid sgt");
return;
}
hgsl_put_sgt_internal(mem_node);
}
static int hgsl_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct hgsl_mem_node *mem_node = dmabuf->priv;
unsigned long page_count;
unsigned long addr;
uint32_t i;
uint32_t cache_mode;
int ret;
if ((vma == NULL) ||
(mem_node->flags & GSL_MEMFLAGS_PROTECTED))
return -EINVAL;
page_count = vma_pages(vma);
addr = vma->vm_start;
/* Check for valid size. */
if ((mem_node->page_count < vma->vm_pgoff) ||
(mem_node->page_count < page_count + vma->vm_pgoff))
return -EINVAL;
vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY;
vma->vm_private_data = mem_node;
cache_mode = mem_node->flags & GSL_MEMFLAGS_CACHEMODE_MASK;
switch (cache_mode) {
case GSL_MEMFLAGS_WRITECOMBINE:
case GSL_MEMFLAGS_UNCACHED:
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
break;
case GSL_MEMFLAGS_WRITETHROUGH:
vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
break;
case GSL_MEMFLAGS_WRITEBACK:
vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
break;
default:
LOGE("invalid cache mode");
return -EINVAL;
}
for (i = 0; i < page_count; i++) {
struct page *page = mem_node->pages[i + vma->vm_pgoff];
ret = vm_insert_page(vma, addr, page);
if (ret)
return ret;
addr += PAGE_SIZE;
}
return 0;
}
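hgsl_mem_mmap() backs mmap() on the exported dma-buf fd with vm_insert_page() and applies the cache mode recorded in the allocation flags; protected buffers refuse to map. Assuming the exported fd is handed back to user space through the driver's ioctl interface (that path is in the suppressed hgsl.c diff), a minimal user-space sketch, not part of this change, would be:

/* User-space sketch: map an HGSL allocation exported as a dma-buf fd. */
#include <stdio.h>
#include <sys/mman.h>

static void *demo_map_hgsl_buffer(int dmabuf_fd, size_t size)
{
	void *va = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_SHARED, dmabuf_fd, 0);
	if (va == MAP_FAILED) {
		perror("mmap");		/* e.g. protected buffers fail with EINVAL */
		return NULL;
	}
	/* Whether accesses are write-combined or cacheable follows the
	 * GSL_MEMFLAGS_CACHEMODE bits chosen at allocation time.
	 */
	return va;
}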
static void hgsl_free_pages(struct hgsl_mem_node *mem_node)
{
uint32_t pcount = mem_node->page_count;
unsigned int page_order;
uint32_t i;
for (i = 0; i < pcount;) {
struct page *p = mem_node->pages[i];
page_order = compound_order(p);
__free_pages(p, page_order);
i += 1 << page_order;
}
mem_node->page_count = 0;
}
static int hgsl_lock_pages(struct hgsl_mem_node *mem_node)
{
struct sg_table *sgt = hgsl_get_sgt_internal(mem_node);
struct scatterlist *sg;
int src_vmid = VMID_HLOS;
int dest_vmid = VMID_CP_PIXEL;
int dest_perms = PERM_READ | PERM_WRITE;
int ret;
int i;
if (IS_ERR(sgt))
return PTR_ERR(sgt);
ret = hyp_assign_table(sgt, &src_vmid, 1, &dest_vmid, &dest_perms, 1);
if (ret) {
LOGE("Failed to assign sgt %d", ret);
hgsl_put_sgt_internal(mem_node);
return ret;
}
for_each_sg(sgt->sgl, sg, sgt->nents, i)
SetPagePrivate(sg_page(sg));
return 0;
}
static int hgsl_unlock_pages(struct hgsl_mem_node *mem_node)
{
struct sg_table *sgt = mem_node->sgt;
struct scatterlist *sg;
int src_vmid = VMID_CP_PIXEL;
int dest_vmid = VMID_HLOS;
int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
int ret;
int i;
if (!sgt)
return -EINVAL;
ret = hyp_assign_table(sgt, &src_vmid, 1, &dest_vmid, &dest_perms, 1);
if (ret)
goto out;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
ClearPagePrivate(sg_page(sg));
out:
mem_node->dma_buf = NULL;
hgsl_put_sgt_internal(mem_node);
return ret;
}
static void hgsl_mem_free_actual(struct hgsl_mem_node *mem_node)
{
if (mem_node->flags & GSL_MEMFLAGS_PROTECTED)
hgsl_unlock_pages(mem_node);
hgsl_free_pages(mem_node);
hgsl_free(mem_node->pages);
hgsl_free(mem_node);
}
static void hgsl_mem_dma_buf_release(struct dma_buf *dmabuf)
{
struct hgsl_mem_node *mem_node = dmabuf->priv;
hgsl_mem_free_actual(mem_node);
}
static int hgsl_mem_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct hgsl_mem_node *mem_node = dmabuf->priv;
if (mem_node->flags & GSL_MEMFLAGS_PROTECTED)
return -EINVAL;
mutex_lock(&hgsl_map_global_lock);
if (IS_ERR_OR_NULL(mem_node->vmapping))
mem_node->vmapping = vmap(mem_node->pages,
mem_node->page_count,
VM_IOREMAP,
pgprot_writecombine(PAGE_KERNEL));
if (!IS_ERR_OR_NULL(mem_node->vmapping))
mem_node->vmap_count++;
mutex_unlock(&hgsl_map_global_lock);
if (!mem_node->vmapping)
return -ENOMEM;
iosys_map_set_vaddr(map, mem_node->vmapping);
return 0;
}
static void hgsl_mem_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct hgsl_mem_node *mem_node = dmabuf->priv;
if (mem_node->flags & GSL_MEMFLAGS_PROTECTED)
return;
mutex_lock(&hgsl_map_global_lock);
if (!mem_node->vmap_count)
goto out;
if (--mem_node->vmap_count == 0) {
if (!IS_ERR_OR_NULL(mem_node->vmapping)) {
vunmap(mem_node->vmapping);
mem_node->vmapping = NULL;
} else {
pr_err("HGSL %s vmapping is NULL\n", __func__);
}
}
out:
mutex_unlock(&hgsl_map_global_lock);
}
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = hgsl_mem_map_dma_buf,
.unmap_dma_buf = hgsl_mem_unmap_dma_buf,
.mmap = hgsl_mem_mmap,
.release = hgsl_mem_dma_buf_release,
.vmap = hgsl_mem_dma_buf_vmap,
.vunmap = hgsl_mem_dma_buf_vunmap,
};
static inline gfp_t hgsl_gfp_mask(unsigned int page_order)
{
unsigned int gfp_mask = __GFP_HIGHMEM | __GFP_ZERO;
if (page_order > 0) {
gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
gfp_mask &= ~__GFP_RECLAIM;
} else {
gfp_mask |= GFP_KERNEL;
}
return gfp_mask;
}
static void _dma_cache_op(struct device *dev, struct page *page,
uint32_t page_count, unsigned int op)
{
struct scatterlist sgl;
size_t size_bytes = page_count << PAGE_SHIFT;
if (!dev || !page || !page_count)
return;
sg_init_table(&sgl, 1);
sg_set_page(&sgl, page, size_bytes, 0);
sg_dma_address(&sgl) = page_to_phys(page);
switch (op) {
case GSL_CACHEFLAGS_FLUSH:
dma_sync_sg_for_device(dev, &sgl, 1, DMA_BIDIRECTIONAL);
break;
case GSL_CACHEFLAGS_CLEAN:
dma_sync_sg_for_device(dev, &sgl, 1, DMA_TO_DEVICE);
break;
case GSL_CACHEFLAGS_INVALIDATE:
dma_sync_sg_for_device(dev, &sgl, 1, DMA_FROM_DEVICE);
break;
default:
LOGE("invalid cache operation");
break;
}
}
static void hgsl_get_sgt(struct device *dev, struct hgsl_mem_node *mem_node,
bool internal)
{
if (!IS_ERR_OR_NULL(mem_node->sgt_ext))
return;
if (internal) {
mem_node->sgt_ext = hgsl_get_sgt_internal(mem_node);
return;
}
if (IS_ERR_OR_NULL(mem_node->attach)) {
if (IS_ERR_OR_NULL(mem_node->dma_buf)) {
mem_node->dma_buf = dma_buf_get(mem_node->fd);
if (IS_ERR_OR_NULL(mem_node->dma_buf))
return;
}
mem_node->attach = dma_buf_attach(mem_node->dma_buf, dev);
if (IS_ERR_OR_NULL(mem_node->attach))
return;
}
mem_node->sgt_ext = dma_buf_map_attachment(mem_node->attach, DMA_BIDIRECTIONAL);
}
void hgsl_put_sgt(struct hgsl_mem_node *mem_node, bool internal)
{
if (IS_ERR_OR_NULL(mem_node->sgt_ext))
return;
if (internal) {
hgsl_put_sgt_internal(mem_node);
mem_node->sgt_ext = NULL;
return;
}
if (!IS_ERR_OR_NULL(mem_node->attach)) {
dma_buf_unmap_attachment(mem_node->attach,
mem_node->sgt_ext,
DMA_BIDIRECTIONAL);
mem_node->sgt_ext = NULL;
} else {
LOGE("invalid attach");
}
if (!IS_ERR_OR_NULL(mem_node->attach)) {
if (!IS_ERR_OR_NULL(mem_node->dma_buf)) {
dma_buf_detach(mem_node->dma_buf, mem_node->attach);
mem_node->attach = NULL;
} else {
LOGE("invalid dma_buf");
}
}
if (!IS_ERR_OR_NULL(mem_node->dma_buf)) {
dma_buf_put(mem_node->dma_buf);
mem_node->dma_buf = NULL;
}
}
int hgsl_mem_cache_op(struct device *dev, struct hgsl_mem_node *mem_node,
bool internal, uint64_t offsetbytes, uint64_t sizebytes, uint32_t op)
{
int ret = 0;
uint32_t cache_mode = 0;
uint32_t pg_offset = 0;
uint32_t pg_size = 0;
uint32_t sg_size = 0;
uint32_t i = 0;
struct sg_table *sgt = NULL;
struct scatterlist *s = NULL;
if (!dev || !mem_node)
return -EINVAL;
if (mem_node->flags & GSL_MEMFLAGS_PROTECTED)
return -EINVAL;
cache_mode = mem_node->flags & GSL_MEMFLAGS_CACHEMODE_MASK;
switch (cache_mode) {
case GSL_MEMFLAGS_WRITETHROUGH:
case GSL_MEMFLAGS_WRITEBACK:
break;
default:
return 0;
}
if (sizebytes == 0 || sizebytes > UINT_MAX || offsetbytes > UINT_MAX)
return -ERANGE;
/* Check that offset+length does not exceed memdesc->size */
if ((sizebytes + offsetbytes) > mem_node->memdesc.size)
return -ERANGE;
sizebytes += offsetbytes & ~PAGE_MASK;
pg_size = PAGE_ALIGN(sizebytes) >> PAGE_SHIFT;
pg_offset = offsetbytes >> PAGE_SHIFT;
if (IS_ERR_OR_NULL(mem_node->sgt_ext)) {
hgsl_get_sgt(dev, mem_node, internal);
if (IS_ERR_OR_NULL(mem_node->sgt_ext))
return -EINVAL;
}
sgt = mem_node->sgt_ext;
for_each_sg(sgt->sgl, s, sgt->nents, i) {
sg_size = s->length >> PAGE_SHIFT;
if (sg_size > pg_offset) {
struct page *page = nth_page(sg_page(s), pg_offset);
sg_size -= pg_offset;
pg_offset = 0;
sg_size = min(sg_size, pg_size);
_dma_cache_op(dev, page, sg_size, op);
pg_size -= sg_size;
if (!pg_size)
break;
} else {
pg_offset -= sg_size;
}
}
return ret;
}
static int hgsl_alloc_pages(struct device *dev, uint32_t requested_pcount,
struct page **pages)
{
struct page *page = NULL;
unsigned int order = get_order(requested_pcount << PAGE_SHIFT);
uint32_t pcount = 0;
uint32_t i;
while (1) {
gfp_t gfp_mask = hgsl_gfp_mask(order);
page = alloc_pages(gfp_mask, order);
if ((page == NULL) && (order > 0)) {
order--;
continue;
}
break;
}
if (page) {
pcount = (1 << order);
if (requested_pcount < pcount)
pcount = requested_pcount;
for (i = 0; i < pcount; i++)
pages[i] = nth_page(page, i);
_dma_cache_op(dev, page, pcount, GSL_CACHEFLAGS_FLUSH);
}
return pcount;
}
static int hgsl_export_dma_buf_fd(struct hgsl_mem_node *mem_node)
{
int fd = -1;
struct dma_buf *dma_buf = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &dma_buf_ops;
exp_info.size = mem_node->page_count << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = mem_node;
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
LOGE("dma_buf_export failed");
return -ENOMEM;
}
mem_node->dma_buf = dma_buf;
fd = dma_buf_fd(dma_buf, O_CLOEXEC);
if (fd < 0) {
LOGE("dma buf to fd failed");
return -EINVAL;
}
get_dma_buf(dma_buf);
mem_node->fd = fd;
return 0;
}
int hgsl_sharedmem_alloc(struct device *dev, uint32_t sizebytes,
uint32_t flags, struct hgsl_mem_node *mem_node)
{
uint32_t requested_size = PAGE_ALIGN(sizebytes);
uint32_t requested_pcount = requested_size >> PAGE_SHIFT;
uint32_t allocated_pcount = 0;
uint32_t nents = 0;
int ret = 0;
mem_node->pages =
hgsl_malloc(requested_pcount * sizeof(struct page *));
if (mem_node->pages == NULL)
return -ENOMEM;
while (requested_pcount > 0) {
uint32_t pcount;
pcount = hgsl_alloc_pages(dev, requested_pcount,
mem_node->pages + allocated_pcount);
if (pcount == 0) {
LOGE("Out of memory requested 0x%x, allocated 0x%x",
sizebytes, allocated_pcount << PAGE_SHIFT);
break;
}
allocated_pcount += pcount;
requested_pcount -= pcount;
nents++;
}
mem_node->page_count = allocated_pcount;
mem_node->sg_nents = nents;
mem_node->memtype = GSL_USER_MEM_TYPE_ASHMEM;
mem_node->memdesc.size = requested_size;
if (requested_pcount != 0)
return -ENOMEM;
if (flags & GSL_MEMFLAGS_PROTECTED) {
ret = hgsl_lock_pages(mem_node);
if (ret)
return ret;
}
return hgsl_export_dma_buf_fd(mem_node);
}
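A hedged kernel-side sketch of the allocate/export/free pairing provided by the helpers above (not part of this change; the real caller is the ioctl layer in the suppressed hgsl.c, and per-process list bookkeeping is omitted):

/* Sketch only: allocate a shared buffer and export it as a dma-buf fd. */
static int demo_alloc_buffer(struct device *dev, uint32_t sizebytes,
		uint32_t flags, struct hgsl_mem_node **out)
{
	struct hgsl_mem_node *node;
	int ret;

	node = hgsl_zalloc(sizeof(*node));
	if (!node)
		return -ENOMEM;

	ret = hgsl_sharedmem_alloc(dev, sizebytes, flags, node);
	if (ret) {
		hgsl_sharedmem_free(node);	/* releases pages and the node itself */
		return ret;
	}
	/* node->fd now refers to the exported dma-buf; node->dma_buf holds the
	 * extra reference that a later hgsl_sharedmem_free() call drops.
	 */
	*out = node;
	return 0;
}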
void hgsl_sharedmem_free(struct hgsl_mem_node *mem_node)
{
if (!mem_node)
return;
hgsl_put_sgt(mem_node, true);
if (mem_node->dma_buf)
dma_buf_put(mem_node->dma_buf);
else
hgsl_mem_free_actual(mem_node);
}
struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head,
uint64_t gpuaddr, uint64_t size)
{
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
uint64_t end = gpuaddr + size;
list_for_each_entry(tmp, head, node) {
if ((tmp->memdesc.gpuaddr <= gpuaddr)
&& ((tmp->memdesc.gpuaddr + tmp->memdesc.size) >= end)) {
node_found = tmp;
break;
}
}
return node_found;
}
MODULE_IMPORT_NS(DMA_BUF);


@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef HGSL_MEMORY_INCLUDED
#define HGSL_MEMORY_INCLUDED
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "hgsl_types.h"
#include "hgsl_utils.h"
#define HGSL_MEM_META_MAX_SIZE 128
enum gsl_user_mem_type_t {
GSL_USER_MEM_TYPE_PMEM = 0x00000000,
GSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
GSL_USER_MEM_TYPE_ADDR = 0x00000002,
GSL_USER_MEM_TYPE_ION = 0x00000003
};
struct hgsl_mem_node {
struct list_head node;
struct gsl_memdesc_t memdesc;
int32_t fd;
uint32_t export_id;
struct hgsl_hab_channel_t *hab_channel;
enum gsl_user_mem_type_t memtype;
struct dma_buf *dma_buf;
struct page **pages;
uint32_t page_count;
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct sg_table *sgt_ext;
uint32_t sgt_refcount;
uint32_t sg_nents;
void *vmapping;
uint32_t vmap_count;
uint32_t flags;
char metainfo[HGSL_MEM_META_MAX_SIZE];
};
int hgsl_sharedmem_alloc(struct device *dev, uint32_t sizebytes,
uint32_t flags, struct hgsl_mem_node *mem_node);
void hgsl_sharedmem_free(struct hgsl_mem_node *mem_node);
int hgsl_mem_cache_op(struct device *dev, struct hgsl_mem_node *mem_node,
bool internal, uint64_t offsetbytes, uint64_t sizebytes, uint32_t op);
void hgsl_put_sgt(struct hgsl_mem_node *mem_node, bool internal);
struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head,
uint64_t gpuaddr, uint64_t size);
#endif


@ -0,0 +1,811 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include "hgsl.h"
#define HGSL_HSYNC_FINI_RETRY_COUNT 50
#define HGSL_HSYNC_FINI_RETRY_TIME_SLICE 10
#define HGSL_TIMELINE_INFINITE_TIMEOUT (~(0ULL))
static const struct dma_fence_ops hgsl_hsync_fence_ops;
static const struct dma_fence_ops hgsl_isync_fence_ops;
int hgsl_hsync_fence_create_fd(struct hgsl_context *context,
uint32_t ts)
{
int fence_fd;
struct hgsl_hsync_fence *fence;
fence_fd = get_unused_fd_flags(0);
if (fence_fd < 0)
return fence_fd;
fence = hgsl_hsync_fence_create(context, ts);
if (fence == NULL) {
put_unused_fd(fence_fd);
return -ENOMEM;
}
fd_install(fence_fd, fence->sync_file->file);
return fence_fd;
}
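The fd returned here wraps a sync_file, so user space can wait on it with poll()/epoll like any other fence fd. A user-space sketch (not part of this change; how fence_fd is obtained from the driver is assumed):

/* User-space sketch: wait up to timeout_ms for an hgsl fence fd to signal. */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static int demo_wait_fence_fd(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
	int ret;

	do {
		ret = poll(&pfd, 1, timeout_ms);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0)
		return -errno;
	if (ret == 0)
		return -ETIME;		/* not signaled within timeout_ms */

	close(fence_fd);		/* drop the reference once done */
	return 0;
}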
struct hgsl_hsync_fence *hgsl_hsync_fence_create(
struct hgsl_context *context,
uint32_t ts)
{
unsigned long flags;
struct hgsl_hsync_timeline *timeline = context->timeline;
struct hgsl_hsync_fence *fence;
if (timeline == NULL)
return NULL;
if (!kref_get_unless_zero(&timeline->kref))
return NULL;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (fence == NULL) {
hgsl_hsync_timeline_put(timeline);
return NULL;
}
fence->ts = ts;
dma_fence_init(&fence->fence, &hgsl_hsync_fence_ops,
&timeline->lock, timeline->fence_context, ts);
fence->sync_file = sync_file_create(&fence->fence);
dma_fence_put(&fence->fence);
if (fence->sync_file == NULL) {
hgsl_hsync_timeline_put(timeline);
return NULL;
}
fence->timeline = timeline;
spin_lock_irqsave(&timeline->lock, flags);
list_add_tail(&fence->child_list, &timeline->fence_list);
spin_unlock_irqrestore(&timeline->lock, flags);
return fence;
}
void hgsl_hsync_timeline_signal(struct hgsl_hsync_timeline *timeline,
unsigned int ts)
{
struct hgsl_hsync_fence *cur, *next;
unsigned long flags;
if (!kref_get_unless_zero(&timeline->kref))
return;
if (hgsl_ts32_ge(timeline->last_ts, ts)) {
hgsl_hsync_timeline_put(timeline);
return;
}
spin_lock_irqsave(&timeline->lock, flags);
timeline->last_ts = ts;
list_for_each_entry_safe(cur, next, &timeline->fence_list,
child_list) {
if (dma_fence_is_signaled_locked(&cur->fence))
list_del_init(&cur->child_list);
}
spin_unlock_irqrestore(&timeline->lock, flags);
hgsl_hsync_timeline_put(timeline);
}
int hgsl_hsync_timeline_create(struct hgsl_context *context)
{
struct hgsl_hsync_timeline *timeline;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return -ENOMEM;
snprintf(timeline->name, HGSL_TIMELINE_NAME_LEN,
"timeline_%s_%d",
current->comm, current->pid);
kref_init(&timeline->kref);
timeline->fence_context = dma_fence_context_alloc(1);
INIT_LIST_HEAD(&timeline->fence_list);
spin_lock_init(&timeline->lock);
timeline->context = context;
context->timeline = timeline;
return 0;
}
static void hgsl_hsync_timeline_destroy(struct kref *kref)
{
struct hgsl_hsync_timeline *timeline =
container_of(kref, struct hgsl_hsync_timeline, kref);
kfree(timeline);
}
void hgsl_hsync_timeline_put(struct hgsl_hsync_timeline *timeline)
{
if (timeline)
kref_put(&timeline->kref, hgsl_hsync_timeline_destroy);
}
void hgsl_hsync_timeline_fini(struct hgsl_context *context)
{
struct hgsl_hsync_timeline *timeline = context->timeline;
struct hgsl_hsync_fence *fence;
int retry_count = HGSL_HSYNC_FINI_RETRY_COUNT;
unsigned int max_ts = 0;
unsigned long flags;
if (!kref_get_unless_zero(&timeline->kref))
return;
spin_lock_irqsave(&timeline->lock, flags);
while ((retry_count >= 0) && (!list_empty(&timeline->fence_list))) {
spin_unlock_irqrestore(&timeline->lock, flags);
msleep(HGSL_HSYNC_FINI_RETRY_TIME_SLICE);
retry_count--;
spin_lock_irqsave(&timeline->lock, flags);
}
list_for_each_entry(fence, &timeline->fence_list, child_list)
if (max_ts < fence->ts)
max_ts = fence->ts;
spin_unlock_irqrestore(&timeline->lock, flags);
hgsl_hsync_timeline_signal(timeline, max_ts);
context->last_ts = max_ts;
hgsl_hsync_timeline_put(timeline);
}
static const char *hgsl_hsync_get_driver_name(struct dma_fence *base)
{
return "hgsl-timeline";
}
static const char *hgsl_hsync_get_timeline_name(struct dma_fence *base)
{
struct hgsl_hsync_fence *fence =
container_of(base, struct hgsl_hsync_fence, fence);
struct hgsl_hsync_timeline *timeline = fence->timeline;
return timeline->name;
}
static bool hgsl_hsync_enable_signaling(struct dma_fence *base)
{
return true;
}
static bool hgsl_hsync_has_signaled(struct dma_fence *base)
{
struct hgsl_hsync_fence *fence =
container_of(base, struct hgsl_hsync_fence, fence);
struct hgsl_hsync_timeline *timeline = fence->timeline;
return hgsl_ts32_ge(timeline->last_ts, fence->ts);
}
static void hgsl_hsync_fence_release(struct dma_fence *base)
{
struct hgsl_hsync_fence *fence =
container_of(base, struct hgsl_hsync_fence, fence);
struct hgsl_hsync_timeline *timeline = fence->timeline;
if (timeline) {
spin_lock(&timeline->lock);
list_del_init(&fence->child_list);
spin_unlock(&timeline->lock);
hgsl_hsync_timeline_put(timeline);
}
kfree(fence);
}
static void hgsl_hsync_fence_value_str(struct dma_fence *base,
char *str, int size)
{
struct hgsl_hsync_fence *fence =
container_of(base, struct hgsl_hsync_fence, fence);
snprintf(str, size, "%u", fence->ts);
}
static void hgsl_hsync_timeline_value_str(struct dma_fence *base,
char *str, int size)
{
struct hgsl_hsync_fence *fence =
container_of(base, struct hgsl_hsync_fence, fence);
struct hgsl_hsync_timeline *timeline = fence->timeline;
if (!kref_get_unless_zero(&timeline->kref))
return;
snprintf(str, size, "Last retired TS:%u", timeline->last_ts);
hgsl_hsync_timeline_put(timeline);
}
static const struct dma_fence_ops hgsl_hsync_fence_ops = {
.get_driver_name = hgsl_hsync_get_driver_name,
.get_timeline_name = hgsl_hsync_get_timeline_name,
.enable_signaling = hgsl_hsync_enable_signaling,
.signaled = hgsl_hsync_has_signaled,
.wait = dma_fence_default_wait,
.release = hgsl_hsync_fence_release,
.fence_value_str = hgsl_hsync_fence_value_str,
.timeline_value_str = hgsl_hsync_timeline_value_str,
};
static void hgsl_isync_timeline_release(struct kref *kref)
{
struct hgsl_isync_timeline *timeline = container_of(kref,
struct hgsl_isync_timeline,
kref);
kfree(timeline);
}
static struct hgsl_isync_timeline *
hgsl_isync_timeline_get(struct hgsl_priv *priv, int id, bool check_owner)
{
int ret = 0;
struct qcom_hgsl *hgsl = priv->dev;
struct hgsl_isync_timeline *timeline = NULL;
spin_lock(&hgsl->isync_timeline_lock);
timeline = idr_find(&hgsl->isync_timeline_idr, id);
if (timeline) {
if (check_owner && (timeline->priv != priv)) {
timeline = NULL;
} else {
ret = kref_get_unless_zero(&timeline->kref);
if (!ret)
timeline = NULL;
}
}
spin_unlock(&hgsl->isync_timeline_lock);
return timeline;
}
static void hgsl_isync_timeline_put(struct hgsl_isync_timeline *timeline)
{
if (timeline)
kref_put(&timeline->kref, hgsl_isync_timeline_release);
}
int hgsl_isync_timeline_create(struct hgsl_priv *priv,
uint32_t *timeline_id,
uint32_t flags,
uint64_t initial_ts)
{
struct qcom_hgsl *hgsl = priv->dev;
struct hgsl_isync_timeline *timeline;
int ret = -EINVAL;
uint32_t idr;
if (timeline_id == NULL)
return -EINVAL;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (timeline == NULL)
return -ENOMEM;
kref_init(&timeline->kref);
timeline->context = dma_fence_context_alloc(1);
INIT_LIST_HEAD(&timeline->fence_list);
spin_lock_init(&timeline->lock);
timeline->priv = priv;
snprintf((char *) timeline->name, sizeof(timeline->name),
"isync-timeline-%d", *timeline_id);
timeline->flags = flags;
timeline->last_ts = initial_ts;
timeline->is64bits = ((flags & HGSL_ISYNC_64BITS_TIMELINE) != 0);
idr_preload(GFP_KERNEL);
spin_lock(&hgsl->isync_timeline_lock);
idr = idr_alloc(&hgsl->isync_timeline_idr, timeline, 1, 0, GFP_NOWAIT);
spin_unlock(&hgsl->isync_timeline_lock);
idr_preload_end();
if (idr > 0) {
timeline->id = idr;
*timeline_id = idr;
ret = 0;
} else
kfree(timeline);
return ret;
}
int hgsl_isync_fence_create(struct hgsl_priv *priv, uint32_t timeline_id,
uint32_t ts, bool ts_is_valid, int *fence_fd)
{
unsigned long flags;
struct hgsl_isync_timeline *timeline = NULL;
struct hgsl_isync_fence *fence = NULL;
struct sync_file *sync_file = NULL;
int ret = 0;
if (fence_fd == NULL)
return -EINVAL;
timeline = hgsl_isync_timeline_get(priv, timeline_id, true);
if (timeline == NULL) {
ret = -EINVAL;
goto out;
}
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (fence == NULL) {
ret = -ENOMEM;
goto out;
}
/* set a minimal ts if the user didn't set one */
if (!ts_is_valid)
ts = 1;
fence->ts = ts;
dma_fence_init(&fence->fence, &hgsl_isync_fence_ops,
&timeline->lock,
timeline->context,
ts);
sync_file = sync_file_create(&fence->fence);
dma_fence_put(&fence->fence);
if (sync_file == NULL) {
ret = -ENOMEM;
goto out;
}
*fence_fd = get_unused_fd_flags(0);
if (*fence_fd < 0) {
ret = -EBADF;
goto out;
}
fd_install(*fence_fd, sync_file->file);
fence->timeline = timeline;
spin_lock_irqsave(&timeline->lock, flags);
list_add_tail(&fence->child_list, &timeline->fence_list);
spin_unlock_irqrestore(&timeline->lock, flags);
out:
if (ret) {
if (sync_file)
fput(sync_file->file);
if (timeline)
hgsl_isync_timeline_put(timeline);
}
return ret;
}
static int hgsl_isync_timeline_destruct(struct hgsl_priv *priv,
struct hgsl_isync_timeline *timeline)
{
unsigned long flags;
struct hgsl_isync_fence *cur, *next;
LIST_HEAD(flist);
if (timeline == NULL)
return -EINVAL;
spin_lock_irqsave(&timeline->lock, flags);
list_for_each_entry_safe(cur, next, &timeline->fence_list,
child_list) {
dma_fence_get(&cur->fence);
list_del_init(&cur->child_list);
list_add(&cur->free_list, &flist);
}
spin_unlock_irqrestore(&timeline->lock, flags);
list_for_each_entry_safe(cur, next, &flist, free_list) {
list_del(&cur->free_list);
dma_fence_signal(&cur->fence);
dma_fence_put(&cur->fence);
}
hgsl_isync_timeline_put(timeline);
return 0;
}
int hgsl_isync_timeline_destroy(struct hgsl_priv *priv, uint32_t id)
{
struct qcom_hgsl *hgsl = priv->dev;
struct hgsl_isync_timeline *timeline;
spin_lock(&hgsl->isync_timeline_lock);
timeline = idr_find(&hgsl->isync_timeline_idr, id);
if (timeline) {
if (timeline->priv == priv) {
idr_remove(&hgsl->isync_timeline_idr, timeline->id);
timeline->id = 0;
} else {
timeline = NULL;
}
}
spin_unlock(&hgsl->isync_timeline_lock);
if (timeline == NULL)
return -EINVAL;
return hgsl_isync_timeline_destruct(priv, timeline);
}
void hgsl_isync_fini(struct hgsl_priv *priv)
{
LIST_HEAD(flist);
struct qcom_hgsl *hgsl = priv->dev;
struct hgsl_isync_timeline *cur, *t;
uint32_t idr;
spin_lock(&hgsl->isync_timeline_lock);
idr_for_each_entry(&hgsl->isync_timeline_idr,
cur, idr) {
if (cur->priv == priv) {
idr_remove(&hgsl->isync_timeline_idr, idr);
list_add(&cur->free_list, &flist);
}
}
spin_unlock(&hgsl->isync_timeline_lock);
list_for_each_entry_safe(cur, t, &flist, free_list) {
list_del(&cur->free_list);
hgsl_isync_timeline_destruct(priv, cur);
}
}
static int _isync_timeline_signal(
struct hgsl_isync_timeline *timeline,
struct dma_fence *fence)
{
unsigned long flags;
int ret = -EINVAL;
struct hgsl_isync_fence *cur, *next;
bool found = false;
spin_lock_irqsave(&timeline->lock, flags);
list_for_each_entry_safe(cur, next, &timeline->fence_list,
child_list) {
if (fence == &cur->fence) {
list_del_init(&cur->child_list);
found = true;
break;
}
}
spin_unlock_irqrestore(&timeline->lock, flags);
if (found) {
dma_fence_signal(fence);
ret = 0;
}
return ret;
}
int hgsl_isync_fence_signal(struct hgsl_priv *priv, uint32_t timeline_id,
int fence_fd)
{
struct hgsl_isync_timeline *timeline;
struct dma_fence *fence = NULL;
int ret = -EINVAL;
timeline = hgsl_isync_timeline_get(priv, timeline_id, true);
if (timeline == NULL) {
ret = -EINVAL;
goto out;
}
fence = sync_file_get_fence(fence_fd);
if (fence == NULL) {
ret = -EBADF;
goto out;
}
ret = _isync_timeline_signal(timeline, fence);
out:
if (fence)
dma_fence_put(fence);
if (timeline)
hgsl_isync_timeline_put(timeline);
return ret;
}
int hgsl_isync_forward(struct hgsl_priv *priv, uint32_t timeline_id,
uint64_t ts, bool check_owner)
{
unsigned long flags;
struct hgsl_isync_timeline *timeline;
struct hgsl_isync_fence *cur, *next;
struct dma_fence *base;
LIST_HEAD(flist);
timeline = hgsl_isync_timeline_get(priv, timeline_id, check_owner);
if (timeline == NULL)
return -EINVAL;
if (hgsl_ts_ge(timeline->last_ts, ts, timeline->is64bits))
goto out;
spin_lock_irqsave(&timeline->lock, flags);
timeline->last_ts = ts;
list_for_each_entry_safe(cur, next, &timeline->fence_list,
child_list) {
if (hgsl_ts_ge(ts, cur->ts, timeline->is64bits)) {
base = dma_fence_get_rcu(&cur->fence);
list_del_init(&cur->child_list);
/* It *shouldn't* happen. If it does, it's
* the last thing you'll see
*/
if (base == NULL)
pr_warn(" Invalid fence:%p.\n", cur);
else
list_add(&cur->free_list, &flist);
}
}
spin_unlock_irqrestore(&timeline->lock, flags);
list_for_each_entry_safe(cur, next, &flist, free_list) {
list_del(&cur->free_list);
dma_fence_signal(&cur->fence);
dma_fence_put(&cur->fence);
}
out:
if (timeline)
hgsl_isync_timeline_put(timeline);
return 0;
}
int hgsl_isync_query(struct hgsl_priv *priv, uint32_t timeline_id,
uint64_t *ts)
{
struct hgsl_isync_timeline *timeline;
timeline = hgsl_isync_timeline_get(priv, timeline_id, false);
if (timeline == NULL)
return -EINVAL;
*ts = timeline->last_ts;
hgsl_isync_timeline_put(timeline);
return 0;
}
static struct dma_fence *hgsl_timelines_to_fence_array(struct hgsl_priv *priv,
u64 timelines, u32 count, u64 usize, bool any)
{
void __user *uptr = u64_to_user_ptr(timelines);
struct dma_fence_array *array;
struct dma_fence **fences;
struct hgsl_isync_fence *fence = NULL;
int i, ret = 0;
if (!count || count > INT_MAX)
return ERR_PTR(-EINVAL);
fences = kcalloc(count, sizeof(*fences),
GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
if (!fences)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
struct hgsl_timeline_val val;
struct hgsl_isync_timeline *timeline;
if (copy_struct_from_user(&val, sizeof(val), uptr, usize)) {
ret = -EFAULT;
goto err;
}
if (val.padding) {
ret = -EINVAL;
goto err;
}
timeline = hgsl_isync_timeline_get(priv, val.timeline_id, false);
if (!timeline) {
ret = -ENOENT;
goto err;
}
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (fence == NULL) {
hgsl_isync_timeline_put(timeline);
ret = -ENOMEM;
goto err;
}
fence->timeline = timeline;
fence->ts = val.timepoint;
dma_fence_init(&fence->fence, &hgsl_isync_fence_ops,
&timeline->lock,
timeline->context,
fence->ts);
spin_lock(&timeline->lock);
list_add_tail(&fence->child_list, &timeline->fence_list);
spin_unlock(&timeline->lock);
fences[i] = &fence->fence;
uptr += usize;
}
/* No need for a fence array for only one fence */
if (count == 1) {
struct dma_fence *fence = fences[0];
kfree(fences);
return fence;
}
array = dma_fence_array_create(count, fences,
dma_fence_context_alloc(1), 0, any);
if (array)
return &array->base;
ret = -ENOMEM;
err:
for (i = 0; i < count; i++) {
if (!IS_ERR_OR_NULL(fences[i]))
dma_fence_put(fences[i]);
}
kfree(fences);
return ERR_PTR(ret);
}
int hgsl_isync_wait_multiple(struct hgsl_priv *priv, struct hgsl_timeline_wait *param)
{
struct dma_fence *fence;
unsigned long timeout;
signed long ret;
if (param->flags != HGSL_TIMELINE_WAIT_ANY &&
param->flags != HGSL_TIMELINE_WAIT_ALL)
return -EINVAL;
if (param->padding)
return -EINVAL;
fence = hgsl_timelines_to_fence_array(priv, param->timelines,
param->count, param->timelines_size,
(param->flags == HGSL_TIMELINE_WAIT_ANY));
if (IS_ERR(fence))
return PTR_ERR(fence);
if (param->timeout_nanosec == HGSL_TIMELINE_INFINITE_TIMEOUT)
timeout = MAX_SCHEDULE_TIMEOUT;
else {
struct timespec64 timespec;
timespec.tv_sec = param->timeout_nanosec / NSEC_PER_SEC;
timespec.tv_nsec = param->timeout_nanosec % NSEC_PER_SEC;
timeout = timespec64_to_jiffies(&timespec);
}
if (!timeout)
ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
else {
ret = dma_fence_wait_timeout(fence, true, timeout);
if (!ret)
ret = -ETIMEDOUT;
else if (ret > 0)
ret = 0;
else if (ret == -ERESTARTSYS)
ret = -EINTR;
}
dma_fence_put(fence);
return ret;
}
static const char *hgsl_isync_get_driver_name(struct dma_fence *base)
{
return "hgsl";
}
static const char *hgsl_isync_get_timeline_name(struct dma_fence *base)
{
struct hgsl_isync_fence *fence =
container_of(base,
struct hgsl_isync_fence,
fence);
struct hgsl_isync_timeline *timeline = fence->timeline;
return (timeline == NULL) ? "null" : timeline->name;
}
static bool hgsl_isync_has_signaled(struct dma_fence *base)
{
struct hgsl_isync_fence *fence = NULL;
struct hgsl_isync_timeline *timeline = NULL;
if (base) {
fence = container_of(base, struct hgsl_isync_fence, fence);
timeline = fence->timeline;
if (timeline)
return hgsl_ts_ge(timeline->last_ts, fence->ts, timeline->is64bits);
}
return false;
}
static bool hgsl_isync_enable_signaling(struct dma_fence *base)
{
return !hgsl_isync_has_signaled(base);
}
static void hgsl_isync_fence_release(struct dma_fence *base)
{
unsigned long flags;
struct hgsl_isync_fence *fence = container_of(base,
struct hgsl_isync_fence,
fence);
struct hgsl_isync_timeline *timeline = fence->timeline;
if (timeline) {
spin_lock_irqsave(&timeline->lock, flags);
list_del_init(&fence->child_list);
spin_unlock_irqrestore(&timeline->lock, flags);
dma_fence_signal(base);
hgsl_isync_timeline_put(fence->timeline);
}
dma_fence_free(&fence->fence);
}
static void hgsl_isync_fence_value_str(struct dma_fence *base,
char *str, int size)
{
snprintf(str, size, "%llu", base->context);
}
static const struct dma_fence_ops hgsl_isync_fence_ops = {
.get_driver_name = hgsl_isync_get_driver_name,
.get_timeline_name = hgsl_isync_get_timeline_name,
.enable_signaling = hgsl_isync_enable_signaling,
.signaled = hgsl_isync_has_signaled,
.wait = dma_fence_default_wait,
.release = hgsl_isync_fence_release,
.fence_value_str = hgsl_isync_fence_value_str,
};

View File

@ -0,0 +1,281 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "hgsl_tcsr.h"
/* Sender registers */
#define TCSR_GLB_CFG_COMPUTE_SIGNALING_REG 0x000
#define TCSR_COMPUTE_SIGNALING_REG 0x000
/* Receiver registers */
#define TCSR_COMPUTE_SIGNAL_STATUS_REG 0x000
#define TCSR_COMPUTE_SIGNAL_CLEAR_REG 0x400
#define TCSR_COMPUTE_SIGNAL_MASK_REG 0x800
struct hgsl_tcsr {
struct platform_device *pdev;
struct device *client_dev;
struct regmap *regmap;
struct regmap *glb_regmap;
enum hgsl_tcsr_role role;
unsigned int enable_count;
struct mutex dev_mutex;
unsigned int irq_num;
irqreturn_t (*isr)(struct device *dev, u32 status);
};
static irqreturn_t hgsl_tcsr_isr(int irq, void *ptr)
{
struct hgsl_tcsr *tcsr = ptr;
u32 status;
regmap_read(tcsr->regmap, TCSR_COMPUTE_SIGNAL_STATUS_REG, &status);
regmap_write(tcsr->regmap, TCSR_COMPUTE_SIGNAL_CLEAR_REG, status);
if (tcsr->isr)
return tcsr->isr(tcsr->client_dev, status);
else
return IRQ_HANDLED;
}
static int hgsl_tcsr_init_sender(struct hgsl_tcsr *tcsr)
{
struct device *dev = &tcsr->pdev->dev;
struct device_node *np = dev->of_node;
if (tcsr->glb_regmap != NULL)
return 0;
tcsr->regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR_OR_NULL(tcsr->regmap)) {
dev_err(dev, "failed to map sender register\n");
return -ENODEV;
}
tcsr->glb_regmap = syscon_regmap_lookup_by_phandle(np, "syscon-glb");
if (IS_ERR_OR_NULL(tcsr->glb_regmap)) {
dev_err(dev, "failed to map sender global register\n");
tcsr->glb_regmap = NULL;
return -ENODEV;
}
return 0;
}
static int hgsl_tcsr_init_receiver(struct hgsl_tcsr *tcsr)
{
struct device *dev = &tcsr->pdev->dev;
struct device_node *np = dev->of_node;
int ret;
if (tcsr->irq_num != 0)
return 0;
tcsr->regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR_OR_NULL(tcsr->regmap)) {
dev_err(dev, "failed to map receiver register\n");
return -ENODEV;
}
tcsr->irq_num = irq_of_parse_and_map(np, 0);
if (tcsr->irq_num == 0) {
dev_err(dev, "failed to get irq\n");
return -ENODEV;
}
ret = request_irq(tcsr->irq_num, hgsl_tcsr_isr,
IRQF_TRIGGER_HIGH, "hgsl-tcsr", tcsr);
if (ret < 0) {
dev_err(dev, "failed to request IRQ%u: %d\n",
tcsr->irq_num, ret);
tcsr->irq_num = 0;
return ret;
}
disable_irq(tcsr->irq_num);
return 0;
}
#if IS_ENABLED(CONFIG_QCOM_HGSL_TCSR_SIGNAL)
struct hgsl_tcsr *hgsl_tcsr_request(struct platform_device *pdev,
enum hgsl_tcsr_role role,
struct device *client,
irqreturn_t (*isr)(struct device *, u32))
{
struct hgsl_tcsr *tcsr = platform_get_drvdata(pdev);
int ret = -EINVAL;
if (!tcsr)
return ERR_PTR(-ENODEV);
else if (tcsr->role != role)
return ERR_PTR(-EINVAL);
else if (tcsr->client_dev)
return ERR_PTR(-EBUSY);
if (role == HGSL_TCSR_ROLE_RECEIVER) {
if (tcsr->isr)
return ERR_PTR(-EBUSY);
else if (!isr)
return ERR_PTR(-EINVAL);
tcsr->client_dev = client;
tcsr->isr = isr;
ret = hgsl_tcsr_init_receiver(tcsr);
} else { /* HGSL_TCSR_ROLE_SENDER */
if (isr)
return ERR_PTR(-EINVAL);
tcsr->client_dev = client;
ret = hgsl_tcsr_init_sender(tcsr);
}
if (ret) {
tcsr->client_dev = NULL;
tcsr->isr = NULL;
tcsr = ERR_PTR(ret);
}
return tcsr;
}
void hgsl_tcsr_free(struct hgsl_tcsr *tcsr)
{
if ((tcsr->role == HGSL_TCSR_ROLE_RECEIVER) &&
(tcsr->irq_num != 0) && (tcsr->isr != NULL))
free_irq(tcsr->irq_num, tcsr);
tcsr->client_dev = NULL;
tcsr->isr = NULL;
}
int hgsl_tcsr_enable(struct hgsl_tcsr *tcsr)
{
mutex_lock(&tcsr->dev_mutex);
if (tcsr->enable_count > 0)
goto done;
if (tcsr->irq_num)
enable_irq(tcsr->irq_num);
done:
tcsr->enable_count++;
mutex_unlock(&tcsr->dev_mutex);
return 0;
}
void hgsl_tcsr_disable(struct hgsl_tcsr *tcsr)
{
mutex_lock(&tcsr->dev_mutex);
if (--tcsr->enable_count > 0)
goto done;
if (tcsr->irq_num)
disable_irq(tcsr->irq_num);
done:
mutex_unlock(&tcsr->dev_mutex);
}
bool hgsl_tcsr_is_enabled(struct hgsl_tcsr *tcsr)
{
return (tcsr->enable_count > 0);
}
void hgsl_tcsr_irq_trigger(struct hgsl_tcsr *tcsr, int irq_id)
{
u32 reg;
/*
* Read back this global config register in case
* it has been modified by others.
*/
regmap_read(tcsr->glb_regmap,
TCSR_GLB_CFG_COMPUTE_SIGNALING_REG, &reg);
reg = irq_id << reg;
regmap_write(tcsr->regmap, TCSR_COMPUTE_SIGNALING_REG, reg);
}
void hgsl_tcsr_irq_enable(struct hgsl_tcsr *tcsr, u32 mask, bool enable)
{
u32 reg;
regmap_read(tcsr->regmap, TCSR_COMPUTE_SIGNAL_MASK_REG, &reg);
reg = enable ? (reg | mask) : (reg & ~mask);
regmap_write(tcsr->regmap, TCSR_COMPUTE_SIGNAL_MASK_REG, reg);
}
#endif
static const struct of_device_id hgsl_tcsr_match_table[] = {
{ .compatible = "qcom,hgsl-tcsr-sender" },
{ .compatible = "qcom,hgsl-tcsr-receiver" },
{}
};
static int hgsl_tcsr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct hgsl_tcsr *tcsr = devm_kzalloc(dev, sizeof(*tcsr),
GFP_KERNEL);
if (!tcsr)
return -ENOMEM;
if (of_device_is_compatible(np, "qcom,hgsl-tcsr-receiver")) {
tcsr->role = HGSL_TCSR_ROLE_RECEIVER;
} else if (of_device_is_compatible(np, "qcom,hgsl-tcsr-sender")) {
tcsr->role = HGSL_TCSR_ROLE_SENDER;
} else {
dev_err(dev, "Not compatible device\n");
return -ENODEV;
}
mutex_init(&tcsr->dev_mutex);
tcsr->pdev = pdev;
platform_set_drvdata(pdev, tcsr);
return 0;
}
static int hgsl_tcsr_remove(struct platform_device *pdev)
{
struct hgsl_tcsr *tcsr = platform_get_drvdata(pdev);
mutex_destroy(&tcsr->dev_mutex);
return 0;
}
struct platform_driver hgsl_tcsr_driver = {
.probe = hgsl_tcsr_probe,
.remove = hgsl_tcsr_remove,
.driver = {
.name = "hgsl-tcsr",
.of_match_table = hgsl_tcsr_match_table,
}
};

View File

@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __HGSL_TCSR_H
#define __HGSL_TCSR_H
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/platform_device.h>
/*
* We need to separate bit slots between KMD and GMU,
* to avoid one processor signaling back to itself.
* GMU should have the same definitions.
*
* Four bits are allocated for KMD and two for GMU: three KMD bits
* are used for submission and one GMU bit for TS retire. The
* remaining bits are reserved for future use.
*/
#define TCSR_KMD_TRIGGER_IRQ_ID_0 0
#define TCSR_KMD_TRIGGER_IRQ_ID_1 1
#define TCSR_KMD_TRIGGER_IRQ_ID_2 2
#define TCSR_KMD_TRIGGER_IRQ_ID_3 3
#define TCSR_GMU_TRIGGER_IRQ_ID_0 8
#define TCSR_GMU_TRIGGER_IRQ_ID_1 9
/* Define Source and Destination IRQ for KMD */
#define TCSR_SRC_IRQ_ID_0 TCSR_KMD_TRIGGER_IRQ_ID_0
#define TCSR_SRC_IRQ_ID_1 TCSR_KMD_TRIGGER_IRQ_ID_1
#define TCSR_SRC_IRQ_ID_2 TCSR_KMD_TRIGGER_IRQ_ID_2
#define TCSR_SRC_IRQ_ID_3 TCSR_KMD_TRIGGER_IRQ_ID_3
#define TCSR_DEST_IRQ_ID_0 TCSR_GMU_TRIGGER_IRQ_ID_0
#define TCSR_DEST_IRQ_ID_1 TCSR_GMU_TRIGGER_IRQ_ID_1
#define TCSR_DEST_IRQ_MASK_0 (1 << TCSR_DEST_IRQ_ID_0)
#define TCSR_DEST_IRQ_MASK_1 (1 << TCSR_DEST_IRQ_ID_1)
#define TCSR_DEST_IRQ_MASK (TCSR_DEST_IRQ_MASK_0 | TCSR_DEST_IRQ_MASK_1)
enum hgsl_tcsr_role {
HGSL_TCSR_ROLE_SENDER = 0,
HGSL_TCSR_ROLE_RECEIVER = 1,
HGSL_TCSR_ROLE_MAX,
};
struct hgsl_tcsr;
extern struct platform_driver hgsl_tcsr_driver;
#if IS_ENABLED(CONFIG_QCOM_HGSL_TCSR_SIGNAL)
struct hgsl_tcsr *hgsl_tcsr_request(struct platform_device *pdev,
enum hgsl_tcsr_role role,
struct device *client,
irqreturn_t (*isr)(struct device *, u32));
void hgsl_tcsr_free(struct hgsl_tcsr *tcsr);
int hgsl_tcsr_enable(struct hgsl_tcsr *tcsr);
void hgsl_tcsr_disable(struct hgsl_tcsr *tcsr);
bool hgsl_tcsr_is_enabled(struct hgsl_tcsr *tcsr);
void hgsl_tcsr_irq_trigger(struct hgsl_tcsr *tcsr, int irq_id);
void hgsl_tcsr_irq_enable(struct hgsl_tcsr *tcsr, u32 mask, bool enable);
#else
static inline struct hgsl_tcsr *hgsl_tcsr_request(struct platform_device *pdev,
enum hgsl_tcsr_role role,
struct device *client,
irqreturn_t (*isr)(struct device *, u32))
{
return NULL;
}
static inline void hgsl_tcsr_free(struct hgsl_tcsr *tcsr)
{
}
static inline int hgsl_tcsr_enable(struct hgsl_tcsr *tcsr)
{
return -ENODEV;
}
static inline void hgsl_tcsr_disable(struct hgsl_tcsr *tcsr)
{
}
static inline bool hgsl_tcsr_is_enabled(struct hgsl_tcsr *tcsr)
{
return false;
}
static inline void hgsl_tcsr_irq_trigger(struct hgsl_tcsr *tcsr, int irq_id)
{
}
static inline void hgsl_tcsr_irq_enable(struct hgsl_tcsr *tcsr, u32 mask,
bool enable)
{
}
#endif
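/*
 * Illustrative sketch only (not part of this driver): one plausible way
 * a client could grab the sender TCSR and ring doorbell bit
 * TCSR_DEST_IRQ_ID_0. "sender_pdev" and "client_dev" are hypothetical
 * pointers owned by the caller; a real user also needs a matching
 * device tree node and should keep the TCSR enabled for as long as it
 * intends to signal.
 */
#if 0 /* example usage, never built */
static int example_tcsr_ring(struct platform_device *sender_pdev,
			     struct device *client_dev)
{
	struct hgsl_tcsr *tcsr;
	int ret;

	/* The sender role takes no ISR; only the receiver registers one. */
	tcsr = hgsl_tcsr_request(sender_pdev, HGSL_TCSR_ROLE_SENDER,
				 client_dev, NULL);
	if (IS_ERR_OR_NULL(tcsr))
		return tcsr ? PTR_ERR(tcsr) : -ENODEV;

	ret = hgsl_tcsr_enable(tcsr);
	if (ret)
		return ret;

	hgsl_tcsr_irq_trigger(tcsr, TCSR_DEST_IRQ_ID_0);

	hgsl_tcsr_disable(tcsr);
	hgsl_tcsr_free(tcsr);
	return 0;
}
#endif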
#endif /* __HGSL_TCSR_H */

View File

@ -0,0 +1,230 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2006-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HGSL_TYPES_H
#define __HGSL_TYPES_H
#include <linux/stddef.h>
/****************************************************************************/
/* status */
/****************************************************************************/
#define GSL_SUCCESS 0
#define GSL_FAILURE -1
#define GSL_FAILURE_SYSTEMERROR -2
#define GSL_FAILURE_DEVICEERROR -3
#define GSL_FAILURE_OUTOFMEM -4
#define GSL_FAILURE_BADPARAM -5
#define GSL_FAILURE_NOTSUPPORTED -6
#define GSL_FAILURE_NOMOREAVAILABLE -7
#define GSL_FAILURE_NOTINITIALIZED -8
#define GSL_FAILURE_ALREADYINITIALIZED -9
#define GSL_FAILURE_TIMEOUT -10
#define GSL_FAILURE_OFFSETINVALID -11
#define GSL_FAILURE_CTXT_DEADLOCK -12
#define GSL_FAILURE_PERFCOUNTER_UNAVAILABLE -13
#define GSL_FAILURE_HANG -14
#define GSL_FAILURE_RETRY -15
#define GSL_FLAGS_INITIALIZED 0x00000004
#define GSL_MEMFLAGS_VM 0x80000000
/****************************************************************************/
/* memory allocation flags */
/****************************************************************************/
#define GSL_MEMFLAGS_ANY 0x00000000 /* dont care */
#define GSL_MEMFLAGS_ALIGNANY 0x00000000
#define GSL_MEMFLAGS_ALIGN32 0x00000000
#define GSL_MEMFLAGS_ALIGN64 0x00060000
#define GSL_MEMFLAGS_ALIGN128 0x00070000
#define GSL_MEMFLAGS_ALIGN256 0x00080000
#define GSL_MEMFLAGS_ALIGN512 0x00090000
#define GSL_MEMFLAGS_ALIGN1K 0x000A0000
#define GSL_MEMFLAGS_ALIGN2K 0x000B0000
#define GSL_MEMFLAGS_ALIGN4K 0x000C0000
#define GSL_MEMFLAGS_ALIGN8K 0x000D0000
#define GSL_MEMFLAGS_ALIGN16K 0x000E0000
#define GSL_MEMFLAGS_ALIGN32K 0x000F0000
#define GSL_MEMFLAGS_ALIGN64K 0x00100000
#define GSL_MEMFLAGS_ALIGN1MB 0x00140000
#define GSL_MEMFLAGS_ALIGNPAGE GSL_MEMFLAGS_ALIGN4K
#define GSL_MEMFLAGS_GPUREADWRITE 0x00000000
#define GSL_MEMFLAGS_GPUREADONLY 0x01000000
#define GSL_MEMFLAGS_GPUWRITEONLY 0x02000000
#define GSL_MEMFLAGS_GPUNOACCESS 0x03000000
#define GSL_MEMFLAGS_WRITECOMBINE 0x00000000
#define GSL_MEMFLAGS_PROTECTED 0x00000008 /* protected buffer flag*/
#define GSL_MEMFLAGS_UNCACHED 0x04000000
#define GSL_MEMFLAGS_WRITETHROUGH 0x08000000
#define GSL_MEMFLAGS_WRITEBACK 0x0C000000
#define GSL_MEMFLAGS_USE_CPU_MAP 0x10000000
#define GSL_MEMFLAGS_CONTIGUOUS 0x20000000
#define GSL_MEMFLAGS_FORCEPAGESIZE 0x40000000
#define GSL_MEMFLAGS_GPUIOCOHERENT 0x80000000
#define GSL_MEMFLAGS_CACHEMODE_MASK 0x0C000000
/****************************************************************************/
/* cache flags */
/****************************************************************************/
#define GSL_CACHEFLAGS_CLEAN 0x00000001
#define GSL_CACHEFLAGS_TO_GPU GSL_CACHEFLAGS_CLEAN
#define GSL_CACHEFLAGS_INVALIDATE 0x00000002
#define GSL_CACHEFLAGS_FROM_GPU GSL_CACHEFLAGS_INVALIDATE
#define GSL_CACHEFLAGS_FLUSH (GSL_CACHEFLAGS_CLEAN | GSL_CACHEFLAGS_INVALIDATE)
/****************************************************************************/
/* context */
/****************************************************************************/
#define GSL_CONTEXT_FLAG_USER_GENERATED_TS 0x00000080
#define GSL_CONTEXT_FLAG_BIND 0x00040000
#define GSL_CONTEXT_FLAG_CLIENT_GENERATED_TS 0x80000000
/****************************************************************************/
/* other */
/****************************************************************************/
#define GSL_TIMEOUT_NONE 0
#define GSL_TIMEOUT_INFINITE 0xFFFFFFFF
#define GSL_TIMEOUT_DEFAULT GSL_TIMEOUT_INFINITE
#define GSL_RPC_WAITTIMESTAMP_SLICE 1000
#define GSL_PAGESIZE 0x1000
#define GSL_PAGESIZE_SHIFT 12
#define GSL_TRUE 1
#define GSL_FALSE 0
#define GSL_EINVAL -1
/* ib desc of cmdbatch profiling buffer */
#define GSL_IBDESC_PROFILING_BUFFER 0x00000002
/*************/
/* device id */
/*************/
enum gsl_deviceid_t {
GSL_DEVICE_UNUSED = -1, /* gcc compiler warning fix, unsigned->signed */
GSL_DEVICE_ANY = 0,
GSL_DEVICE_3D = 1,
GSL_DEVICE_2DVG = 2,
GSL_DEVICE_2DVG_1 = 3,
GSL_DEVICE_MAX = 3,
GSL_DEVICE_FOOBAR = 0x7FFFFFFF
};
/****************************/
/* shared memory allocation */
/****************************/
struct gsl_memdesc_t {
union {
void *hostptr;
uint64_t hostptr64;
};
uint64_t gpuaddr;
union {
unsigned int size;
uint64_t size64;
};
uint64_t flags;
union {
uintptr_t priv;
uint64_t priv64;
};
};
struct gsl_command_buffer_object_t {
struct gsl_memdesc_t *memdesc;
uint64_t sizedwords;
uint64_t offset;
uint64_t flags;
};
struct gsl_memory_object_t {
struct gsl_memdesc_t *memdesc;
uint64_t sizedwords;
uint64_t offset;
uint64_t flags;
};
/****************/
/* timestamp id */
/****************/
enum gsl_timestamp_type_t {
GSL_TIMESTAMP_CONSUMED = 1, /* start-of-pipeline timestamp */
GSL_TIMESTAMP_RETIRED = 2, /* end-of-pipeline timestamp */
GSL_TIMESTAMP_QUEUED = 3, /* Timestamp of last submitted IB */
GSL_TIMESTAMP_MAX = 3,
GSL_TIMESTAMP_FOOBAR = 0x7FFFFFFF
};
enum gsl_context_type_t {
GSL_CONTEXT_TYPE_GENERIC = 1,
GSL_CONTEXT_TYPE_OPENGL = 2,
GSL_CONTEXT_TYPE_OPENVG = 3,
GSL_CONTEXT_TYPE_OPENCL = 4,
GSL_CONTEXT_TYPE_C2D = 5,
GSL_CONTEXT_TYPE_RS = 6,
GSL_CONTEXT_TYPE_DX = 7,
GSL_CONTEXT_TYPE_VK = 8,
GSL_CONTEXT_TYPE_FOOBAR = 0x7FFFFFFF
};
/*****************************/
/* Performance Counter Group */
/*****************************/
enum gsl_perfcountergroupid_t {
GSL_PERF_COUNTER_GROUP_INVALID = -1,
GSL_PERF_COUNTER_GROUP_CP = 0x0,
GSL_PERF_COUNTER_GROUP_RBBM = 0x1,
GSL_PERF_COUNTER_GROUP_PC = 0x2,
GSL_PERF_COUNTER_GROUP_VFD = 0x3,
GSL_PERF_COUNTER_GROUP_HLSQ = 0x4,
GSL_PERF_COUNTER_GROUP_VPC = 0x5,
GSL_PERF_COUNTER_GROUP_TSE = 0x6,
GSL_PERF_COUNTER_GROUP_RAS = 0x7,
GSL_PERF_COUNTER_GROUP_UCHE = 0x8,
GSL_PERF_COUNTER_GROUP_TP = 0x9,
GSL_PERF_COUNTER_GROUP_SP = 0xA,
GSL_PERF_COUNTER_GROUP_RB = 0xB,
GSL_PERF_COUNTER_GROUP_PWR = 0xC,
GSL_PERF_COUNTER_GROUP_VBIF = 0xD,
GSL_PERF_COUNTER_GROUP_VBIF_PWR = 0xE,
GSL_PERF_COUNTER_GROUP_MH = 0xF,
GSL_PERF_COUNTER_GROUP_PA_SU = 0x10,
GSL_PERF_COUNTER_GROUP_SQ = 0x11,
GSL_PERF_COUNTER_GROUP_SX = 0x12,
GSL_PERF_COUNTER_GROUP_TCF = 0x13,
GSL_PERF_COUNTER_GROUP_TCM = 0x14,
GSL_PERF_COUNTER_GROUP_TCR = 0x15,
GSL_PERF_COUNTER_GROUP_L2 = 0x16,
GSL_PERF_COUNTER_GROUP_VSC = 0x17,
GSL_PERF_COUNTER_GROUP_CCU = 0x18,
GSL_PERF_COUNTER_GROUP_LRZ = 0x19,
GSL_PERF_COUNTER_GROUP_CMP = 0x1A,
GSL_PERF_COUNTER_GROUP_ALWAYSON = 0x1B,
GSL_PERF_COUNTER_GROUP_SW = 0x1C,
GSL_PERF_COUNTER_GROUP_GMU_PWC = 0x1D,
GSL_PERF_COUNTER_GROUP_GLC = 0x1E,
GSL_PERF_COUNTER_GROUP_FCHE = 0x1F,
GSL_PERF_COUNTER_GROUP_MHUB = 0x20,
GSL_PERF_COUNTER_GROUP_MAX
};
/****************************************************************************/
/* system time usage */
/****************************************************************************/
enum gsl_systemtime_usage_t {
GSL_SYSTEMTIME_GENERIC = 0x0,
GSL_SYSTEMTIME_CL_PROFILING = 0x1,
};
#endif /* __HGSL_TYPES_H */

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HGSL_UTILS_H
#define __HGSL_UTILS_H
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/stdarg.h>
enum {
LOG_LEVEL_ERROR,
LOG_LEVEL_WARN,
LOG_LEVEL_INFO,
LOG_LEVEL_DEBUG,
LOG_LEVEL_NUM
};
#define LOGE(...) hgsl_log(LOG_LEVEL_ERROR, __func__, __LINE__, ##__VA_ARGS__)
#define LOGW(...) hgsl_log(LOG_LEVEL_WARN, __func__, __LINE__, ##__VA_ARGS__)
#define LOGI(...)
#define LOGD(...)
#define OS_UNUSED(param) ((void)param)
static inline void *hgsl_malloc(size_t size)
{
if (size <= PAGE_SIZE)
return kmalloc(size, GFP_KERNEL);
return vmalloc(size);
}
static inline void *hgsl_zalloc(size_t size)
{
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_KERNEL);
return vzalloc(size);
}
static inline void hgsl_free(void *ptr)
{
if (ptr != NULL) {
if (is_vmalloc_addr(ptr))
vfree(ptr);
else
kfree(ptr);
}
}
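/*
 * Minimal usage sketch of the helpers above: buffers obtained from
 * hgsl_malloc()/hgsl_zalloc() are always released with hgsl_free(),
 * which picks kfree() or vfree() from the address, so callers never
 * need to track which allocator was used. "nbytes" is an arbitrary
 * example size.
 */
#if 0 /* example only */
static int example_scratch_buffer(size_t nbytes)
{
	void *buf = hgsl_zalloc(nbytes);

	if (!buf)
		return -ENOMEM;
	/* ... fill and consume buf ... */
	hgsl_free(buf);
	return 0;
}
#endif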
static inline void hgsl_log(unsigned int level, const char * const fun,
unsigned int line, const char *format, ...)
{
va_list arglist;
char buffer[512];
const char *tag = NULL;
unsigned int offset = 0;
struct pid *pid = task_tgid(current);
struct task_struct *task = pid_task(pid, PIDTYPE_PID);
switch (level) {
case LOG_LEVEL_DEBUG:
tag = "DEBUG";
break;
case LOG_LEVEL_INFO:
tag = "INFO";
break;
case LOG_LEVEL_WARN:
tag = "WARNING";
break;
case LOG_LEVEL_ERROR:
tag = "ERROR";
break;
default:
tag = "UNKNOWN";
break;
}
if (task)
snprintf(buffer, sizeof(buffer), "HGSL [%s] [%s:%u] [%s:%u:%u]",
tag, fun, line, task->comm, task_pid_nr(task), current->pid);
else
snprintf(buffer, sizeof(buffer), "HGSL [%s] [%s:%u]",
tag, fun, line);
offset = strlen(buffer);
va_start(arglist, format);
vsnprintf(buffer + offset, sizeof(buffer) - offset, format, arglist);
va_end(arglist);
pr_err("%s\n", buffer);
}
#endif /* __HGSL_UTILS_H */

553
include/uapi/linux/hgsl.h Normal file
View File

@ -0,0 +1,553 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_MSM_HGSL_H
#define _UAPI_MSM_HGSL_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define HGSL_SYNC_TYPE_HSYNC 1
#define HGSL_SYNC_TYPE_ISYNC 2
struct hgsl_ibdesc {
__u64 gpuaddr;
__u64 sizedwords;
};
struct hgsl_mem_object {
__u64 gpuaddr;
__u64 sizedwords;
};
#define HGSL_IOCTL_BASE 'h'
#define HGSL_IORW(n, t) _IOWR(HGSL_IOCTL_BASE, n, t)
#define HGSL_IOW(n, t) _IOW(HGSL_IOCTL_BASE, n, t)
/**
* Return the current status of the doorbell system
*/
#define DB_STATE_Q_MASK 0xffff
#define DB_STATE_Q_UNINIT 1
#define DB_STATE_Q_INIT_DONE 2
#define DB_STATE_Q_FAULT 3
#define HGSL_IOCTL_DBQ_GETSTATE HGSL_IORW(0x01, __s32)
/**
* struct hgsl_db_queue_inf - initialize Doorbell
* @fd: File descriptor of DB queue
* @head_dwords: Size of DB queue header
* @head_off_dwords: offset of DB queue header
* @queue_dwords: size of DB queue
* @queue_off_dwords: offset of DB queue
* @db_signal: DB signal type, One of DB_SIGNAL_* values
*
**/
struct hgsl_db_queue_inf {
__s32 fd;
__u32 head_dwords;
__s32 head_off_dwords;
__u32 queue_dwords;
__s32 queue_off_dwords;
__u32 db_signal;
};
#define DB_SIGNAL_INVALID 0
#define DB_SIGNAL_GLOBAL_0 1
#define DB_SIGNAL_GLOBAL_1 2
#define DB_SIGNAL_LOCAL 3
#define DB_SIGNAL_MAX DB_SIGNAL_LOCAL
#define HGSL_IOCTL_DBQ_INIT \
HGSL_IORW(0x02, struct hgsl_db_queue_inf)
/**
* Assign a DB to current instance, input is DB index.
*/
#define HGSL_IOCTL_DBQ_ASSIGN HGSL_IORW(0x03, __u32)
/**
* struct hgsl_dbq_release_info - release DB queue
* @ref_count: remaining references; when it is zero, userspace informs
* PVM that it may clean up the queue
* @ctxt_id: Current context
*/
struct hgsl_dbq_release_info {
__u32 ref_count;
__u32 ctxt_id;
};
#define HGSL_IOCTL_DBQ_RELEASE \
HGSL_IORW(0x04, struct hgsl_dbq_release_info)
/**
* struct hgsl_fhi_issud_cmds - submit cmds to DB queue
* @ibs: db commands list
* @num_ibs: Number of db commands
* @bos: buffer objects list
* @num_bos: Number of buffer objects
* @context_id: Current context for these cmds
* @flags: reserved
* @timestamp: Userspace time stamp
*/
struct hgsl_fhi_issud_cmds {
__u64 ibs;
__u32 num_ibs;
__u64 bos;
__u32 num_bos;
__u32 context_id;
__u32 flags;
__u32 timestamp;
__u32 padding;
};
#define HGSL_IOCTL_ISSUE_CMDS \
HGSL_IORW(0x05, struct hgsl_fhi_issud_cmds)
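/*
 * Illustrative userspace sketch (not part of the ABI): submitting a
 * single IB through the doorbell path. "hgsl_fd", "ctx_id",
 * "ib_gpuaddr", "ib_dwords" and "ts" are hypothetical values owned by
 * the caller; error handling is trimmed.
 */
#if 0 /* example only, never compiled with this header */
#include <sys/ioctl.h>
static int hgsl_example_issue_cmds(int hgsl_fd, __u32 ctx_id,
				   __u64 ib_gpuaddr, __u64 ib_dwords, __u32 ts)
{
	struct hgsl_ibdesc ib = {
		.gpuaddr = ib_gpuaddr,
		.sizedwords = ib_dwords,
	};
	struct hgsl_fhi_issud_cmds cmds = {
		.ibs = (__u64)(unsigned long)&ib,
		.num_ibs = 1,
		.context_id = ctx_id,
		.timestamp = ts,
	};

	return ioctl(hgsl_fd, HGSL_IOCTL_ISSUE_CMDS, &cmds);
}
#endif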
/**
* struct hgsl_ctxt_create_info - create a DB context
* @context_id: id of the context being created
* @shadow_fd: File descriptor of the timestamp buffer
* @shadow_sop_offset: Offset for start timestamp written
* by GPU before cmds
* @shadow_eop_offset: Offset for end timestamp written
* by GPU after cmds
*/
struct hgsl_ctxt_create_info {
__u32 context_id;
__s32 shadow_fd;
__u32 shadow_sop_offset;
__u32 shadow_eop_offset;
};
struct hgsl_ioctl_ctxt_create_params {
__u32 devhandle;
__u32 type;
__u32 flags;
__u32 ctxthandle;
__u32 sync_type;
__u32 padding;
};
#define HGSL_IOCTL_CTXT_CREATE HGSL_IOW(0x10, \
struct hgsl_ioctl_ctxt_create_params)
struct hgsl_ioctl_ctxt_destroy_params {
__u32 devhandle;
__u32 ctxthandle;
__u32 rval;
__u32 padding;
};
#define HGSL_IOCTL_CTXT_DESTROY HGSL_IOW(0x11, \
struct hgsl_ioctl_ctxt_destroy_params)
/**
* struct hgsl_wait_ts_info - wait for a timestamp to be retired
* @context_id: The context that owns the timestamp
* @timestamp: The user timestamp to wait for
* @timeout: Expiry timeout
* @devhandle: GPU device handle
* @channel_id: hab channel id
*/
struct hgsl_wait_ts_info {
__u32 context_id;
__u32 timestamp;
__u32 timeout;
__u32 padding;
__u32 devhandle;
__u32 channel_id;
};
#define HGSL_IOCTL_WAIT_TIMESTAMP \
HGSL_IOW(0x12, struct hgsl_wait_ts_info)
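/*
 * Illustrative userspace sketch: block until "ts" retires on a context.
 * Only @context_id, @timestamp and @timeout are filled; leaving the
 * remaining fields zero is an assumption and may need adjusting for
 * multi-device or multi-channel setups.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
static int hgsl_example_wait_ts(int hgsl_fd, __u32 ctx_id, __u32 ts,
				__u32 timeout)
{
	struct hgsl_wait_ts_info wait = {
		.context_id = ctx_id,
		.timestamp = ts,
		.timeout = timeout,
	};

	return ioctl(hgsl_fd, HGSL_IOCTL_WAIT_TIMESTAMP, &wait);
}
#endif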
/**
* struct hgsl_ioctl_issueib_params - submit cmds to GPU
* @ibs: db commands list
* @devhandle: GPU device handle
* @ctxthandle: Current context handle for the cmds
* @timestamp: Userspace time stamp
* @flags: reserved
* @num_ibs: Number of ib cmds
* @rval: return value from host
* @channel_id: hab channel id
*/
struct hgsl_ioctl_issueib_params {
__u64 ibs;
__u32 devhandle;
__u32 ctxthandle;
__u32 timestamp;
__u32 flags;
__u32 num_ibs;
__u32 rval;
__u32 channel_id;
__u32 padding;
};
#define HGSL_IOCTL_ISSUE_IB \
HGSL_IORW(0x20, struct hgsl_ioctl_issueib_params)
struct hgsl_ioctl_issueib_with_alloc_list_params {
__u64 rpc_syncobj;
__u64 ibs;
__u64 allocations;
__u64 be_data;
__u32 devhandle;
__u32 ctxthandle;
__u32 timestamp;
__u32 flags;
__u32 num_ibs;
__u32 num_allocations;
__u32 rval;
__u32 channel_id;
};
#define HGSL_IOCTL_ISSUIB_WITH_ALLOC_LIST \
HGSL_IORW(0x21, struct hgsl_ioctl_issueib_with_alloc_list_params)
#define HGSL_HYP_GENERAL_MAX_SEND_NUM 2
#define HGSL_HYP_GENERAL_MAX_REPLY_NUM 1
struct hgsl_ioctl_hyp_generic_transaction_params {
__u64 send_data[HGSL_HYP_GENERAL_MAX_SEND_NUM];
__u64 reply_data[HGSL_HYP_GENERAL_MAX_REPLY_NUM];
__u32 send_size[HGSL_HYP_GENERAL_MAX_SEND_NUM];
__u32 reply_size[HGSL_HYP_GENERAL_MAX_REPLY_NUM];
__u32 send_num;
__u32 reply_num;
__u32 cmd_id;
__u64 ret_value;
};
#define HGSL_IOCTL_HYP_GENERIC_TRANSACTION \
HGSL_IORW(0x22, struct hgsl_ioctl_hyp_generic_transaction_params)
struct hgsl_ioctl_get_shadowts_mem_params {
__u64 size;
__u32 device_id;
__u32 ctxthandle;
__u32 flags;
__s32 fd;
};
#define HGSL_IOCTL_GET_SHADOWTS_MEM \
HGSL_IORW(0x23, struct hgsl_ioctl_get_shadowts_mem_params)
struct hgsl_ioctl_put_shadowts_mem_params {
__u32 ctxthandle;
__u32 padding;
};
#define HGSL_IOCTL_PUT_SHADOWTS_MEM \
HGSL_IOW(0x24, struct hgsl_ioctl_put_shadowts_mem_params)
struct hgsl_ioctl_mem_alloc_params {
__u64 memdesc;
__u32 sizebytes;
__u32 flags;
__s32 fd;
__u32 padding;
};
#define HGSL_IOCTL_MEM_ALLOC \
HGSL_IORW(0x25, struct hgsl_ioctl_mem_alloc_params)
struct hgsl_ioctl_mem_free_params {
__u64 memdesc;
};
#define HGSL_IOCTL_MEM_FREE \
HGSL_IORW(0x26, struct hgsl_ioctl_mem_free_params)
struct hgsl_ioctl_mem_map_smmu_params {
__u64 size;
__u64 offset;
__u64 uva;
__u64 memdesc;
__s32 fd;
__u32 memtype;
__u32 flags;
__u32 padding;
};
#define HGSL_IOCTL_MEM_MAP_SMMU \
HGSL_IORW(0x27, struct hgsl_ioctl_mem_map_smmu_params)
struct hgsl_ioctl_mem_unmap_smmu_params {
__u64 gpuaddr;
__u64 size;
__u32 memtype;
__u32 padding;
};
#define HGSL_IOCTL_MEM_UNMAP_SMMU \
HGSL_IOW(0x28, struct hgsl_ioctl_mem_unmap_smmu_params)
struct hgsl_ioctl_mem_cache_operation_params {
__u64 offsetbytes;
__u64 sizebytes;
__u64 gpuaddr;
__u32 operation;
__u32 padding;
};
#define HGSL_IOCTL_MEM_CACHE_OPERATION \
HGSL_IORW(0x29, struct hgsl_ioctl_mem_cache_operation_params)
struct hgsl_ioctl_read_ts_params {
__u32 devhandle;
__u32 ctxthandle;
__u32 type;
__u32 timestamp;
};
#define HGSL_IOCTL_READ_TIMESTAMP \
HGSL_IORW(0x2A, struct hgsl_ioctl_read_ts_params)
struct hgsl_ioctl_check_ts_params {
__u32 devhandle;
__u32 ctxthandle;
__u32 type;
__u32 timestamp;
__u32 rval;
__u32 padding;
};
#define HGSL_IOCTL_CHECK_TIMESTAMP \
HGSL_IORW(0x2B, struct hgsl_ioctl_check_ts_params)
struct hgsl_ioctl_syncobj_wait_multiple_params {
__u64 num_syncobjs;
__u64 rpc_syncobj;
__u64 status;
__u32 timeout_ms;
__u32 result;
};
#define HGSL_IOCTL_SYNCOBJ_WAIT_MULTIPLE \
HGSL_IORW(0x2C, struct hgsl_ioctl_syncobj_wait_multiple_params)
struct hgsl_ioctl_set_metainfo_params {
__u64 memdesc_priv;
__u64 metainfo;
__u32 flags;
__u32 metainfo_len;
};
#define HGSL_IOCTL_SET_METAINFO \
HGSL_IORW(0x2D, struct hgsl_ioctl_set_metainfo_params)
#define HGSL_IOCTL_GET_SYSTEM_TIME \
HGSL_IORW(0x2E, __u64)
struct hgsl_ioctl_perfcounter_select_params {
__u64 groups;
__u64 counter_ids;
__u64 counter_val_regs;
__u64 counter_val_hi_regs;
__u32 devhandle;
__u32 ctxthandle;
__u32 num_counters;
__u32 rval;
};
#define HGSL_IOCTL_PERFCOUNTER_SELECT \
HGSL_IORW(0x30, struct hgsl_ioctl_perfcounter_select_params)
struct hgsl_ioctl_perfcounter_deselect_params {
__u64 groups;
__u64 counter_ids;
__u32 devhandle;
__u32 ctxthandle;
__u32 timestamp;
__u32 num_counters;
};
#define HGSL_IOCTL_PERFCOUNTER_DESELECT \
HGSL_IOW(0x31, struct hgsl_ioctl_perfcounter_deselect_params)
struct hgsl_ioctl_perfcounter_query_selections_params {
__u64 selections;
__u32 devhandle;
__u32 ctxthandle;
__u32 num_counters;
__u32 group;
__u32 max_counters;
__u32 padding;
};
#define HGSL_IOCTL_PERFCOUNTER_QUERY_SELECTION \
HGSL_IORW(0x32, struct hgsl_ioctl_perfcounter_query_selections_params)
struct hgsl_ioctl_perfcounter_read_params {
__u64 value;
__u32 devhandle;
__u32 group;
__u32 counter;
__u32 rval;
};
#define HGSL_IOCTL_PERFCOUNTER_READ \
HGSL_IORW(0x33, struct hgsl_ioctl_perfcounter_read_params)
/**
* struct hgsl_hsync_fence_create - create an h-sync fence
* @timestamp: The user timestamp attached to the fence
* @context_id: The context on which to create the fence
* @fence_fd: File descriptor of the newly created fence
*/
struct hgsl_hsync_fence_create {
__u32 timestamp;
__u32 padding;
__s32 context_id;
__s32 fence_fd;
};
#define HGSL_IOCTL_HSYNC_FENCE_CREATE \
HGSL_IOW(0x13, struct hgsl_hsync_fence_create)
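/*
 * Illustrative userspace sketch: obtain a sync_file fd that signals
 * once the given user timestamp retires on "ctx_id". "hgsl_fd" and
 * "ctx_id" are hypothetical; on success the fd comes back in
 * create.fence_fd.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
static int hgsl_example_hsync_fence(int hgsl_fd, __s32 ctx_id, __u32 ts)
{
	struct hgsl_hsync_fence_create create = {
		.timestamp = ts,
		.context_id = ctx_id,
	};

	if (ioctl(hgsl_fd, HGSL_IOCTL_HSYNC_FENCE_CREATE, &create))
		return -1;

	return create.fence_fd;
}
#endif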
/**
* Create an isync timeline - the parameter returns the id of the new timeline
*/
#define HGSL_IOCTL_ISYNC_TIMELINE_CREATE \
HGSL_IOW(0x14, __u32)
/**
* Destroy an isync timeline - the parameter is the id of the timeline to be released
*/
#define HGSL_IOCTL_ISYNC_TIMELINE_DESTROY \
HGSL_IOW(0x15, __u32)
/* Use the padding field to indicate whether ts is valid when creating an isync fence */
#define HGSL_ISYNC_FENCE_CREATE_USE_TS 1
/**
* struct hgsl_isync_create_fence - create an i-sync fence
* @timeline_id: The timeline on which to create the fence
* @fence_id: id of the newly created fence
* @ts: optional timestamp; set it (and mark it valid via @padding) to
* use isync forward
*/
struct hgsl_isync_create_fence {
__u32 timeline_id;
__s32 fence_id;
__u32 ts;
__u32 padding;
};
#define HGSL_IOCTL_ISYNC_FENCE_CREATE \
HGSL_IOW(0x16, \
struct hgsl_isync_create_fence)
/**
* struct hgsl_isync_signal_fence - signal an i-sync fence
* @timeline_id: The timeline that owns the fence
* @fence_id: id of fence to be signalled
*/
struct hgsl_isync_signal_fence {
__u32 timeline_id;
__s32 fence_id;
};
#define HGSL_IOCTL_ISYNC_FENCE_SIGNAL \
HGSL_IOW(0x17, \
struct hgsl_isync_signal_fence)
/**
* struct hgsl_isync_forward - forward an i-sync timeline
* @timeline_id: The timeline to forward
* @ts: The timestamp to forward the timeline to; pending fences at or
* below it are signalled
*/
struct hgsl_isync_forward {
__u32 timeline_id;
__s32 ts;
};
#define HGSL_IOCTL_ISYNC_FORWARD \
HGSL_IOW(0x18, \
struct hgsl_isync_forward)
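/*
 * Illustrative userspace sketch tying the isync ioctls together: create
 * a timeline, take a fence at timestamp 10 on it, then forward the
 * timeline so that fence signals. "hgsl_fd" is hypothetical, error
 * handling is trimmed, and the returned fence fd is in create.fence_id.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
static int hgsl_example_isync(int hgsl_fd)
{
	__u32 timeline_id = 0;
	struct hgsl_isync_create_fence create;
	struct hgsl_isync_forward fwd;

	/* The new timeline id is written back through the parameter. */
	if (ioctl(hgsl_fd, HGSL_IOCTL_ISYNC_TIMELINE_CREATE, &timeline_id))
		return -1;

	create = (struct hgsl_isync_create_fence) {
		.timeline_id = timeline_id,
		.ts = 10,
		.padding = HGSL_ISYNC_FENCE_CREATE_USE_TS,
	};
	if (ioctl(hgsl_fd, HGSL_IOCTL_ISYNC_FENCE_CREATE, &create))
		return -1;

	fwd = (struct hgsl_isync_forward) {
		.timeline_id = timeline_id,
		.ts = 10,
	};
	return ioctl(hgsl_fd, HGSL_IOCTL_ISYNC_FORWARD, &fwd);
}
#endif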
/**
* struct hgsl_timeline_create - create an isync timeline with 64 or 32 bit timestamps
* @initial_ts: The initial timestamp value
* @timeline_id: id of the newly created timeline
* @flags: timeline creation flags
*/
struct hgsl_timeline_create {
__u64 initial_ts;
__u32 timeline_id;
__s32 flags;
};
#define HGSL_IOCTL_TIMELINE_CREATE \
HGSL_IORW(0x19, struct hgsl_timeline_create)
/**
* struct hgsl_timeline_val - A container to store a timeline/sequence number
* pair.
* @timepoint: timepoint to signal/query/wait
* @timeline_id: The timeline identifier to signal/query/wait
*
* A container to store a timeline/timepoint pair used by the query, signal and wait
* ioctls.
*/
struct hgsl_timeline_val {
__u64 timepoint;
__u32 timeline_id;
__u32 padding;
};
/**
* struct hgsl_timeline_query - query multiple timelines
* @timelines: Address of an array of &struct hgsl_timeline_val entries
* @count: Number of entries in @timelines
* @timelines_size: Size of each entry in @timelines
*/
struct hgsl_timeline_query {
__u64 timelines;
__u32 count;
__u32 timelines_size;
};
#define HGSL_IOCTL_TIMELINE_QUERY \
HGSL_IORW(0x1A, struct hgsl_timeline_query)
/**
* struct hgsl_timeline_signal - signal multiple timelines
* @timelines: Address of an array of &struct hgsl_timeline_val entries
* @count: Number of entries in @timelines
* @timelines_size: Size of each entry in @timelines
*/
struct hgsl_timeline_signal {
__u64 timelines;
__u32 count;
__u32 timelines_size;
};
#define HGSL_IOCTL_TIMELINE_SIGNAL \
HGSL_IOW(0x1B, struct hgsl_timeline_signal)
#define HGSL_TIMELINE_WAIT_ALL 1
#define HGSL_TIMELINE_WAIT_ANY 2
/**
* struct hgsl_timeline_wait - wait multiple timelines
* @timeout_nanosec: Number of nanoseconds to wait for the signal
* @timelines: Address of an array of &struct hgsl_timeline_val entries
* @count: Number of entries in @timelines
* @timelines_size: Size of each entry in @timelines
* @flags: One of HGSL_TIMELINE_WAIT_ALL or HGSL_TIMELINE_WAIT_ANY
*/
struct hgsl_timeline_wait {
__u64 timeout_nanosec;
__u64 timelines;
__u32 count;
__u32 timelines_size;
__u32 flags;
/* private: padding for 64 bit compatibility */
__u32 padding;
};
#define HGSL_IOCTL_TIMELINE_WAIT \
HGSL_IOW(0x1C, struct hgsl_timeline_wait)
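/*
 * Illustrative userspace sketch: wait until either of two timelines
 * reaches "point", with a relative timeout in nanoseconds. "hgsl_fd",
 * "tl_a" and "tl_b" are hypothetical ids obtained from
 * HGSL_IOCTL_TIMELINE_CREATE.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
static int hgsl_example_wait_any(int hgsl_fd, __u32 tl_a, __u32 tl_b,
				 __u64 point, __u64 timeout_ns)
{
	struct hgsl_timeline_val vals[2] = {
		{ .timepoint = point, .timeline_id = tl_a },
		{ .timepoint = point, .timeline_id = tl_b },
	};
	struct hgsl_timeline_wait wait = {
		.timeout_nanosec = timeout_ns,
		.timelines = (__u64)(unsigned long)vals,
		.count = 2,
		.timelines_size = sizeof(vals[0]),
		.flags = HGSL_TIMELINE_WAIT_ANY,
	};

	return ioctl(hgsl_fd, HGSL_IOCTL_TIMELINE_WAIT, &wait);
}
#endif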
#endif /* _UAPI_MSM_HGSL_H */

View File

@ -2,6 +2,7 @@ virtio_mmio.ko
virtio_blk.ko
virtio_console.ko
msm_hab.ko
qcom_hgsl.ko
qcom_scm_hab.ko
qcom-scm.ko
secure_buffer.ko