Add 'qcom/opensource/mm-drivers/' from commit 'f37d625cda5ef9f033b3d34d9c1a64c52ebc51c2'

git-subtree-dir: qcom/opensource/mm-drivers
git-subtree-mainline: 880d405719
git-subtree-split: f37d625cda
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/mm-drivers
tag: DISPLAY.LA.4.0.r2-07600-lanai.0
David Wronek 2024-10-06 16:45:02 +02:00
commit f9b254670f
45 changed files with 9571 additions and 0 deletions

Android.bp

@@ -0,0 +1,36 @@
headers_src = [
"sync_fence/include/uapi/*/**/*.h",
]
mm_drivers_headers_out = [
"sync_fence/qcom_sync_file.h",
]
mm_drivers_kernel_headers_verbose = "--verbose "
genrule {
name: "qti_generate_mm_drivers_kernel_headers",
tools: [
"headers_install.sh",
"unifdef"
],
tool_files: [
"mm_drivers_kernel_headers.py",
],
srcs: headers_src,
cmd: "python3 $(location mm_drivers_kernel_headers.py) " +
mm_drivers_kernel_headers_verbose +
"--header_arch arm64 " +
"--gen_dir $(genDir) " +
"--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " +
"--unifdef $(location unifdef) " +
"--headers_install $(location headers_install.sh)",
out: mm_drivers_headers_out,
}
cc_library_headers {
name: "qti_mm_drivers_kernel_headers",
generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
vendor: true,
recovery_available: true
}

Android.mk

@@ -0,0 +1,16 @@
MM_DRIVER_PATH := $(call my-dir)
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk
ifneq ($(TARGET_BOARD_PLATFORM), taro)
include $(MM_DRIVER_PATH)/hw_fence/Android.mk
include $(MM_DRIVER_PATH)/sync_fence/Android.mk
endif
endif

BUILD.bazel

@@ -0,0 +1,22 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
package(
default_visibility = [
"//visibility:public",
],
)
ddk_headers(
name = "mm_drivers_configs",
hdrs = glob(["config/*.h"]),
includes = ["config"],
)
ddk_headers(
name = "mm_drivers_headers",
hdrs = [
":mm_drivers_configs",
"//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers",
"//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers",
],
)

config/kalamammdrivers.conf

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020, The Linux Foundation. All rights reserved.
export CONFIG_MSM_EXT_DISPLAY=y
export CONFIG_QCOM_SPEC_SYNC=y
export CONFIG_QTI_HW_FENCE=y

config/kalamammdriversconf.h

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#define CONFIG_MSM_EXT_DISPLAY 1
#define CONFIG_QCOM_SPEC_SYNC 1
#define CONFIG_QTI_HW_FENCE 1

hw_fence/Android.mk

@@ -0,0 +1,42 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_hw_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := hw-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_hw_fence.ko
LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

hw_fence/BUILD.bazel

@@ -0,0 +1,16 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_hw_fence.bzl", "define_hw_fence")
package(
default_visibility = [
"//visibility:public"
],
)
ddk_headers(
name = "hw_fence_headers",
hdrs = glob(["include/*.h"]),
includes = ["include"]
)
define_hw_fence()

hw_fence/Kbuild

@@ -0,0 +1,28 @@
# SPDX-License-Identifier: GPL-2.0-only
KDIR := $(TOP)/kernel_platform/msm-kernel
include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \
-I$(MSM_HW_FENCE_ROOT)hw_fence/include/
ifdef CONFIG_QTI_HW_FENCE
obj-m += msm_hw_fence.o
msm_hw_fence-y := src/msm_hw_fence.o \
src/hw_fence_drv_priv.o \
src/hw_fence_drv_utils.o \
src/hw_fence_drv_debug.o \
src/hw_fence_drv_ipc.o
ifneq ($(CONFIG_ARCH_KALAMA), y)
LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \
-I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/
msm_hw_fence-y += src/msm_hw_fence_synx_translation.o
endif
msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

hw_fence/Kconfig

@@ -0,0 +1,4 @@
config QTI_HW_FENCE
bool "HW Fence"
help
Enable the hw_fence module

hw_fence/Makefile

@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

hw_fence/defconfig

@@ -0,0 +1 @@
CONFIG_QTI_HW_FENCE=y

hw_fence/define_hw_fence.bzl

@@ -0,0 +1,46 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
ddk_module(
name = "{}_msm_hw_fence".format(tv),
srcs = [
"src/hw_fence_drv_debug.c",
"src/hw_fence_drv_ipc.c",
"src/hw_fence_drv_priv.c",
"src/hw_fence_drv_utils.c",
"src/msm_hw_fence.c",
"src/msm_hw_fence_synx_translation.c",
],
out = "msm_hw_fence.ko",
defconfig = "defconfig",
kconfig = "Kconfig",
conditional_srcs = {
"CONFIG_DEBUG_FS": {
True: ["src/hw_fence_ioctl.c"],
},
},
deps = [
"//msm-kernel:all_headers",
"//vendor/qcom/opensource/synx-kernel:synx_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_msm_hw_fence_dist".format(tv),
data = [":{}_msm_hw_fence".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_hw_fence():
for (t, v) in get_all_variants():
_define_module(t, v)

hw_fence/include/hw_fence_drv_debug.h

@@ -0,0 +1,209 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_DEBUG
#define __HW_FENCE_DRV_DEBUG
#include "hw_fence_drv_ipc.h"
#define HW_FENCE_NAME_SIZE 64
enum hw_fence_drv_prio {
HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */
HW_FENCE_LOW = 0x000002, /* Low density debug messages */
HW_FENCE_INFO = 0x000004, /* Informational prints */
HW_FENCE_INIT = 0x000008, /* Initialization logs */
HW_FENCE_QUEUE = 0x000010, /* Queue logs */
HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */
HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */
HW_FENCE_LOCK = 0x000080, /* Lock-related messages */
HW_FENCE_PRINTK = 0x010000,
};
extern u32 msm_hw_fence_debug_level;
#define dprintk(__level, __fmt, ...) \
do { \
if (msm_hw_fence_debug_level & __level) \
if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \
pr_err(__fmt, ##__VA_ARGS__); \
} while (0)
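/*
 * Gating example (illustrative): with msm_hw_fence_debug_level set to
 * (HW_FENCE_QUEUE | HW_FENCE_PRINTK), HWFNC_DBG_Q() prints through pr_err()
 * while HWFNC_DBG_IRQ() is filtered out at runtime. A message is emitted
 * only when both its level bit and HW_FENCE_PRINTK are set.
 */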
#define HWFNC_ERR(fmt, ...) \
pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \
__builtin_return_address(0), ##__VA_ARGS__)
#define HWFNC_DBG_H(fmt, ...) \
dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_L(fmt, ...) \
dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_INFO(fmt, ...) \
dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_INIT(fmt, ...) \
dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_Q(fmt, ...) \
dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_LUT(fmt, ...) \
dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_IRQ(fmt, ...) \
dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_LOCK(fmt, ...) \
dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_DUMP(prio, fmt, ...) \
dprintk(prio, "[hwfence:%s:%d][dbgd]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_WARN(fmt, ...) \
pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \
__builtin_return_address(0), ##__VA_ARGS__)
int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data);
#if IS_ENABLED(CONFIG_DEBUG_FS)
int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id);
void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio,
struct msm_hw_fence_client *hw_fence_client);
void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash,
u32 count);
void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
extern const struct file_operations hw_sync_debugfs_fops;
struct hw_fence_out_clients_map {
int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */
int ipc_client_id_pid; /* ipc client physical id for the hw fence client */
int ipc_signal_id; /* ipc signal id for the hw fence client */
};
/* These signals are the ones that the actual clients should trigger; the hw-fence driver
 * does not need any knowledge of these signals. They are added here for debugging purposes.
 * Only the fence controller and the clients know these IDs, since they
 * are used to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller'.
 * The index of this struct must match the enum hw_fence_client_id
*/
static const struct hw_fence_out_clients_map
dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4}, /* CTL1 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6}, /* CTL2 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8}, /* CTL3 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */
};
/**
* struct hw_dma_fence - fences created by hw-fence for debugging.
* @base: base dma-fence structure; this must remain at the beginning of the struct.
* @name: name of each fence.
* @client_handle: handle for the client owner of this fence, this is returned by the hw-fence
* driver after a successful registration of the client and used by this fence
* during release.
*/
struct hw_dma_fence {
struct dma_fence base;
char name[HW_FENCE_NAME_SIZE];
void *client_handle;
};
static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence)
{
return container_of(fence, struct hw_dma_fence, base);
}
static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock)
{
struct hw_dma_fence *dma_fence;
int fence_idx;
for (fence_idx = i; fence_idx >= 0 ; fence_idx--) {
kfree(fences_lock[fence_idx]);
dma_fence = to_hw_dma_fence(fences[fence_idx]);
kfree(dma_fence);
}
kfree(fences_lock);
kfree(fences);
}
static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence)
{
return true;
}
static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence)
{
if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) {
HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n");
return;
}
/* release hw-fence */
if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base))
HWFNC_ERR("failed to release hw_fence!\n");
}
static void hw_fence_dbg_release(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence;
if (!fence)
return;
HWFNC_DBG_H("release backing fence %pK\n", fence);
hw_dma_fence = to_hw_dma_fence(fence);
if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
_hw_fence_release(hw_dma_fence);
kfree(fence->lock);
kfree(hw_dma_fence);
}
static struct dma_fence_ops hw_fence_dbg_ops = {
.get_driver_name = hw_fence_dbg_get_driver_name,
.get_timeline_name = hw_fence_dbg_get_timeline_name,
.enable_signaling = hw_fence_dbg_enable_signaling,
.wait = dma_fence_default_wait,
.release = hw_fence_dbg_release,
};
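/*
 * Minimal creation sketch (illustrative, not part of the original driver):
 * backing a debug fence with hw_fence_dbg_ops. Assumes linux/dma-fence.h,
 * linux/slab.h and linux/spinlock.h are reachable here; the lock is allocated
 * separately because hw_fence_dbg_release() frees fence->lock.
 */
static inline struct dma_fence *hw_fence_dbg_create_example(void *client_handle,
	u64 context, u64 seqno)
{
	struct hw_dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	spinlock_t *fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);

	if (!fence || !fence_lock) {
		kfree(fence);
		kfree(fence_lock);
		return NULL;
	}

	spin_lock_init(fence_lock);
	snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence_dbg:%llu:%llu", context, seqno);
	fence->client_handle = client_handle;
	dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, context, seqno);

	return &fence->base;
}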
#endif /* CONFIG_DEBUG_FS */
#endif /* __HW_FENCE_DRV_DEBUG */

hw_fence/include/hw_fence_drv_ipc.h

@@ -0,0 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_IPC_H
#define __HW_FENCE_DRV_IPC_H
/* ipc clients virtual client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8
#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9
#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11
#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12
#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25
#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128
#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129
#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130
#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131
#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132
#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133
#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134
#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135
/* ipc clients physical client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3
#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4
#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5
#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8
#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9
#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11
#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12
#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13
#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14
#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15
#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16
#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17
#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2
#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4
#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */
#define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */
#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \
(base + 0x14 + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + (0x1000*c))
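/*
 * Worked example: for protocol p=2 and client c=3,
 * IPC_PROTOCOLp_CLIENTc_SEND(base, 2, 3) = base + 0xc + 0x80000 + 0x3000.
 */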
/**
* hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair.
* @drv_data: driver data.
* @tx_client_id: ipc client id that sends the ipc signal.
* @rx_client_id: ipc client id that receives the ipc signal.
* @signal_id: signal id to send.
*
* This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id'
*/
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
u32 tx_client_id, u32 rx_client_id, u32 signal_id);
/**
* hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver.
* @drv_data: driver data.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client.
* @drv_data: driver data.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the
* hw fence driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the
* hw fence driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence
* driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc signal id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs to update rxq, false otherwise
*/
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_ipcc_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt for
* already signaled fences
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs ipc interrupt for signaled fences, false otherwise
*/
bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id);
#endif /* __HW_FENCE_DRV_IPC_H */

hw_fence/include/hw_fence_drv_priv.h

@@ -0,0 +1,533 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_INTERNAL_H
#define __HW_FENCE_DRV_INTERNAL_H
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/soc/qcom/msm_hw_fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
/* max u64 to indicate invalid fence */
#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL)
/* hash algorithm constants */
#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */
#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */
/* number of queues per type (i.e. ctrl or client queues) */
#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */
#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */
/* hfi headers calculation */
#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header))
#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header))
#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES))
#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num))
/*
 * Max payload size is the biggest message that can be put in the CTRL queue.
 * In this case the max message is calculated as follows, using 32-bit elements:
 * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error (i.e. 24 bytes)
*/
#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32))
#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE
#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload))
/* Locks area for all clients with RxQ */
#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num)
#define HW_FENCE_TX_QUEUE 1
#define HW_FENCE_RX_QUEUE 2
/* Client ID for the internal join fence; used by the framework when creating a join-fence */
#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0)
/**
* msm hw fence flags:
* MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled
*/
#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0)
/**
* MSM_HW_FENCE_MAX_JOIN_PARENTS:
* Maximum number of parents that a fence can have for a join-fence
*/
#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3
/**
* HW_FENCE_PAYLOAD_REV:
* Payload version with major and minor version information
*/
#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF))
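/* e.g. HW_FENCE_PAYLOAD_REV(1, 0) == 0x0100: major version in bits 15:8, minor in bits 7:0 */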
/**
* HW_FENCE_EVENT_MAX_DATA:
* Maximum data that can be added to the debug event
*/
#define HW_FENCE_EVENT_MAX_DATA 12
enum hw_fence_lookup_ops {
HW_FENCE_LOOKUP_OP_CREATE = 0x1,
HW_FENCE_LOOKUP_OP_DESTROY,
HW_FENCE_LOOKUP_OP_CREATE_JOIN,
HW_FENCE_LOOKUP_OP_FIND_FENCE
};
/**
* enum hw_fence_client_data_id - Enum with the clients having client_data, an optional
* parameter passed from the waiting client and returned
* to it upon fence signaling. Only the first HW Fence
* Client for non-VAL clients (e.g. GFX, IPE, VPU) has
* client_data.
* @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0.
* @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client 0.
* @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client 0.
* @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0.
* @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1.
* @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an
* invalid hw_fence_client_data_id
*/
enum hw_fence_client_data_id {
HW_FENCE_CLIENT_DATA_ID_CTX0,
HW_FENCE_CLIENT_DATA_ID_IPE,
HW_FENCE_CLIENT_DATA_ID_VPU,
HW_FENCE_CLIENT_DATA_ID_VAL0,
HW_FENCE_CLIENT_DATA_ID_VAL1,
HW_FENCE_MAX_CLIENTS_WITH_DATA,
};
/**
* struct msm_hw_fence_queue - Structure holding the data of the hw fence queues.
* @va_queue: pointer to the virtual address of the queue elements
* @q_size_bytes: size of the queue
* @va_header: pointer to the hfi header virtual address
* @pa_queue: physical address of the queue
* @rd_wr_idx_start: start read and write indexes for client queue (zero by default)
* @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default)
* @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and
* hfi_header->tx_wm is updated instead
*/
struct msm_hw_fence_queue {
void *va_queue;
u32 q_size_bytes;
void *va_header;
phys_addr_t pa_queue;
u32 rd_wr_idx_start;
u32 rd_wr_idx_factor;
bool skip_wr_idx;
};
/**
* enum payload_type - Enum with the queue payload types.
* HW_FENCE_PAYLOAD_TYPE_1: client queue payload
* HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id
*/
enum payload_type {
HW_FENCE_PAYLOAD_TYPE_1 = 1,
HW_FENCE_PAYLOAD_TYPE_2
};
/**
* struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
* @client_id: internal client_id used within HW fence driver; index into the clients struct
* @client_id_ext: external client_id, equal to client_id except for clients with configurable
* number of sub-clients (e.g. ife clients)
* @mem_descriptor: hfi header memory descriptor
* @queues: queues descriptor
* @queues_num: number of client queues
* @fence_error_cb: function called for waiting clients that need HLOS notification of fence error
* @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to
* client during invocation of callback function
* @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data
* @ipc_signal_id: id of the signal to be triggered for this client
* @ipc_client_vid: virtual id of the ipc client for this hw fence driver client
* @ipc_client_pid: physical id of the ipc client for this hw fence driver client
* @update_rxq: bool to indicate if client uses rx-queue
* @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
* @wait_queue: wait queue for the validation clients
* @val_signal: doorbell flag to signal the validation clients in the wait queue
*/
struct msm_hw_fence_client {
enum hw_fence_client_id client_id;
enum hw_fence_client_id client_id_ext;
struct msm_hw_fence_mem_addr mem_descriptor;
struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
int queues_num;
msm_hw_fence_error_cb_t fence_error_cb;
void *fence_error_cb_userdata;
struct mutex error_cb_lock;
int ipc_signal_id;
int ipc_client_vid;
int ipc_client_pid;
bool update_rxq;
bool send_ipc;
#if IS_ENABLED(CONFIG_DEBUG_FS)
wait_queue_head_t wait_queue;
atomic_t val_signal;
#endif /* CONFIG_DEBUG_FS */
};
/**
* struct msm_hw_fence_mem_data - Structure holding internal memory attributes
*
* @attrs: attributes for the memory allocation
*/
struct msm_hw_fence_mem_data {
unsigned long attrs;
};
/**
* struct msm_hw_fence_dbg_data - Structure holding debugfs data
*
* @root: debugfs root
* @entry_rd: flag to indicate if debugfs dumps a single line or table
* @context_rd: debugfs setting to indicate which context id to dump
* @seqno_rd: debugfs setting to indicate which seqno to dump
* @hw_fence_sim_release_delay: delay in microseconds for the debugfs node that simulates the
* hw-fences behavior, to release the hw-fences
* @create_hw_fences: boolean to continuously create hw-fences within debugfs
* @clients_list: list of debug clients registered
* @clients_list_lock: lock to synchronize access to the clients list
* @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock
*/
struct msm_hw_fence_dbg_data {
struct dentry *root;
bool entry_rd;
u64 context_rd;
u64 seqno_rd;
u32 hw_fence_sim_release_delay;
bool create_hw_fences;
struct list_head clients_list;
struct mutex clients_list_lock;
u64 lock_wake_cnt;
};
/**
* struct hw_fence_client_type_desc - Structure holding client type properties, including static
* properties and client queue properties read from device-tree.
*
* @name: name of client type, used to parse properties from device-tree
* @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
* HW_FENCE_CLIENT_ID_CTL0 for DPU clients
* @max_clients_num: maximum number of clients of given client type
* @clients_num: number of clients of given client type
* @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
* two (for both Tx and Rx Queues)
* @queue_entries: number of entries per client queue of given client type
* @start_padding: size of padding between queue table header and first queue header in bytes
* @end_padding: size of padding between queue header(s) and first queue payload in bytes
* @mem_size: size of memory allocated for client queue(s) per client in bytes
* @txq_idx_start: start read and write indexes for client tx queue (zero by default)
* @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default)
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
*/
struct hw_fence_client_type_desc {
char *name;
enum hw_fence_client_id init_id;
u32 max_clients_num;
u32 clients_num;
u32 queues_num;
u32 queue_entries;
u32 start_padding;
u32 end_padding;
u32 mem_size;
u32 txq_idx_start;
u32 txq_idx_factor;
bool skip_txq_wr_idx;
};
/**
* struct hw_fence_client_queue_desc - Structure holding client queue properties for a client.
*
* @type: pointer to client queue properties of client type
* @start_offset: start offset of client queue memory region, from beginning of carved-out memory
* allocation for hw fence driver
*/
struct hw_fence_client_queue_desc {
struct hw_fence_client_type_desc *type;
u32 start_offset;
};
/**
* struct hw_fence_driver_data - Structure holding internal hw-fence driver data
*
* @dev: device driver pointer
* @resources_ready: value set by driver at end of probe, once all resources are ready
* @hw_fence_table_entries: total number of hw-fences in the global table
* @hw_fence_mem_fences_table_size: hw-fences global table total size
* @hw_fence_queue_entries: total number of entries that can be available in the queue
* @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
* @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
* @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
* @hw_fence_client_types: descriptors of properties for each hw fence client type
* @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
* @clients_num: number of supported hw fence clients (configured based on device-tree)
* @hw_fences_tbl: pointer to the hw-fences table
* @hw_fences_tbl_cnt: number of elements in the hw-fence table
* @events: start address of hw fence debug events
* @total_events: total number of hw fence debug events supported
* @client_lock_tbl: pointer to the per-client locks table
* @client_lock_tbl_cnt: number of elements in the locks table
* @hw_fences_mem_desc: memory descriptor for the hw-fence table
* @clients_locks_mem_desc: memory descriptor for the locks table
* @ctrl_queue_mem_desc: memory descriptor for the ctrl queues
* @ctrl_queues: pointer to the ctrl queues
* @io_mem_base: pointer to the carved-out io memory
* @res: resources for the carved out memory
* @size: size of the carved-out memory
* @label: label for the carved-out memory (this is used by SVM to find the memory)
* @peer_name: peer name for this carved-out memory
* @rm_nb: hyp resource manager notifier
* @memparcel: memparcel for the allocated memory
* @used_mem_size: total memory size of global table, lock region, and ctrl and client queues
* @db_label: doorbell label
* @rx_dbl: handle to the Rx doorbell
* @debugfs_data: debugfs info
* @ipcc_reg_base: base for ipcc regs mapping
* @ipcc_io_mem: base for the ipcc io mem map
* @ipcc_size: size of the ipcc io mem mapping
* @protocol_id: ipcc protocol id used by this driver
* @ipcc_client_vid: ipcc client virtual-id for this driver
* @ipcc_client_pid: ipcc client physical-id for this driver
* @ipc_clients_table: table with the ipcc mapping for each client of this driver
* @qtime_reg_base: qtimer register base address
* @qtime_io_mem: qtimer io mem map
* @qtime_size: qtimer io mem map size
* @client_id_mask: bitmask for tracking registered client_ids
* @clients_register_lock: lock to synchronize clients registration and deregistration
* @clients: table with the handles of the registered clients; size is equal to clients_num
* @vm_ready: flag to indicate if vm has been initialized
* @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized
*/
struct hw_fence_driver_data {
struct device *dev;
bool resources_ready;
/* Table & Queues info */
u32 hw_fence_table_entries;
u32 hw_fence_mem_fences_table_size;
u32 hw_fence_queue_entries;
/* ctrl queues */
u32 hw_fence_ctrl_queue_size;
u32 hw_fence_mem_ctrl_queues_size;
/* client queues */
struct hw_fence_client_queue_desc *hw_fence_client_queue_size;
struct hw_fence_client_type_desc *hw_fence_client_types;
u32 rxq_clients_num;
u32 clients_num;
/* HW Fences Table VA */
struct msm_hw_fence *hw_fences_tbl;
u32 hw_fences_tbl_cnt;
/* events */
struct msm_hw_fence_event *events;
u32 total_events;
/* Table with a Per-Client Lock */
u64 *client_lock_tbl;
u32 client_lock_tbl_cnt;
/* Memory Descriptors */
struct msm_hw_fence_mem_addr hw_fences_mem_desc;
struct msm_hw_fence_mem_addr clients_locks_mem_desc;
struct msm_hw_fence_mem_addr ctrl_queue_mem_desc;
struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES];
/* carved out memory */
void __iomem *io_mem_base;
struct resource res;
size_t size;
u32 label;
u32 peer_name;
struct notifier_block rm_nb;
u32 memparcel;
u32 used_mem_size;
/* doorbell */
u32 db_label;
/* VM virq */
void *rx_dbl;
/* debugfs */
struct msm_hw_fence_dbg_data debugfs_data;
/* ipcc regs */
phys_addr_t ipcc_reg_base;
void __iomem *ipcc_io_mem;
uint32_t ipcc_size;
u32 protocol_id;
u32 ipcc_client_vid;
u32 ipcc_client_pid;
/* table with mapping of ipc client for each hw-fence client */
struct hw_fence_client_ipc_map *ipc_clients_table;
/* qtime reg */
phys_addr_t qtime_reg_base;
void __iomem *qtime_io_mem;
uint32_t qtime_size;
/* synchronize client_ids registration and deregistration */
struct mutex clients_register_lock;
/* table with registered client handles */
struct msm_hw_fence_client **clients;
bool vm_ready;
/* state variables */
bool ipcc_dpu_initialized;
};
/**
* struct msm_hw_fence_queue_payload - hardware fence clients queues payload.
* @size: size of queue payload
* @type: type of queue payload
* @version: version of queue payload. High eight bits are for major and lower eight
* bits are for minor version
* @ctxt_id: context id of the dma fence
* @seqno: sequence number of the dma fence
* @hash: fence hash
* @flags: see MSM_HW_FENCE_FLAG_* flags descriptions
* @client_data: data passed from and returned to waiting client upon fence signaling
* @error: error code for this fence, fence controller receives this
* error from the signaling client through the tx queue and
* propagates the error to the waiting client through rx queue
* @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue
* @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue
*/
struct msm_hw_fence_queue_payload {
u32 size;
u16 type;
u16 version;
u64 ctxt_id;
u64 seqno;
u64 hash;
u64 flags;
u64 client_data;
u32 error;
u32 timestamp_lo;
u32 timestamp_hi;
u32 reserve;
};
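/*
 * Example (illustrative): unpacking the version field of a received payload.
 *   u16 major = payload->version >> 8;
 *   u16 minor = payload->version & 0xFF;
 */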
/**
* struct msm_hw_fence_event - hardware fence ctl debug event
* @time: qtime when the event is logged
* @cpu: cpu id where the event is logged
* @data_cnt: count of valid data available in the data field
* @data: debug data logged by the event
*/
struct msm_hw_fence_event {
u64 time;
u32 cpu;
u32 data_cnt;
u32 data[HW_FENCE_EVENT_MAX_DATA];
};
/**
* struct msm_hw_fence - structure holding each hw fence data.
* @valid: field updated when a hw-fence is reserved. True if hw-fence is in use
* @error: field to hold a hw-fence error
* @ctx_id: context id
* @seq_id: sequence id
* @wait_client_mask: bitmask holding the waiting-clients of the fence
* @fence_allocator: field to indicate the client_id that reserved the fence
* @fence_signal_client: field to indicate the client_id that signaled the fence
* @lock: this field is required to share information between the Driver & Driver ||
* Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock.
* @flags: field to indicate the state of the fence
* @parent_list: list of indexes with the parents for a child-fence in a join-fence
* @parents_cnt: total number of parents for a child-fence in a join-fence
* @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic
* or locked
* @fence_create_time: debug info with the create time timestamp
* @fence_trigger_time: debug info with the trigger time timestamp
* @fence_wait_time: debug info with the register-for-wait timestamp
* @debug_refcount: refcount used for debugging
* @client_data: array of data optionally passed from and returned to clients waiting on the fence
* during fence signaling
*/
struct msm_hw_fence {
u32 valid;
u32 error;
u64 ctx_id;
u64 seq_id;
u64 wait_client_mask;
u32 fence_allocator;
u32 fence_signal_client;
u64 lock; /* Datatype must be 64-bit. */
u64 flags;
u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS];
u32 parents_cnt;
u32 pending_child_cnt;
u64 fence_create_time;
u64 fence_trigger_time;
u64 fence_wait_time;
u64 debug_refcount;
u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA];
};
int hw_fence_init(struct hw_fence_driver_data *drv_data);
int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
struct msm_hw_fence_mem_addr *mem_descriptor);
int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client);
void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
int hw_fence_create(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
u64 context, u64 seqno, u64 *hash);
int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
u64 context, u64 seqno);
int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hash);
int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data);
int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash,
u64 client_data);
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
u64 flags, u64 client_data, u32 error, int queue_type);
int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error);
inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data);
int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
struct msm_hw_fence_queue_payload *payload, int queue_type);
int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue,
struct msm_hw_fence_queue_payload *payload);
int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
u64 seqno, u64 *hash, u64 client_data);
struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
u64 context, u64 seqno, u64 *hash);
enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id);
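/*
 * Typical usage sketch (illustrative; assumes a registered client and omits
 * error handling):
 *   u64 hash;
 *
 *   if (!hw_fence_create(drv_data, hw_fence_client, context, seqno, &hash)) {
 *           // the new fence now lives in the global table, indexed by hash
 *           hw_fence_destroy_with_hash(drv_data, hw_fence_client, hash);
 *   }
 */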
#endif /* __HW_FENCE_DRV_INTERNAL_H */

hw_fence/include/hw_fence_drv_utils.h

@@ -0,0 +1,167 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_UTILS_H
#define __HW_FENCE_DRV_UTILS_H
/**
* HW_FENCE_MAX_CLIENT_TYPE_STATIC:
* Total number of client types without configurable number of sub-clients (GFX, DPU, VAL)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3
/**
* HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE:
* Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 10
/**
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX:
* Maximum number of static clients, i.e. clients without configurable numbers of sub-clients
*/
#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE
/**
* enum hw_fence_mem_reserve - Types of reservations for the carved-out memory.
* HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues.
* HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region.
* HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table.
* HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues.
* HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events
*/
enum hw_fence_mem_reserve {
HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
HW_FENCE_MEM_RESERVE_LOCKS_REGION,
HW_FENCE_MEM_RESERVE_TABLE,
HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
HW_FENCE_MEM_RESERVE_EVENTS_BUFF
};
/**
* global_atomic_store() - Inter-processor lock
* @drv_data: hw fence driver data
* @lock: memory to lock
* @val: if true, api locks the memory, if false it unlocks the memory
*/
void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val);
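/*
 * Usage sketch (illustrative): bracketing a critical section shared with the
 * fence controller, using an entry of the per-client locks table:
 *   global_atomic_store(drv_data, &drv_data->client_lock_tbl[i], true);
 *   ... access inter-processor shared state ...
 *   global_atomic_store(drv_data, &drv_data->client_lock_tbl[i], false);
 */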
/**
* hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients;
* this API is only exported for simulation purposes.
* @drv_data: hw fence driver data.
* @db_flags: doorbell flag
*/
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags);
/**
* hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW
* Fence global table, locks and queues.
* @hw_fence_drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data);
/**
* hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool.
* @drv_data: hw fence driver data.
* @type: memory reservation type.
* @phys: physical address of the carved-out memory pool
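* @pa: pointer through which the address of the reserved region is returned
* @size: pointer through which the size of the reserved region is returned
* @client_id: client id, used when reserving per-client queue memory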
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id);
/**
* hw_fence_utils_parse_dt_props() - Init dt properties
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_map_ipcc() - Maps IPCC registers and enable signaling
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_map_qtime() - Maps qtime register
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client
* @drv_data: hw fence driver data
* @hw_fence_client: client, for which the fence must be cleared
* @hw_fence: hw-fence to cleanup
* @hash: hash of the hw-fence to cleanup
* @reset_flags: flags to determine how to handle the reset
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
u32 reset_flags);
/**
* hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client
*
* @hw_fence_client: client, for which fence error callback must be invoked
* @ctxt_id: context id of the hw-fence
* @seqno: sequence number of the hw-fence
* @hash: hash of the hw-fence
* @flags: flags of the hw-fence
* @error: error of the hw-fence
*
* Returns zero if success, otherwise returns negative error code
*/
int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
u64 seqno, u64 hash, u64 flags, u32 error);
/**
* hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver
* from the client_id used externally
*
* Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
* otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if
* provided with client IDs for such clients when support for those clients is not configured in
* device-tree.
*
* @drv_data: hw fence driver data
* @client_id: external client_id to get internal client_id for
*
* Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX
*/
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
enum hw_fence_client_id client_id);
/**
* hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id.
*
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* Returns: number of client queues
*/
int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
#endif /* __HW_FENCE_DRV_UTILS_H */

hw_fence/include/msm_hw_fence_synx_translation.h

@@ -0,0 +1,222 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __MSM_HW_FENCE_SYNX_TRANSLATION_H
#define __MSM_HW_FENCE_SYNX_TRANSLATION_H
#include <synx_api.h>
extern bool hw_fence_driver_enable;
#ifndef SYNX_HW_FENCE_CLIENT_START
#define SYNX_HW_FENCE_CLIENT_START 1024
#define SYNX_HW_FENCE_CLIENT_END 4096
#define SYNX_MAX_SIGNAL_PER_CLIENT 64
/**
* enum synx_hwfence_client_id : Unique identifier of the supported clients
*
* @SYNX_CLIENT_HW_FENCE_GFX_CTX0 : HW Fence GFX Client 0
* @SYNX_CLIENT_HW_FENCE_IPE_CTX0 : HW Fence IPE Client 0
* @SYNX_CLIENT_HW_FENCE_VID_CTX0 : HW Fence Video Client 0
* @SYNX_CLIENT_HW_FENCE_DPU0_CTL0 : HW Fence DPU0 Client 0
* @SYNX_CLIENT_HW_FENCE_DPU1_CTL0 : HW Fence DPU1 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE0_CTX0 : HW Fence IFE0 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE1_CTX0 : HW Fence IFE1 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE2_CTX0 : HW Fence IFE2 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE3_CTX0 : HW Fence IFE3 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE4_CTX0 : HW Fence IFE4 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE5_CTX0 : HW Fence IFE5 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE6_CTX0 : HW Fence IFE6 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE7_CTX0 : HW Fence IFE7 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE8_CTX0 : HW Fence IFE8 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE9_CTX0 : HW Fence IFE9 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE10_CTX0 : HW Fence IFE10 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE11_CTX0 : HW Fence IFE11 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE12_CTX0 : HW Fence IFE12 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE13_CTX0 : HW Fence IFE13 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE14_CTX0 : HW Fence IFE14 Client 0
* @SYNX_CLIENT_HW_FENCE_IFE15_CTX0 : HW Fence IFE15 Client 0
*/
enum synx_hwfence_client_id {
SYNX_CLIENT_HW_FENCE_GFX_CTX0 = SYNX_HW_FENCE_CLIENT_START,
SYNX_CLIENT_HW_FENCE_IPE_CTX0 = SYNX_CLIENT_HW_FENCE_GFX_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_VID_CTX0 = SYNX_CLIENT_HW_FENCE_IPE_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_DPU0_CTL0 = SYNX_CLIENT_HW_FENCE_VID_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_DPU1_CTL0 = SYNX_CLIENT_HW_FENCE_DPU0_CTL0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE0_CTX0 = SYNX_CLIENT_HW_FENCE_DPU1_CTL0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE1_CTX0 = SYNX_CLIENT_HW_FENCE_IFE0_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE2_CTX0 = SYNX_CLIENT_HW_FENCE_IFE1_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE3_CTX0 = SYNX_CLIENT_HW_FENCE_IFE2_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE4_CTX0 = SYNX_CLIENT_HW_FENCE_IFE3_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE5_CTX0 = SYNX_CLIENT_HW_FENCE_IFE4_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE6_CTX0 = SYNX_CLIENT_HW_FENCE_IFE5_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE7_CTX0 = SYNX_CLIENT_HW_FENCE_IFE6_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE8_CTX0 = SYNX_CLIENT_HW_FENCE_IFE7_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE9_CTX0 = SYNX_CLIENT_HW_FENCE_IFE8_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE10_CTX0 = SYNX_CLIENT_HW_FENCE_IFE9_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE11_CTX0 = SYNX_CLIENT_HW_FENCE_IFE10_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE12_CTX0 = SYNX_CLIENT_HW_FENCE_IFE11_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE13_CTX0 = SYNX_CLIENT_HW_FENCE_IFE12_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE14_CTX0 = SYNX_CLIENT_HW_FENCE_IFE13_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_IFE15_CTX0 = SYNX_CLIENT_HW_FENCE_IFE14_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT,
SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END,
};
#endif
#if IS_ENABLED(CONFIG_QTI_HW_FENCE)
/**
* synx_hwfence_initialize - Initializes a new client session
*
* @param params : Pointer to session init params
*
* @return Client session pointer on success. NULL or error in case of failure.
*/
struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params);
/**
* synx_hwfence_uninitialize - Destroys the client session
*
* @param session : Session ptr (returned from synx_initialize)
*
* @return Status of operation. SYNX_SUCCESS in case of success.
*/
int synx_hwfence_uninitialize(struct synx_session *session);
/**
* synx_hwfence_create - Creates a synx object
*
* Creates a new synx obj and returns the handle to client.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to create params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if params were invalid.
* -SYNX_NOMEM will be returned if the kernel can't allocate space for
* the synx object.
*/
int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params);
/**
* synx_hwfence_release - Release the synx object
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle to be destroyed
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_hwfence_release(struct synx_session *session, u32 h_synx);
/**
* synx_hwfence_signal - Signals a synx object with the status argument.
*
* This function will signal the synx object referenced by h_synx
* and invoke any external binding synx objs.
* The status parameter will indicate whether the entity
* performing the signaling wants to convey an error case or a success case.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
* @param status : Status of signaling.
* Clients can send custom signaling status
* beyond SYNX_STATE_SIGNALED_MAX.
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status);
/**
* synx_hwfence_recover - Recover any possible handle leaks
*
* This function should be called on HW hang/reset to
* recover the shared Synx handles. It cleans up the
* Synx handles held by the rest of the HW and avoids
* potential resource leaks.
*
* It does not destroy the session; it only
* recovers the synx handles belonging to the session.
* The synx session remains active, and clients
* need to destroy the session explicitly through the
* synx_uninitialize API.
*
* @param id : Client ID of core to recover
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_hwfence_recover(enum synx_client_id id);
/**
* synx_hwfence_import - Imports (looks up) synx object from given handle/fence
*
* Import subscribes the client session for notification on signal
* of handles/fences.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to import params
*
* @return SYNX_SUCCESS upon success, -SYNX_INVALID if the synx object is in a bad state
*/
int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params);
#else /* CONFIG_QTI_HW_FENCE */
static inline struct synx_session *synx_hwfence_initialize(
struct synx_initialization_params *params)
{
return ERR_PTR(-SYNX_INVALID);
}
static inline int synx_hwfence_uninitialize(struct synx_session *session)
{
return -SYNX_INVALID;
}
static inline int synx_hwfence_create(struct synx_session *session,
struct synx_create_params *params)
{
return -SYNX_INVALID;
}
static inline int synx_hwfence_release(struct synx_session *session, u32 h_synx)
{
return -SYNX_INVALID;
}
static inline int synx_hwfence_signal(struct synx_session *session, u32 h_synx,
enum synx_signal_status status)
{
return -SYNX_INVALID;
}
static inline int synx_hwfence_recover(enum synx_client_id id)
{
return -SYNX_INVALID;
}
static inline int synx_hwfence_import(struct synx_session *session,
struct synx_import_params *params)
{
return -SYNX_INVALID;
}
#endif /* CONFIG_QTI_HW_FENCE */
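/*
 * Usage sketch (illustrative; the fields of the params structs and the status
 * values come from synx_api.h and are only hinted at here):
 *
 *   struct synx_initialization_params init_params = { 0 };  // fill per synx_api.h
 *   struct synx_session *session = synx_hwfence_initialize(&init_params);
 *
 *   struct synx_create_params create_params = { 0 };        // fill per synx_api.h;
 *                                                           // yields the handle h_synx
 *   if (!IS_ERR_OR_NULL(session) &&
 *       synx_hwfence_create(session, &create_params) == SYNX_SUCCESS) {
 *           synx_hwfence_signal(session, h_synx, status);   // status: enum synx_signal_status
 *           synx_hwfence_release(session, h_synx);
 *   }
 *   synx_hwfence_uninitialize(session);
 */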
#endif /* __MSM_HW_FENCE_SYNX_TRANSLATION_H */

File diff suppressed because it is too large

hw_fence/src/hw_fence_drv_ipc.c

@@ -0,0 +1,428 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_platform.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/*
 * Max size of the base table with ipc mappings, plus one mapping per client type with a
 * configurable number of sub-clients
*/
#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \
HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
/**
* struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
* @ipc_client_id_virt: virtual ipc client id for the hw-fence client.
* @ipc_client_id_phys: physical ipc client id for the hw-fence client.
* @ipc_signal_id: ipc signal id for the hw-fence client.
* @update_rxq: bool to indicate if client uses rx-queue.
* @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences
*/
struct hw_fence_client_ipc_map {
int ipc_client_id_virt;
int ipc_client_id_phys;
int ipc_signal_id;
bool update_rxq;
bool send_ipc;
};
/**
* struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is
* used by the hw fence driver to trigger ipc signal when hw fence is already
* signaled.
* This version is for targets that support dpu client id.
*
* Note that the index of this struct must match the enum hw_fence_client_id
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true},/* ctl1 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true},/* ctl2 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true},/* ctl3 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/
#else
{0, 0, 0, false, false}, /* val0 */
{0, 0, 0, false, false}, /* val1 */
{0, 0, 0, false, false}, /* val2 */
{0, 0, 0, false, false}, /* val3 */
{0, 0, 0, false, false}, /* val4 */
{0, 0, 0, false, false}, /* val5 */
{0, 0, 0, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */
};
/**
 * hw_fence_clients_ipc_map_v2 - Table mapping each client to the ipc signal used
 *		by the hw fence driver to trigger an ipc signal when the hw fence is
 *		already signaled.
 * This version is for targets that support the dpu client id and IPC v2.
 *
 * Note that the index into this table must match the enum hw_fence_client_id for
 * client ids less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
 * For clients with configurable sub-clients, the index matches
 * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true},/* ctl1 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true},/* ctl2 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true},/* ctl3 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/
#else
{0, 0, 0, false, false}, /* val0 */
{0, 0, 0, false, false}, /* val1 */
{0, 0, 0, false, false}, /* val2 */
{0, 0, 0, false, false}, /* val3 */
{0, 0, 0, false, false}, /* val4 */
{0, 0, 0, false, false}, /* val5 */
{0, 0, 0, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/
{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/
{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/
{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/
{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/
{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/
};
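/*
 * Illustrative index example (a sketch based on the tables above, where the ife
 * entries are the configurable client types): the base-table entry for the
 * first configurable type sits at index HW_FENCE_MAX_STATIC_CLIENTS_INDEX + 0.
 * At runtime, _hw_fence_ipcc_init_map_with_configurable_clients() expands each
 * such entry into one table entry per sub-client, assigning ipc_signal_id = j
 * to sub-client j.
 */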
int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_virt;
}
int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_phys;
}
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_signal_id;
}
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].update_rxq;
}
bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id)
{
	if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].send_ipc;
}
/**
* _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging.
*/
static inline char *_get_ipc_phys_client_name(u32 client_id)
{
switch (client_id) {
case HW_FENCE_IPC_CLIENT_ID_APPS_PID:
return "APPS_PID";
case HW_FENCE_IPC_CLIENT_ID_GPU_PID:
return "GPU_PID";
case HW_FENCE_IPC_CLIENT_ID_DPU_PID:
return "DPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IPE_PID:
return "IPE_PID";
case HW_FENCE_IPC_CLIENT_ID_VPU_PID:
return "VPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_PID:
return "IFE0_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_PID:
return "IFE1_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_PID:
return "IFE2_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_PID:
return "IFE3_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_PID:
return "IFE4_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_PID:
return "IFE5_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_PID:
return "IFE6_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_PID:
return "IFE7_PID";
}
return "UNKNOWN_PID";
}
/**
* _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging.
*/
static inline char *_get_ipc_virt_client_name(u32 client_id)
{
switch (client_id) {
case HW_FENCE_IPC_CLIENT_ID_APPS_VID:
return "APPS_VID";
case HW_FENCE_IPC_CLIENT_ID_GPU_VID:
return "GPU_VID";
case HW_FENCE_IPC_CLIENT_ID_DPU_VID:
return "DPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IPE_VID:
return "IPE_VID";
case HW_FENCE_IPC_CLIENT_ID_VPU_VID:
return "VPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_VID:
return "IFE0_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_VID:
return "IFE1_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_VID:
return "IFE2_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_VID:
return "IFE3_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_VID:
return "IFE4_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_VID:
return "IFE5_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_VID:
return "IFE6_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_VID:
return "IFE7_VID";
}
return "UNKNOWN_VID";
}
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
u32 tx_client_pid, u32 rx_client_vid, u32 signal_id)
{
void __iomem *ptr;
u32 val;
/* Send signal */
ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
tx_client_pid);
val = (rx_client_vid << 16) | signal_id;
HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n",
_get_ipc_phys_client_name(tx_client_pid), tx_client_pid,
_get_ipc_virt_client_name(rx_client_vid), rx_client_vid,
signal_id, val, ptr);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
writel_relaxed(val, ptr);
/* Make sure value is written */
wmb();
}
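/*
 * Illustrative encoding (a sketch using the dpu ids noted later in this file,
 * vid = 25): triggering dpu signal_id = 1 computes
 * val = (25 << 16) | 1 = 0x00190001, which is written to the SEND register
 * selected by the tx client's physical id and the protocol id.
 */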
static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_ipc_map *base_table)
{
int i, j, map_idx;
size_t size;
size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map);
drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL);
if (!drv_data->ipc_clients_table)
return -ENOMEM;
/* copy mappings for static hw fence clients */
size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map);
memcpy(drv_data->ipc_clients_table, base_table, size);
/* initialize mappings for ipc clients with configurable number of hw fence clients */
map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX;
for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) {
int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i;
int clients_num = drv_data->hw_fence_client_types[client_type].clients_num;
for (j = 0; j < clients_num; j++) {
/* this should never happen if drv_data->clients_num is correct */
if (map_idx >= drv_data->clients_num) {
HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n",
drv_data->hw_fence_client_types[client_type].name,
clients_num, drv_data->clients_num);
return -EINVAL;
}
drv_data->ipc_clients_table[map_idx] =
base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i];
drv_data->ipc_clients_table[map_idx].ipc_signal_id = j;
map_idx++;
}
}
return 0;
}
/**
* _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
* according to the ipcc hw revision.
* @drv_data: driver data.
* @hwrev: ipcc hw revision.
*/
static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
{
int ret = 0;
switch (hwrev) {
case HW_FENCE_IPCC_HW_REV_170:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA;
drv_data->ipc_clients_table = hw_fence_clients_ipc_map;
HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n");
break;
case HW_FENCE_IPCC_HW_REV_203:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */
ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
hw_fence_clients_ipc_map_v2);
HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n");
break;
default:
		return -EINVAL;
}
return ret;
}
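/*
 * Illustrative device-tree entry (a sketch; the node name and version value are
 * assumptions, only the property and compatible strings appear in this driver):
 *
 *	hw-fence {
 *		compatible = "qcom,msm-hw-fence";
 *		qcom,hw-fence-ipc-ver = <...>;	// a HW_FENCE_IPCC_HW_REV_* value
 *	};
 */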
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
{
void __iomem *ptr;
u32 val;
int ret;
HWFNC_DBG_H("enable ipc +\n");
ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val);
if (ret || !val) {
HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val);
return -EINVAL;
}
if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
HWFNC_ERR("ipcc protocol id not supported\n");
return -EINVAL;
}
/* Enable compute l1 (protocol_id = 2) */
val = 0x00000000;
ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
drv_data->ipcc_client_pid);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
writel_relaxed(val, ptr);
/* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */
	val = 0x00080000;
ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id,
drv_data->ipcc_client_pid);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
writel_relaxed(val, ptr);
HWFNC_DBG_H("enable ipc -\n");
return 0;
}
int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
{
struct hw_fence_client_ipc_map *hw_fence_client;
bool protocol_enabled = false;
void __iomem *ptr;
u32 val;
int i;
HWFNC_DBG_H("enable dpu ipc +\n");
if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) {
HWFNC_ERR("invalid drv data\n");
		return -EINVAL;
}
HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem);
HWFNC_DBG_H("Initialize dpu signals\n");
/* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */
for (i = 0; i < drv_data->clients_num; i++) {
hw_fence_client = &drv_data->ipc_clients_table[i];
/* skip any client that is not a dpu client */
if (hw_fence_client->ipc_client_id_virt != HW_FENCE_IPC_CLIENT_ID_DPU_VID)
continue;
if (!protocol_enabled) {
/*
* First DPU client will enable the protocol for dpu, e.g. compute l1
* (protocol_id = 2) or fencing protocol, depending on the target, for the
* dpu client (vid = 25, pid = 9).
* Sets bit(1) to clear when RECV_ID is read
*/
val = 0x00000001;
ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem,
drv_data->protocol_id, hw_fence_client->ipc_client_id_phys);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
writel_relaxed(val, ptr);
protocol_enabled = true;
}
/* Enable signals for dpu client */
HWFNC_DBG_H("dpu client:%d vid:%d pid:%d signal:%d\n", i,
hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys,
hw_fence_client->ipc_signal_id);
/* Enable input apps-signal for dpu */
val = (HW_FENCE_IPC_CLIENT_ID_APPS_VID << 16) |
(hw_fence_client->ipc_signal_id & 0xFFFF);
ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem,
drv_data->protocol_id, hw_fence_client->ipc_client_id_phys);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
writel_relaxed(val, ptr);
}
HWFNC_DBG_H("enable dpu ipc -\n");
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,629 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/ktime.h>
#include <linux/types.h>
#include <linux/sync_file.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls)
#define HW_FENCE_ARRAY_SIZE 10
#define HW_SYNC_IOC_MAGIC 'W'
#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long)
#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long)
#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\
struct hw_fence_sync_create_data)
#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\
struct hw_fence_array_sync_create_data)
#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int)
#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long)
#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int)
#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long)
#define HW_FENCE_IOCTL_NR(n) (_IOC_NR(n) - 2)
#define HW_IOCTL_DEF(ioctl, _func) \
[HW_FENCE_IOCTL_NR(ioctl)] = { \
.cmd = ioctl, \
.func = _func, \
.name = #ioctl \
}
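/*
 * Illustrative indexing example: HW_SYNC_IOC_REG_CLIENT has ioctl nr 10, so
 * HW_FENCE_IOCTL_NR() places its handler at table index 8; unused nrs (13 and
 * 15) leave NULL holes that the dispatcher rejects with -ENOTTY.
 */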
#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
/**
* struct hw_sync_obj - per client hw sync object.
* @context: context id used to create fences.
* @client_id: to uniquely represent client.
* @client_handle: Pointer to the structure holding the resources
* allocated to the client.
* @mem_descriptor: Memory descriptor of the queue allocated by the
* hardware fence driver for each client during register.
*/
struct hw_sync_obj {
u64 context;
int client_id;
void *client_handle;
struct msm_hw_fence_mem_addr mem_descriptor;
};
/**
* struct hw_fence_sync_create_data - data used in creating fences.
* @seqno: sequence number.
* @incr_context: if set, then the context would be incremented.
* @fence: returns the fd of the new sync_file with the created fence.
* @hash: fence hash
*/
struct hw_fence_sync_create_data {
u64 seqno;
bool incr_context;
__s32 fence;
u64 hash;
};
/**
* struct hw_fence_array_sync_create_data - data used in creating multiple fences.
* @seqno: sequence number used to create fence array.
* @num_fences: number of fence fds received.
* @fences: array of fence fds.
* @fence_array_fd: fd of fence array.
*/
struct hw_fence_array_sync_create_data {
u64 seqno;
int num_fences;
u64 fences[HW_FENCE_ARRAY_SIZE];
__s32 fence_array_fd;
};
/**
* struct hw_fence_sync_signal_data - data used to signal fences.
* @hash: hash of the fence.
* @error_flag: error flag
*/
struct hw_fence_sync_signal_data {
u64 hash;
u32 error_flag;
};
/**
* struct hw_fence_sync_wait_data - data used to wait on fences.
* @fence: fence fd.
* @timeout_ms: fence wait time out.
*/
struct hw_fence_sync_wait_data {
__s32 fence;
u64 timeout_ms;
};
/**
* struct hw_fence_sync_reset_data - data used to reset client.
* @client_id: client id.
* @reset_flag: reset flag
*/
struct hw_fence_sync_reset_data {
int client_id;
u32 reset_flag;
};
typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg);
/**
* struct hw_sync_ioctl_def - hw_sync driver ioctl entry
* @cmd: ioctl command number, without flags
* @func: handler for this ioctl
* @name: user-readable name for debug output
*/
struct hw_sync_ioctl_def {
unsigned int cmd;
hw_fence_ioctl_t *func;
const char *name;
};
static bool _is_valid_client(struct hw_sync_obj *obj)
{
if (!obj)
return false;
if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return false;
}
return true;
}
static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id;
if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id)))
return -EFAULT;
if (!obj)
return -EINVAL;
if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return -EINVAL;
}
return client_id;
}
static void *_hw_sync_get_fence(int fd)
{
return fd >= 0 ? sync_file_get_fence(fd) : NULL;
}
static int hw_sync_debugfs_open(struct inode *inode, struct file *file)
{
struct hw_sync_obj *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return -ENOMEM;
obj->context = dma_fence_context_alloc(1);
file->private_data = obj;
return 0;
}
static int hw_sync_debugfs_release(struct inode *inode, struct file *file)
{
struct hw_sync_obj *obj = file->private_data;
if (!obj)
return -EINVAL;
kfree(obj);
return 0;
}
static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id = _get_client_id(obj, arg);
if (IS_ERR(&client_id)) {
return client_id;
} else if (obj->client_handle) {
HWFNC_ERR("client:%d already registered as validation client\n", client_id);
return -EINVAL;
}
obj->client_id = client_id;
obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor);
if (IS_ERR_OR_NULL(obj->client_handle))
return -EINVAL;
return 0;
}
static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id = _get_client_id(obj, arg);
if (IS_ERR(&client_id)) {
return client_id;
} else if (client_id != obj->client_id) {
HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n",
obj->client_id, client_id);
return -EINVAL;
}
return msm_hw_fence_deregister(obj->client_handle);
}
static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_create_params params;
struct hw_fence_sync_create_data data;
struct hw_dma_fence *fence;
spinlock_t *fence_lock;
u64 hash;
struct sync_file *sync_file;
int fd, ret;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
/* create dma fence */
fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
if (!fence_lock)
return -ENOMEM;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence) {
kfree(fence_lock);
return -ENOMEM;
}
	snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu",
obj->client_id, obj->context, data.seqno);
spin_lock_init(fence_lock);
dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno);
HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id,
obj->context, data.seqno);
params.fence = &fence->base;
params.handle = &hash;
/* create hw fence */
ret = msm_hw_fence_create(obj->client_handle, &params);
if (ret) {
HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n",
obj->client_id, obj->context, data.seqno);
dma_fence_put(&fence->base);
return -EINVAL;
}
/* keep handle in dma_fence, to destroy hw-fence during release */
fence->client_handle = obj->client_handle;
if (data.incr_context)
obj->context = dma_fence_context_alloc(1);
/* create fd */
fd = get_unused_fd_flags(0);
if (fd < 0) {
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
dma_fence_put(&fence->base);
return fd;
}
sync_file = sync_file_create(&fence->base);
if (sync_file == NULL) {
HWFNC_ERR("couldn't create fence fd, %d\n", fd);
dma_fence_put(&fence->base);
ret = -EINVAL;
goto exit;
}
/* Decrement the refcount that sync_file_create increments */
dma_fence_put(&fence->base);
data.fence = fd;
data.hash = hash;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
dma_fence_put(&fence->base);
fput(sync_file->file);
ret = -EFAULT;
goto exit;
}
fd_install(fd, sync_file->file);
return 0;
exit:
put_unused_fd(fd);
return ret;
}
static void _put_child_fences(int i, struct dma_fence **fences)
{
int fence_idx;
	for (fence_idx = i; fence_idx >= 0; fence_idx--)
		dma_fence_put(fences[fence_idx]);
}
static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg)
{
struct dma_fence_array *fence_array;
struct hw_fence_array_sync_create_data data;
struct dma_fence **fences = NULL;
struct sync_file *sync_file;
int num_fences, i, fd, ret;
struct hw_dma_fence *fence;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
num_fences = data.num_fences;
if (num_fences > HW_FENCE_ARRAY_SIZE) {
HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n",
num_fences, HW_FENCE_ARRAY_SIZE);
return -EINVAL;
}
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences) {
return -ENOMEM;
}
for (i = 0; i < num_fences; i++) {
fd = data.fences[i];
if (fd <= 0) {
kfree(fences);
return -EINVAL;
}
fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
_put_child_fences(i-1, fences);
kfree(fences);
return -EINVAL;
}
fences[i] = &fence->base;
}
/* create the fence array from array of dma fences */
fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0);
if (!fence_array) {
HWFNC_ERR("Error creating fence_array\n");
/* decrease the refcount incremented for each child fences */
for (i = 0; i < num_fences; i++)
dma_fence_put(fences[i]);
kfree(fences);
return -EINVAL;
}
/* create fd */
fd = get_unused_fd_flags(0);
	if (fd < 0) {
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
dma_fence_put(&fence_array->base);
return fd;
}
sync_file = sync_file_create(&fence_array->base);
if (sync_file == NULL) {
HWFNC_ERR("couldn't create fence fd, %d\n", fd);
dma_fence_put(&fence_array->base);
kfree(fence_array);
ret = -EINVAL;
goto exit;
}
/* Decrement the refcount that sync_file_create increments */
dma_fence_put(&fence_array->base);
data.fence_array_fd = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		/* fput drops the sync_file's reference, which releases the fence array */
		fput(sync_file->file);
ret = -EFAULT;
goto exit;
}
fd_install(fd, sync_file->file);
return 0;
exit:
put_unused_fd(fd);
return ret;
}
/*
 * This IOCTL only supports receiving one fence as input parameter, which can be
 * either a "dma_fence" or a "dma_fence_array"; the API may eventually be
 * expanded to receive more fences
*/
static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg)
{
struct dma_fence *fence;
int ret, fd, num_fences = 1;
if (!_is_valid_client(obj))
return -EINVAL;
if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
return -EFAULT;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);
return -EINVAL;
}
ret = msm_hw_fence_wait_update_v2(obj->client_handle, &fence, NULL, NULL, num_fences, 1);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return ret;
}
static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_client *hw_fence_client;
struct hw_fence_sync_signal_data data;
int ret, tx_client, rx_client, signal_id;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
if (!hw_fence_client) {
HWFNC_ERR("invalid client handle\n");
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag);
if (ret) {
HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id);
return ret;
}
signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id;
if (signal_id < 0)
return -EINVAL;
tx_client = hw_fence_client->ipc_client_pid;
rx_client = hw_fence_client->ipc_client_vid;
ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id);
if (ret) {
HWFNC_ERR("hw fence trigger signal has failed\n");
return ret;
}
return 0;
}
static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence_queue_payload payload;
struct hw_fence_sync_wait_data data;
struct dma_fence *fence;
ktime_t cur_ktime, exp_ktime;
int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
if (!_is_valid_client(obj))
return -EINVAL;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
fd = data.fence;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
if (!hw_fence_client) {
HWFNC_ERR("invalid client handle for fd:%d\n", fd);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return -EINVAL;
}
exp_ktime = ktime_add_ms(ktime_get(), data.timeout_ms);
do {
ret = wait_event_timeout(hw_fence_client->wait_queue,
atomic_read(&hw_fence_client->val_signal) > 0,
msecs_to_jiffies(data.timeout_ms));
cur_ktime = ktime_get();
} while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) &&
ktime_compare_safe(exp_ktime, cur_ktime) > 0);
if (!ret) {
HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return -ETIMEDOUT;
}
/* clear doorbell signal flag */
atomic_set(&hw_fence_client->val_signal, 0);
while (read) {
read = hw_fence_read_queue(obj->client_handle, &payload, queue_type);
if (read < 0) {
HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id);
break;
}
HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n",
payload.hash, payload.flags, payload.error);
if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) {
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return 0;
}
}
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
HWFNC_ERR("fence received did not match the fence expected\n");
HWFNC_ERR("fence received: context:%d seqno:%d fence expected: context:%d seqno:%d\n",
payload.ctxt_id, payload.seqno, fence->context, fence->seqno);
return read;
}
static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg)
{
int ret;
struct hw_fence_sync_reset_data data;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag);
if (ret) {
HWFNC_ERR("hw fence reset client has failed\n");
return ret;
}
return 0;
}
static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array),
HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait),
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal),
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait),
HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client)
};
static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hw_sync_obj *obj = file->private_data;
int num = HW_FENCE_IOCTL_NR(cmd);
hw_fence_ioctl_t *func;
if (num >= HW_SYNC_IOCTL_COUNT) {
HWFNC_ERR("invalid ioctl num = %d\n", num);
return -EINVAL;
}
func = (&hw_sync_debugfs_ioctls[num])->func;
if (unlikely(!func)) {
HWFNC_ERR("no function num = %d\n", num);
return -ENOTTY;
}
return func(obj, arg);
}
const struct file_operations hw_sync_debugfs_fops = {
.open = hw_sync_debugfs_open,
.release = hw_sync_debugfs_release,
.unlocked_ioctl = hw_sync_debugfs_ioctl,
};
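/*
 * Illustrative userspace flow against this debugfs node (a sketch; the node
 * path and the validation client id value are assumptions, not defined here):
 *
 *	int fd = open("/sys/kernel/debug/...", O_RDWR);
 *	int client_id = ...;	// one of HW_FENCE_CLIENT_ID_VAL0..VAL6
 *	ioctl(fd, HW_SYNC_IOC_REG_CLIENT, &client_id);
 *	struct hw_fence_sync_create_data data = { .seqno = 1 };
 *	ioctl(fd, HW_SYNC_IOC_CREATE_FENCE, &data);	// fence fd in data.fence
 *	...
 *	ioctl(fd, HW_SYNC_IOC_UNREG_CLIENT, &client_id);
 *	close(fd);
 */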

View File

@ -0,0 +1,807 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_ipc.h"
struct hw_fence_driver_data *hw_fence_drv_data;
bool hw_fence_driver_enable;
void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
struct msm_hw_fence_mem_addr *mem_descriptor)
{
struct msm_hw_fence_client *hw_fence_client;
enum hw_fence_client_id client_id;
int ret;
if (!hw_fence_driver_enable)
return ERR_PTR(-ENODEV);
HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext);
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return ERR_PTR(-EAGAIN);
}
if (!mem_descriptor || client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: %d client_id_ext:%d\n",
!mem_descriptor, client_id_ext);
return ERR_PTR(-EINVAL);
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n",
client_id, client_id_ext);
return ERR_PTR(-EINVAL);
}
/* Alloc client handle */
hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL);
if (!hw_fence_client)
return ERR_PTR(-ENOMEM);
/* Avoid race condition if multiple-threads request same client at same time */
mutex_lock(&hw_fence_drv_data->clients_register_lock);
if (hw_fence_drv_data->clients[client_id]) {
HWFNC_ERR("client with id %d already registered\n", client_id);
mutex_unlock(&hw_fence_drv_data->clients_register_lock);
kfree(hw_fence_client);
return ERR_PTR(-EINVAL);
}
/* Mark client as registered */
hw_fence_drv_data->clients[client_id] = hw_fence_client;
mutex_unlock(&hw_fence_drv_data->clients_register_lock);
hw_fence_client->client_id = client_id;
hw_fence_client->client_id_ext = client_id_ext;
hw_fence_client->ipc_client_vid =
hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id);
hw_fence_client->ipc_client_pid =
hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id);
if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) {
HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id,
hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid);
ret = -EINVAL;
goto error;
}
hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id);
if (hw_fence_client->ipc_signal_id < 0) {
HWFNC_ERR("Failed to find client:%d signal\n", client_id);
ret = -EINVAL;
goto error;
}
hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id);
if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq &&
hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) {
HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id,
hw_fence_client->queues_num,
hw_fence_client->update_rxq ? "true" : "false");
ret = -EINVAL;
goto error;
}
/* Alloc Client HFI Headers and Queues */
ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
hw_fence_client, mem_descriptor);
if (ret)
goto error;
/* Initialize signal for communication with FenceCTL */
ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client);
if (ret)
goto error;
/*
* Update Fence Controller with the address of the Queues and
* the Fences Tables for this client
*/
ret = hw_fence_init_controller_resources(hw_fence_client);
if (ret)
goto error;
mutex_init(&hw_fence_client->error_cb_lock);
HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n",
hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num,
hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid,
hw_fence_client->ipc_client_pid);
#if IS_ENABLED(CONFIG_DEBUG_FS)
init_waitqueue_head(&hw_fence_client->wait_queue);
#endif /* CONFIG_DEBUG_FS */
return (void *)hw_fence_client;
error:
/* Free all the allocated resources */
hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
HWFNC_ERR("failed with error:%d\n", ret);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(msm_hw_fence_register);
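/*
 * Illustrative in-kernel client flow (a sketch; the client id is a
 * placeholder):
 *
 *	struct msm_hw_fence_mem_addr mem;
 *	void *hdl = msm_hw_fence_register(HW_FENCE_CLIENT_ID_CTL0, &mem);
 *
 *	if (IS_ERR_OR_NULL(hdl))
 *		return PTR_ERR(hdl);
 *	... create, import and signal fences via the APIs below ...
 *	msm_hw_fence_deregister(hdl);
 */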
int msm_hw_fence_deregister(void *client_handle)
{
struct msm_hw_fence_client *hw_fence_client;
if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client handle\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Free all the allocated resources */
hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_deregister);
int msm_hw_fence_create(void *client_handle,
struct msm_hw_fence_create_params *params)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
struct dma_fence *fence;
int ret;
if (IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) {
HWFNC_ERR("Invalid input\n");
return -EINVAL;
}
if (!hw_fence_drv_data->vm_ready) {
HWFNC_DBG_H("VM not ready, cannot create fence\n");
return -EAGAIN;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
fence = (struct dma_fence *)params->fence;
HWFNC_DBG_H("+\n");
/* Block any Fence-Array, we should only get individual fences */
array = to_dma_fence_array(fence);
if (array) {
HWFNC_ERR("HW Fence must be created for individual fences\n");
return -EINVAL;
}
/* This Fence is already a HW-Fence */
if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence already has HW Fence Flag set\n");
return -EINVAL;
}
/* Create the HW Fence, i.e. add entry in the Global Table for this Fence */
ret = hw_fence_create(hw_fence_drv_data, hw_fence_client,
fence->context, fence->seqno, params->handle);
if (ret) {
HWFNC_ERR("Error creating HW fence\n");
return ret;
}
/* If no error, set the HW Fence Flag in the dma-fence */
set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_create);
int msm_hw_fence_destroy(void *client_handle,
struct dma_fence *fence)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
int ret;
if (IS_ERR_OR_NULL(client_handle) || !fence) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
HWFNC_DBG_H("+\n");
/* Block any Fence-Array, we should only get individual fences */
array = to_dma_fence_array(fence);
if (array) {
HWFNC_ERR("HW Fence must be destroy for individual fences\n");
return -EINVAL;
}
	/* This fence is not a HW-fence */
if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags);
return -EINVAL;
}
/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client,
fence->context, fence->seqno);
if (ret) {
HWFNC_ERR("Error destroying the HW fence\n");
return ret;
}
/* Clear the HW Fence Flag in the dma-fence */
clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_destroy);
int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle);
if (ret) {
HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle,
hw_fence_client->client_id);
return ret;
}
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_destroy_with_handle);
int msm_hw_fence_wait_update_v2(void *client_handle,
struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences,
bool create)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
int i, ret = 0;
enum hw_fence_client_data_id data_id;
if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
if (!hw_fence_drv_data->vm_ready) {
HWFNC_DBG_H("VM not ready, cannot destroy fence\n");
return -EAGAIN;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n",
hw_fence_client->client_id_ext);
return -EINVAL;
}
if (hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) {
HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Process all the list of fences */
for (i = 0; i < num_fences; i++) {
struct dma_fence *fence = fence_list[i];
u64 hash, client_data = 0;
if (client_data_list)
client_data = client_data_list[i];
/* Process a Fence-Array */
array = to_dma_fence_array(fence);
if (array) {
ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client,
array, &hash, client_data);
if (ret) {
HWFNC_ERR("Failed to process FenceArray\n");
return ret;
}
} else {
/* Process individual Fence */
ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence,
&hash, client_data);
if (ret) {
HWFNC_ERR("Failed to process Fence\n");
return ret;
}
}
if (handles)
handles[i] = hash;
}
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_wait_update_v2);
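/*
 * Illustrative call (a sketch; "hdl" and "fences" are placeholders from a
 * registered client):
 *
 *	u64 handles[2];
 *	ret = msm_hw_fence_wait_update_v2(hdl, fences, handles, NULL, 2, true);
 *
 * On success, handles[i] holds the hash of the hw-fence backing fences[i].
 */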
int msm_hw_fence_wait_update(void *client_handle,
struct dma_fence **fence_list, u32 num_fences, bool create)
{
return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences,
create);
}
EXPORT_SYMBOL(msm_hw_fence_wait_update);
int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence *hw_fences_tbl;
int i;
if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client handle!\n");
return -EINVAL;
}
if (!hw_fence_drv_data->vm_ready) {
HWFNC_DBG_H("VM not ready, cannot reset client\n");
return -EAGAIN;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl;
HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id);
for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++)
hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client,
&hw_fences_tbl[i], i, reset_flags);
hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_reset_client);
int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags)
{
enum hw_fence_client_id client_id;
if (client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext);
return -EINVAL;
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext);
return -EINVAL;
}
return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id],
reset_flags);
}
EXPORT_SYMBOL(msm_hw_fence_reset_client_by_id);
int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error)
{
struct msm_hw_fence_client *hw_fence_client;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready ||
!hw_fence_drv_data->vm_ready) {
HWFNC_ERR("hw fence driver or vm not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle) ||
(handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) {
HWFNC_ERR("Invalid handle:%d or client handle:%d max:%d\n", handle,
IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
/* Write to Tx queue */
hw_fence_update_queue(hw_fence_drv_data, hw_fence_client,
hw_fence_drv_data->hw_fences_tbl[handle].ctx_id,
hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle,
flags, 0, error, HW_FENCE_TX_QUEUE - 1);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_update_txq);
int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags)
{
struct msm_hw_fence_client *hw_fence_client;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready ||
!hw_fence_drv_data->vm_ready) {
HWFNC_ERR("hw fence driver or vm not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle) ||
(handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) {
HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%d max:%d or error:%d\n",
client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error);
return -EINVAL;
} else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) {
HWFNC_ERR("invalid flags:0x%x expected:0x%x no support of in-place error update\n",
update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
/* Write to Tx queue */
hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client,
handle, error);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_update_txq_error);
/* the tx client must be the physical id, the rx client the virtual id */
int msm_hw_fence_trigger_signal(void *client_handle,
u32 tx_client_pid, u32 rx_client_vid,
u32 signal_id)
{
struct msm_hw_fence_client *hw_fence_client;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready
|| !hw_fence_drv_data->vm_ready) {
HWFNC_ERR("hw fence driver or vm not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id);
hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid,
rx_client_vid, signal_id);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_trigger_signal);
int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data)
{
struct msm_hw_fence_client *hw_fence_client;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle) || IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) {
HWFNC_ERR("Invalid params client:0x%pK cb_func:0x%pK data:0x%pK\n", client_handle,
cb, data);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->fence_error_cb) {
HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext,
hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
return -EINVAL;
}
hw_fence_client->fence_error_cb_userdata = data;
hw_fence_client->fence_error_cb = cb;
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_register_error_cb);
int msm_hw_fence_deregister_error_cb(void *client_handle)
{
struct msm_hw_fence_client *hw_fence_client;
int ret = 0;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client: 0x%pK\n", client_handle);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (!mutex_trylock(&hw_fence_client->error_cb_lock)) {
HWFNC_ERR("client_id:%d is modifying or using fence_error_cb:0x%pK data:0x%pK\n",
hw_fence_client->client_id, hw_fence_client->fence_error_cb,
hw_fence_client->fence_error_cb_userdata);
return -EAGAIN;
}
if (!hw_fence_client->fence_error_cb) {
HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext,
hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
ret = -EINVAL;
goto exit;
}
hw_fence_client->fence_error_cb = NULL;
hw_fence_client->fence_error_cb_userdata = NULL;
exit:
mutex_unlock(&hw_fence_client->error_cb_lock);
	return ret;
}
EXPORT_SYMBOL(msm_hw_fence_deregister_error_cb);
#if IS_ENABLED(CONFIG_DEBUG_FS)
int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask)
{
struct msm_hw_fence_client *hw_fence_client;
int client_id;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle));
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) {
hw_fence_debug_dump_queues(HW_FENCE_PRINTK, hw_fence_client);
if (dump_clients_mask)
for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++)
if ((dump_clients_mask & (1 << client_id)) &&
hw_fence_drv_data->clients[client_id])
hw_fence_debug_dump_queues(HW_FENCE_PRINTK,
hw_fence_drv_data->clients[client_id]);
}
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE)
hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data);
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS)
hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_dump_debug_data);
int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence *hw_fence;
u64 hash;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -EAGAIN;
} else if (IS_ERR_OR_NULL(client_handle)) {
HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle));
return -EINVAL;
} else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%llx\n",
fence->context, fence->seqno, fence->flags);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, fence->context,
fence->seqno, &hash);
if (!hw_fence) {
HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n",
hw_fence_client->client_id, fence, fence->context, fence->seqno);
return -EINVAL;
}
hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_dump_fence);
#endif /* CONFIG_DEBUG_FS */
/* Function used for simulation purposes only. */
int msm_hw_fence_driver_doorbell_sim(u64 db_mask)
{
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -EAGAIN;
}
HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n",
db_mask, hw_fence_get_qtime(hw_fence_drv_data));
hw_fence_utils_process_doorbell_mask(hw_fence_drv_data, db_mask);
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim);
static int msm_hw_fence_probe_init(struct platform_device *pdev)
{
int rc;
HWFNC_DBG_H("+\n");
hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL);
if (!hw_fence_drv_data)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, hw_fence_drv_data);
hw_fence_drv_data->dev = &pdev->dev;
if (hw_fence_driver_enable) {
/* Initialize HW Fence Driver resources */
rc = hw_fence_init(hw_fence_drv_data);
if (rc)
goto error;
mutex_init(&hw_fence_drv_data->clients_register_lock);
/* set ready value so clients can register */
hw_fence_drv_data->resources_ready = true;
} else {
/* Allocate hw fence driver mem pool and share it with HYP */
rc = hw_fence_utils_alloc_mem(hw_fence_drv_data);
if (rc) {
HWFNC_ERR("failed to alloc base memory\n");
goto error;
}
HWFNC_DBG_INFO("hw fence driver not enabled\n");
}
HWFNC_DBG_H("-\n");
return rc;
error:
dev_set_drvdata(&pdev->dev, NULL);
kfree(hw_fence_drv_data);
	hw_fence_drv_data = ERR_PTR(-EPROBE_DEFER);
HWFNC_ERR("error %d\n", rc);
return rc;
}
static int msm_hw_fence_probe(struct platform_device *pdev)
{
int rc = -EINVAL;
HWFNC_DBG_H("+\n");
if (!pdev) {
HWFNC_ERR("null platform dev\n");
return -EINVAL;
}
if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence"))
rc = msm_hw_fence_probe_init(pdev);
if (rc)
goto err_exit;
HWFNC_DBG_H("-\n");
return 0;
err_exit:
HWFNC_ERR("error %d\n", rc);
return rc;
}
static int msm_hw_fence_remove(struct platform_device *pdev)
{
HWFNC_DBG_H("+\n");
if (!pdev) {
HWFNC_ERR("null platform dev\n");
return -EINVAL;
}
hw_fence_drv_data = dev_get_drvdata(&pdev->dev);
if (!hw_fence_drv_data) {
HWFNC_ERR("null driver data\n");
return -EINVAL;
}
dev_set_drvdata(&pdev->dev, NULL);
kfree(hw_fence_drv_data);
	hw_fence_drv_data = ERR_PTR(-EPROBE_DEFER);
HWFNC_DBG_H("-\n");
return 0;
}
static const struct of_device_id msm_hw_fence_dt_match[] = {
{.compatible = "qcom,msm-hw-fence"},
{}
};
static struct platform_driver msm_hw_fence_driver = {
.probe = msm_hw_fence_probe,
.remove = msm_hw_fence_remove,
.driver = {
.name = "msm-hw-fence",
.of_match_table = of_match_ptr(msm_hw_fence_dt_match),
},
};
static int __init msm_hw_fence_init(void)
{
int rc = 0;
HWFNC_DBG_H("+\n");
rc = platform_driver_register(&msm_hw_fence_driver);
if (rc) {
HWFNC_ERR("%s: failed to register platform driver\n",
__func__);
return rc;
}
HWFNC_DBG_H("-\n");
return 0;
}
static void __exit msm_hw_fence_exit(void)
{
HWFNC_DBG_H("+\n");
platform_driver_unregister(&msm_hw_fence_driver);
HWFNC_DBG_H("-\n");
}
module_param_named(enable, hw_fence_driver_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Enable hardware fences");
module_init(msm_hw_fence_init);
module_exit(msm_hw_fence_exit);
MODULE_DESCRIPTION("QTI HW Fence Driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,335 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/soc/qcom/msm_hw_fence.h>
#include "msm_hw_fence_synx_translation.h"
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_debug.h"
/**
* MAX_SUPPORTED_DPU0:
 * Maximum offset of supported dpu client ids (HW_FENCE_CLIENT_ID_CTL0 through
 * HW_FENCE_CLIENT_ID_CTL5)
*/
#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0)
static int to_synx_status(int hw_fence_status_code)
{
int synx_status_code;
switch (hw_fence_status_code) {
case 0:
synx_status_code = SYNX_SUCCESS;
break;
case -ENOMEM:
synx_status_code = -SYNX_NOMEM;
break;
case -EPERM:
synx_status_code = -SYNX_NOPERM;
break;
case -ETIMEDOUT:
synx_status_code = -SYNX_TIMEOUT;
break;
case -EALREADY:
synx_status_code = -SYNX_ALREADY;
break;
case -ENOENT:
synx_status_code = -SYNX_NOENT;
break;
case -EINVAL:
synx_status_code = -SYNX_INVALID;
break;
case -EBUSY:
synx_status_code = -SYNX_BUSY;
break;
default:
synx_status_code = hw_fence_status_code;
break;
}
return synx_status_code;
}
static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id)
{
enum hw_fence_client_id hw_fence_client_id;
switch ((int)synx_client_id) {
case SYNX_CLIENT_HW_FENCE_GFX_CTX0:
hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0;
break;
case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
HW_FENCE_CLIENT_ID_IPE;
break;
case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 +
HW_FENCE_CLIENT_ID_VPU;
break;
case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 +
HW_FENCE_CLIENT_ID_CTL0;
break;
case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE7_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 +
HW_FENCE_CLIENT_ID_IFE0;
break;
default:
HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id);
hw_fence_client_id = HW_FENCE_CLIENT_MAX;
break;
}
return hw_fence_client_id;
}
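/*
 * Illustrative mapping: SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + 2 falls in the dpu
 * range above and resolves to HW_FENCE_CLIENT_ID_CTL0 + 2, i.e.
 * HW_FENCE_CLIENT_ID_CTL2.
 */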
static bool is_hw_fence_client(enum synx_client_id synx_client_id)
{
return synx_client_id >= SYNX_HW_FENCE_CLIENT_START
&& synx_client_id < SYNX_HW_FENCE_CLIENT_END;
}
struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params)
{
struct synx_session *session = NULL;
enum hw_fence_client_id client_id;
void *client_handle;
if (!hw_fence_driver_enable)
return ERR_PTR(-SYNX_INVALID);
if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->ptr)) {
HWFNC_ERR("invalid params:0x%pK params->ptr:0x%pK\n", params,
IS_ERR_OR_NULL(params) ? NULL : params->ptr);
return ERR_PTR(-SYNX_INVALID);
}
client_id = _get_hw_fence_client_id(params->id);
if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id);
return ERR_PTR(-SYNX_INVALID);
}
session = kzalloc(sizeof(struct synx_session), GFP_KERNEL);
if (!session)
return ERR_PTR(-SYNX_NOMEM);
client_handle = msm_hw_fence_register(client_id,
(struct msm_hw_fence_mem_addr *)params->ptr);
if (IS_ERR_OR_NULL(client_handle)) {
kfree(session);
HWFNC_ERR("failed to initialize synx_id:%d ret:%d\n", params->id,
PTR_ERR(client_handle));
return ERR_PTR(to_synx_status(PTR_ERR(client_handle)));
}
session->client = client_handle;
session->type = params->id;
HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id);
return session;
}
EXPORT_SYMBOL(synx_hwfence_initialize);
int synx_hwfence_uninitialize(struct synx_session *session)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
ret = msm_hw_fence_deregister(session->client);
if (ret)
HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret);
else
kfree(session);
return to_synx_status(ret);
}
EXPORT_SYMBOL(synx_hwfence_uninitialize);
int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params)
{
int ret = 0;
struct msm_hw_fence_create_params hwfence_params;
u64 handle;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
IS_ERR_OR_NULL(params)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, params);
return -SYNX_INVALID;
}
if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) ||
!(params->flags & SYNX_CREATE_DMA_FENCE) ||
(params->flags & SYNX_CREATE_CSL_FENCE) ||
IS_ERR_OR_NULL(params->fence)) {
HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n",
session->type, params->h_synx, params->flags, params->fence);
return -SYNX_INVALID;
}
hwfence_params.fence = params->fence;
hwfence_params.handle = &handle;
ret = msm_hw_fence_create(session->client, &hwfence_params);
if (ret) {
HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type,
params->fence, params->flags, ret);
return to_synx_status(ret);
}
if (handle > U32_MAX) {
HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type,
handle);
msm_hw_fence_destroy_with_handle(session->client, handle);
return -SYNX_INVALID;
}
*params->h_synx = handle;
return SYNX_SUCCESS;
}
EXPORT_SYMBOL(synx_hwfence_create);
int synx_hwfence_release(struct synx_session *session, u32 h_synx)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
ret = msm_hw_fence_destroy_with_handle(session->client, h_synx);
if (ret)
HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type,
h_synx, ret);
return to_synx_status(ret);
}
EXPORT_SYMBOL(synx_hwfence_release);
int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
ret = msm_hw_fence_update_txq(session->client, h_synx, 0, (u32)status);
if (ret)
HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n",
session->type, h_synx, status, ret);
return to_synx_status(ret);
}
EXPORT_SYMBOL(synx_hwfence_signal);
int synx_hwfence_recover(enum synx_client_id id)
{
int ret;
if (!is_hw_fence_client(id)) {
HWFNC_ERR("invalid synx_id:%d\n", id);
return -SYNX_INVALID;
}
ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id),
MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
if (ret)
HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret);
return to_synx_status(ret);
}
EXPORT_SYMBOL(synx_hwfence_recover);
static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params)
{
u64 handle;
int ret;
if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
IS_ERR_OR_NULL(params->new_h_synx) ||
!(params->flags & SYNX_IMPORT_DMA_FENCE) ||
(params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) {
HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n",
client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx,
IS_ERR_OR_NULL(params) ? 0 : params->flags,
IS_ERR_OR_NULL(params) ? NULL : params->fence);
return -SYNX_INVALID;
}
ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
NULL, 1, true);
if (ret) {
HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence,
params->flags, ret);
return to_synx_status(ret);
}
if (handle > U32_MAX) {
HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle);
msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
NULL, 1, false);
return -SYNX_INVALID;
}
*params->new_h_synx = handle;
return SYNX_SUCCESS;
}
static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params)
{
int i, ret;
if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) {
HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client,
params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences);
return -SYNX_INVALID;
}
for (i = 0; i < params->num_fences; i++) {
ret = synx_hwfence_import_indv(client, &params->list[i]);
if (ret) {
HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i,
params->list[i].fence, ret);
return ret;
}
}
return SYNX_SUCCESS;
}
int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)
|| IS_ERR_OR_NULL(params)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, params);
return -SYNX_INVALID;
}
if (params->type == SYNX_IMPORT_ARR_PARAMS)
ret = synx_hwfence_import_arr(session->client, &params->arr);
else
ret = synx_hwfence_import_indv(session->client, &params->indv);
if (ret)
HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type,
(params->type == SYNX_IMPORT_ARR_PARAMS) ? "arr" : "indv", ret);
return ret;
}
EXPORT_SYMBOL(synx_hwfence_import);
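Taken together, the exports above form a session-scoped API: a client initializes a session against a hw-fence client id, creates or imports handles for dma-fences, signals them through the hardware TX queue, and tears the session down. The following is a minimal, hedged sketch of that lifecycle; the client id, the memory-descriptor pointer, and the SYNX_STATE_SIGNALED_SUCCESS status value are illustrative assumptions, not values mandated by this file.

/*
 * Hypothetical usage sketch of the session API exported above; not part of
 * this driver. The client id, the memory descriptor, and the signal status
 * are assumptions for illustration.
 */
static int example_synx_hwfence_session(struct dma_fence *fence, void *mem_descriptor)
{
	struct synx_initialization_params init_params = {0};
	struct synx_create_params create_params = {0};
	struct synx_session *session;
	u32 h_synx = 0;
	int ret;

	init_params.id = SYNX_CLIENT_HW_FENCE_DPU0_CTL0; /* assumed client id */
	init_params.ptr = mem_descriptor; /* assumed non-NULL queue descriptor */
	session = synx_hwfence_initialize(&init_params);
	if (IS_ERR_OR_NULL(session))
		return -SYNX_INVALID;

	create_params.h_synx = &h_synx;
	create_params.fence = fence;
	create_params.flags = SYNX_CREATE_DMA_FENCE; /* mandatory for create */
	ret = synx_hwfence_create(session, &create_params);
	if (ret)
		goto out;

	/* signal through the TX queue, then drop the handle */
	ret = synx_hwfence_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
	synx_hwfence_release(session, h_synx);
out:
	synx_hwfence_uninitialize(session);
	return ret;
}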

View File

@ -0,0 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
ifneq ($(TARGET_BOARD_PLATFORM), taro)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
endif
endif
endif
endif

View File

@ -0,0 +1,17 @@
PRODUCT_PACKAGES += msm_ext_display.ko
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_PLATFORM), taro)
PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko
endif
endif
DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko

View File

@ -0,0 +1,95 @@
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import filecmp
import os
import re
import subprocess
import sys
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
if not h.startswith(prefix):
print('error: expected prefix [%s] on header [%s]' % (prefix, h))
return False
out_h = os.path.join(gen_dir, h[len(prefix):])
(out_h_dirname, out_h_basename) = os.path.split(out_h)
env = os.environ.copy()
env["LOC_UNIFDEF"] = unifdef
cmd = ["sh", headers_install, h, out_h]
if verbose:
print('run_headers_install: cmd is %s' % cmd)
result = subprocess.call(cmd, env=env)
if result != 0:
print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
return False
return True
def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi):
error_count = 0
for h in mm_drivers_include_uapi:
mm_drivers_uapi_include_prefix = os.path.join(h.split('sync_fence/include/uapi')[0],
'sync_fence', 'include', 'uapi') + os.sep
if not run_headers_install(
verbose, gen_dir, headers_install, unifdef,
mm_drivers_uapi_include_prefix, h): error_count += 1
return error_count
def main():
"""Parse command line arguments and perform top level control."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Arguments that apply to every invocation of this script.
parser.add_argument(
'--verbose', action='store_true',
help='Print output that describes the workings of this script.')
parser.add_argument(
'--header_arch', required=True,
help='The arch for which to generate headers.')
parser.add_argument(
'--gen_dir', required=True,
help='Where to place the generated files.')
parser.add_argument(
'--mm_drivers_include_uapi', required=True, nargs='*',
help='The list of techpack/*/include/uapi header files.')
parser.add_argument(
'--headers_install', required=True,
help='The headers_install tool to process input headers.')
parser.add_argument(
'--unifdef',
required=True,
help='The unifdef tool used by headers_install.')
args = parser.parse_args()
if args.verbose:
print('header_arch [%s]' % args.header_arch)
print('gen_dir [%s]' % args.gen_dir)
print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi)
print('headers_install [%s]' % args.headers_install)
print('unifdef [%s]' % args.unifdef)
return gen_mm_drivers_headers(args.verbose, args.gen_dir,
args.headers_install, args.unifdef, args.mm_drivers_include_uapi)
if __name__ == '__main__':
sys.exit(main())

View File

@ -0,0 +1,43 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_ext_display
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm-ext-disp-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_ext_display.ko
LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

View File

@ -0,0 +1,10 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_msm_ext_display.bzl", "define_msm_ext_display")
package(
default_visibility = [
"//visibility:public"
],
)
define_msm_ext_display()

View File

@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h
obj-m += msm_ext_display.o
msm_ext_display-y := src/msm_ext_display.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

View File

@ -0,0 +1,4 @@
config MSM_EXT_DISPLAY
bool "Enable msm_ext_display"
help
Enable msm_ext_display driver

View File

@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

View File

@ -0,0 +1 @@
CONFIG_MSM_EXT_DISPLAY=y

View File

@ -0,0 +1,31 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
ddk_module(
name = "{}_msm_ext_display".format(tv),
srcs = ["src/msm_ext_display.c"],
out = "msm_ext_display.ko",
defconfig = "defconfig",
kconfig = "Kconfig",
deps = ["//msm-kernel:all_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_msm_ext_display_dist".format(tv),
data = [":{}_msm_ext_display".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_msm_ext_display():
for (t, v) in get_all_variants():
_define_module(t, v)

View File

@ -0,0 +1,702 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/extcon-provider.h>
#include <linux/soc/qcom/msm_ext_display.h>
#include <linux/extcon-provider.h>
struct msm_ext_disp_list {
struct msm_ext_disp_init_data *data;
struct list_head list;
};
struct msm_ext_disp {
struct msm_ext_disp_data ext_disp_data;
struct platform_device *pdev;
struct msm_ext_disp_codec_id current_codec;
struct msm_ext_disp_audio_codec_ops *ops;
struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS];
bool audio_session_on;
struct list_head display_list;
struct mutex lock;
bool update_audio;
};
static const unsigned int msm_ext_disp_supported_cable[] = {
EXTCON_DISP_DP,
EXTCON_DISP_HDMI,
EXTCON_NONE,
};
static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id)
{
int ret = 0;
if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("invalid params\n");
return -EINVAL;
}
ext_disp->audio_sdev[id] = devm_extcon_dev_allocate(
&ext_disp->pdev->dev,
msm_ext_disp_supported_cable);
if (IS_ERR(ext_disp->audio_sdev[id]))
return PTR_ERR(ext_disp->audio_sdev[id]);
ret = devm_extcon_dev_register(&ext_disp->pdev->dev,
ext_disp->audio_sdev[id]);
if (ret) {
pr_err("audio registration failed\n");
return ret;
}
pr_debug("extcon registration done\n");
return ret;
}
static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp,
int id)
{
if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid params\n");
return;
}
devm_extcon_dev_unregister(&ext_disp->pdev->dev,
ext_disp->audio_sdev[id]);
}
static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
{
switch (type) {
case EXT_DISPLAY_TYPE_HDMI:
return "EXT_DISPLAY_TYPE_HDMI";
case EXT_DISPLAY_TYPE_DP:
return "EXT_DISPLAY_TYPE_DP";
default: return "???";
}
}
static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_init_data *data)
{
struct msm_ext_disp_list *node;
if (!ext_disp || !data) {
pr_err("Invalid params\n");
return -EINVAL;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->data = data;
list_add(&node->list, &ext_disp->display_list);
pr_debug("Added new display (%s) ctld (%d) stream (%d)\n",
msm_ext_disp_name(data->codec.type),
data->codec.ctrl_id, data->codec.stream_id);
return 0;
}
static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_init_data *data)
{
struct msm_ext_disp_list *node;
struct list_head *pos = NULL;
if (!ext_disp || !data) {
pr_err("Invalid params\n");
return -EINVAL;
}
list_for_each(pos, &ext_disp->display_list) {
node = list_entry(pos, struct msm_ext_disp_list, list);
if (node->data == data) {
list_del(pos);
pr_debug("Deleted the intf data\n");
kfree(node);
return 0;
}
}
pr_debug("Intf data not present for delete op\n");
return 0;
}
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec,
struct msm_ext_disp_init_data **data)
{
int ret = 0;
struct msm_ext_disp_list *node;
struct list_head *position = NULL;
if (!ext_disp || !data || !codec) {
pr_err("Invalid params\n");
ret = -EINVAL;
goto end;
}
*data = NULL;
list_for_each(position, &ext_disp->display_list) {
node = list_entry(position, struct msm_ext_disp_list, list);
if (node->data->codec.type == codec->type &&
node->data->codec.stream_id == codec->stream_id &&
node->data->codec.ctrl_id == codec->ctrl_id) {
*data = node->data;
break;
}
}
if (!*data)
ret = -ENODEV;
end:
return ret;
}
static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state new_state)
{
int ret = 0;
int state;
struct extcon_dev *audio_sdev;
if (!ext_disp->ops) {
pr_err("codec not registered, skip notification\n");
ret = -EPERM;
goto end;
}
audio_sdev = ext_disp->audio_sdev[codec->stream_id];
state = extcon_get_state(audio_sdev, codec->type);
if (state == !!new_state) {
ret = -EEXIST;
pr_debug("same state\n");
goto end;
}
ret = extcon_set_state_sync(audio_sdev,
codec->type, !!new_state);
if (ret)
pr_err("Failed to set state. Error = %d\n", ret);
else
pr_debug("state changed to %d\n", new_state);
end:
return ret;
}
static struct msm_ext_disp *msm_ext_disp_validate_and_get(
struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
struct msm_ext_disp_data *ext_disp_data;
struct msm_ext_disp *ext_disp;
if (!pdev) {
pr_err("invalid platform device\n");
goto err;
}
if (!codec ||
codec->type >= EXT_DISPLAY_TYPE_MAX ||
codec->ctrl_id != 0 ||
codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("invalid display codec id\n");
goto err;
}
if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
state >= EXT_DISPLAY_CABLE_STATE_MAX) {
pr_err("invalid HPD state (%d)\n", state);
goto err;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("invalid drvdata\n");
goto err;
}
ext_disp = container_of(ext_disp_data,
struct msm_ext_disp, ext_disp_data);
return ext_disp;
err:
return ERR_PTR(-EINVAL);
}
static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec)
{
int ret = 0;
struct msm_ext_disp_init_data *data = NULL;
ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data);
if (ret || !data) {
pr_err("Display not found (%s) ctld (%d) stream (%d)\n",
msm_ext_disp_name(codec->type),
codec->ctrl_id, codec->stream_id);
goto end;
}
if (ext_disp->ops) {
*ext_disp->ops = data->codec_ops;
ext_disp->current_codec = *codec;
/* update pdev for interface to use */
ext_disp->ext_disp_data.intf_pdev = data->pdev;
ext_disp->ext_disp_data.intf_data = data->intf_data;
}
end:
return ret;
}
static int msm_ext_disp_audio_config(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
int ret = 0;
struct msm_ext_disp *ext_disp;
ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
if (IS_ERR(ext_disp)) {
ret = PTR_ERR(ext_disp);
goto end;
}
if (state == EXT_DISPLAY_CABLE_CONNECT) {
ret = msm_ext_disp_select_audio_codec(pdev, codec);
} else {
mutex_lock(&ext_disp->lock);
if (ext_disp->ops)
memset(ext_disp->ops, 0, sizeof(*ext_disp->ops));
pr_debug("codec ops cleared for %s\n",
msm_ext_disp_name(ext_disp->current_codec.type));
ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
mutex_unlock(&ext_disp->lock);
}
end:
return ret;
}
static int msm_ext_disp_audio_notify(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
int ret = 0;
struct msm_ext_disp *ext_disp;
ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
if (IS_ERR(ext_disp)) {
ret = PTR_ERR(ext_disp);
goto end;
}
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_process_audio(ext_disp, codec, state);
mutex_unlock(&ext_disp->lock);
end:
return ret;
}
static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
{
int ret;
struct msm_ext_disp_init_data *data = NULL;
if (!ext_disp) {
pr_err("invalid input\n");
return;
}
ret = msm_ext_disp_get_intf_data(ext_disp,
&ext_disp->current_codec, &data);
if (ret) {
pr_err("%s not found\n",
msm_ext_disp_name(ext_disp->current_codec.type));
return;
}
*ext_disp->ops = data->codec_ops;
data->codec_ops.ready(ext_disp->pdev);
}
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
return msm_ext_disp_register_audio_codec(pdev, ops);
}
/**
 * msm_ext_disp_register_audio_codec() - register audio codec ops with the
 * display driver for HDMI/DisplayPort use-case support.
 * @pdev: platform device of the external display driver
 * @ops: audio codec operations to register
 *
 * Return: 0 on success, negative value on error
 */
int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !ops) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
if (ext_disp->ops) {
pr_err("Codec already registered\n");
ret = -EINVAL;
goto end;
}
ext_disp->ops = ops;
pr_debug("audio codec registered\n");
if (ext_disp->update_audio) {
ext_disp->update_audio = false;
msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec);
msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec,
EXT_DISPLAY_CABLE_CONNECT);
}
end:
mutex_unlock(&ext_disp->lock);
if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX)
msm_ext_disp_ready_for_display(ext_disp);
return ret;
}
EXPORT_SYMBOL(msm_ext_disp_register_audio_codec);
int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !codec) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
if (!ext_disp->ops) {
pr_warn("Codec is not registered\n");
ext_disp->update_audio = true;
ext_disp->current_codec = *codec;
ret = -EINVAL;
goto end;
}
ret = msm_ext_disp_update_audio_ops(ext_disp, codec);
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL(msm_ext_disp_select_audio_codec);
static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
{
struct msm_ext_disp_audio_codec_ops *ops;
if (!init_data) {
pr_err("Invalid init_data\n");
return -EINVAL;
}
if (!init_data->pdev) {
pr_err("Invalid display intf pdev\n");
return -EINVAL;
}
if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX ||
init_data->codec.ctrl_id != 0 ||
init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n",
init_data->codec.type,
init_data->codec.ctrl_id,
init_data->codec.stream_id);
return -EINVAL;
}
ops = &init_data->codec_ops;
if (!ops->audio_info_setup || !ops->get_audio_edid_blk ||
!ops->cable_status || !ops->get_intf_id ||
!ops->teardown_done || !ops->acknowledge ||
!ops->ready) {
pr_err("Invalid codec operation pointers\n");
return -EINVAL;
}
return 0;
}
int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
int ret = 0;
struct msm_ext_disp_init_data *data = NULL;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !init_data) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_validate_intf(init_data);
if (ret)
goto end;
ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data);
if (!ret) {
pr_err("%s already registered. ctrl(%d) stream(%d)\n",
msm_ext_disp_name(init_data->codec.type),
init_data->codec.ctrl_id,
init_data->codec.stream_id);
goto end;
}
ret = msm_ext_disp_add_intf_data(ext_disp, init_data);
if (ret)
goto end;
init_data->intf_ops.audio_config = msm_ext_disp_audio_config;
init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify;
pr_debug("%s registered. ctrl(%d) stream(%d)\n",
msm_ext_disp_name(init_data->codec.type),
init_data->codec.ctrl_id,
init_data->codec.stream_id);
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL(msm_ext_disp_register_intf);
int msm_ext_disp_deregister_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !init_data) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_remove_intf_data(ext_disp, init_data);
if (ret)
goto end;
init_data->intf_ops.audio_config = NULL;
init_data->intf_ops.audio_notify = NULL;
pr_debug("%s deregistered\n",
msm_ext_disp_name(init_data->codec.type));
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL(msm_ext_disp_deregister_intf);
static int msm_ext_disp_probe(struct platform_device *pdev)
{
int ret = 0, id;
struct device_node *of_node = NULL;
struct msm_ext_disp *ext_disp = NULL;
if (!pdev) {
pr_err("No platform device found\n");
ret = -ENODEV;
goto end;
}
of_node = pdev->dev.of_node;
if (!of_node) {
pr_err("No device node found\n");
ret = -ENODEV;
goto end;
}
ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL);
if (!ext_disp) {
ret = -ENOMEM;
goto end;
}
platform_set_drvdata(pdev, &ext_disp->ext_disp_data);
ext_disp->pdev = pdev;
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) {
ret = msm_ext_disp_extcon_register(ext_disp, id);
if (ret)
goto child_node_failure;
}
ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
if (ret) {
pr_err("Failed to add child devices. Error = %d\n", ret);
goto child_node_failure;
} else {
pr_debug("%s: Added child devices.\n", __func__);
}
mutex_init(&ext_disp->lock);
INIT_LIST_HEAD(&ext_disp->display_list);
ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
ext_disp->update_audio = false;
return ret;
child_node_failure:
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
msm_ext_disp_extcon_unregister(ext_disp, id);
devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
return ret;
}
static int msm_ext_disp_remove(struct platform_device *pdev)
{
int ret = 0, id;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev) {
pr_err("No platform device\n");
ret = -ENODEV;
goto end;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("No drvdata found\n");
ret = -ENODEV;
goto end;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
msm_ext_disp_extcon_unregister(ext_disp, id);
mutex_destroy(&ext_disp->lock);
devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
return ret;
}
static const struct of_device_id msm_ext_dt_match[] = {
{.compatible = "qcom,msm-ext-disp",},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, msm_ext_dt_match);
static struct platform_driver this_driver = {
.probe = msm_ext_disp_probe,
.remove = msm_ext_disp_remove,
.driver = {
.name = "msm-ext-disp",
.of_match_table = msm_ext_dt_match,
},
};
static int __init msm_ext_disp_init(void)
{
int ret = 0;
ret = platform_driver_register(&this_driver);
if (ret)
pr_err("failed, ret = %d\n", ret);
return ret;
}
subsys_initcall(msm_ext_disp_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM External Display");

View File

@ -0,0 +1,42 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=sync_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync_fence.ko
LOCAL_MODULE_KBUILD_NAME := sync_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

View File

@ -0,0 +1,16 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_sync_fence.bzl", "define_sync_fence")
package(
default_visibility = [
"//visibility:public"
],
)
ddk_headers(
name = "sync_fence_uapi_headers",
hdrs = glob(["include/uapi/sync_fence/*.h"]),
includes = ["include"]
)
define_sync_fence()

View File

@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
KDIR := $(TOP)/kernel_platform/msm-kernel
LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/
include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h
ifdef CONFIG_QCOM_SPEC_SYNC
obj-m += sync_fence.o
sync_fence-y := src/qcom_sync_file.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

View File

@ -0,0 +1,4 @@
config QCOM_SPEC_SYNC
bool "Enable spec fence"
help
Enable sync_fence driver

View File

@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

View File

@ -0,0 +1 @@
CONFIG_QCOM_SPEC_SYNC=y

View File

@ -0,0 +1,33 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
ddk_module(
name = "{}_sync_fence".format(tv),
srcs = ["src/qcom_sync_file.c"],
out = "sync_fence.ko",
kconfig = "Kconfig",
defconfig = "defconfig",
deps = [
"//msm-kernel:all_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_sync_fence_dist".format(tv),
data = [":{}_sync_fence".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_sync_fence():
for (t, v) in get_all_variants():
_define_module(t, v)

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
header-y += sync_fence/

View File

@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_LINUX_SPEC_SYNC_H
#define _UAPI_LINUX_SPEC_SYNC_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define SPEC_FENCE_SIGNAL_ANY 0x1
#define SPEC_FENCE_SIGNAL_ALL 0x2
/**
* struct fence_bind_data - data passed to bind ioctl
 * @out_bind_fd: file descriptor of the speculative fence array to bind to
 * @fds: user pointer to the list of child fence file descriptors
*/
struct fence_bind_data {
__u32 out_bind_fd;
__u64 fds;
};
/**
* struct fence_create_data - detailed fence information
 * @num_fences: Total number of fences the array will carry.
 * @flags: Flags specifying how the array is signaled.
 * @out_bind_fd: Returns the fence array fd.
*/
struct fence_create_data {
__u32 num_fences;
__u32 flags;
__u32 out_bind_fd;
};
#define SPEC_SYNC_MAGIC '>'
/**
* DOC: SPEC_SYNC_IOC_BIND - bind two fences
*
 * Takes a struct fence_bind_data and binds the child fds to the fence array
 * referred to by out_bind_fd.
*/
#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data)
/**
* DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array
*
 * Takes a struct fence_create_data. If num_fences is > 0, a fence array is
 * created and its fd is returned in fence_create_data.out_bind_fd.
*/
#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data)
/**
* DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version
*
* Returns Spec driver version.
*/
#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64)
#endif /* _UAPI_LINUX_SPEC_SYNC_H */
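From userspace the three ioctls compose as create-then-bind. The sketch below is a hedged illustration with error handling trimmed: the /dev/spec_sync node name follows from DRV_NAME in the driver source further down, child_fd is assumed to be a valid sync_file fd produced elsewhere, and the installed header path is an assumption.

/*
 * Hypothetical userspace sketch of the spec-sync flow; not part of this
 * header. Error handling (e.g. closing the created fd on failure) is
 * intentionally trimmed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sync_fence/qcom_sync_file.h> /* assumed install path */

static int example_spec_sync(int child_fd)
{
	struct fence_create_data create = { .num_fences = 1,
					    .flags = SPEC_FENCE_SIGNAL_ALL };
	struct fence_bind_data bind = {0};
	int fds[1] = { child_fd };
	uint64_t version = 0;
	int dev_fd = open("/dev/spec_sync", O_RDWR); /* node from DRV_NAME */

	if (dev_fd < 0)
		return -1;
	ioctl(dev_fd, SPEC_SYNC_IOC_GET_VER, &version);

	/* create a speculative array; its fd is returned in out_bind_fd */
	if (ioctl(dev_fd, SPEC_SYNC_IOC_CREATE_FENCE, &create) < 0)
		goto err;

	/* later, once the real fence exists, bind it into the array */
	bind.out_bind_fd = create.out_bind_fd;
	bind.fds = (uint64_t)(uintptr_t)fds;
	if (ioctl(dev_fd, SPEC_SYNC_IOC_BIND, &bind) < 0)
		goto err;

	close(dev_fd);
	return create.out_bind_fd;
err:
	close(dev_fd);
	return -1;
}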

View File

@ -0,0 +1,584 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/sync_file.h>
#include <uapi/sync_fence/qcom_sync_file.h>
#include <linux/soc/qcom/qcom_sync_file.h>
#define CLASS_NAME "sync"
#define DRV_NAME "spec_sync"
#define DRV_VERSION 1
#define NAME_LEN 32
#define FENCE_MIN 1
#define FENCE_MAX 32
#if IS_ENABLED(CONFIG_DEBUG_FS)
#define MAX_DEVICE_SUPPORTED 2
#else
#define MAX_DEVICE_SUPPORTED 1
#endif
#define DUMMY_CONTEXT 0xfafadadafafadada
#define DUMMY_SEQNO 0xefa9ce00efa9ce00
struct dummy_spec_fence {
struct dma_fence fence;
spinlock_t lock;
};
struct sync_device {
/* device info */
struct class *dev_class;
dev_t dev_num;
struct device *dev;
struct cdev *cdev;
struct mutex lock;
struct dummy_spec_fence *dummy_fence;
/* device drv data */
atomic_t device_available;
char name[NAME_LEN];
uint32_t version;
struct mutex l_lock;
struct list_head fence_array_list;
wait_queue_head_t wait_queue;
};
struct fence_array_node {
struct dma_fence_array *fence_array;
struct list_head list;
};
/* Speculative Sync Device Driver State */
static struct sync_device sync_dev;
static const char *spec_fence_get_name_dummy(struct dma_fence *fence)
{
return "dummy_fence";
}
static const struct dma_fence_ops dummy_spec_fence_ops = {
.get_driver_name = spec_fence_get_name_dummy,
.get_timeline_name = spec_fence_get_name_dummy,
};
static bool sanitize_fence_array(struct dma_fence_array *fence)
{
struct fence_array_node *node;
int ret = false;
mutex_lock(&sync_dev.l_lock);
list_for_each_entry(node, &sync_dev.fence_array_list, list) {
if (node->fence_array == fence) {
ret = true;
break;
}
}
mutex_unlock(&sync_dev.l_lock);
return ret;
}
static void clear_fence_array_tracker(bool force_clear)
{
struct fence_array_node *node, *temp;
struct dma_fence_array *array;
struct dma_fence *fence;
bool is_signaled;
mutex_lock(&sync_dev.l_lock);
list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) {
array = node->fence_array;
fence = &array->base;
is_signaled = dma_fence_is_signaled(fence);
if (force_clear && !array->fences)
array->num_fences = 0;
pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled,
atomic_read(&array->num_pending));
if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending))
dma_fence_signal(fence);
if (force_clear || is_signaled) {
dma_fence_put(fence);
list_del(&node->list);
kfree(node);
}
}
mutex_unlock(&sync_dev.l_lock);
}
static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
{
if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) {
pr_err("number of device fds are limited to %d, device opened:%d\n",
MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available));
return NULL;
} else if (!atomic_read(&obj->device_available)) {
memset(obj->name, 0, NAME_LEN);
strscpy(obj->name, name, sizeof(obj->name));
}
atomic_inc(&obj->device_available);
return obj;
}
static int spec_sync_open(struct inode *inode, struct file *file)
{
char task_comm[TASK_COMM_LEN];
struct sync_device *obj = &sync_dev;
int ret = 0;
if (!inode || !inode->i_cdev || !file) {
pr_err("NULL pointer passed\n");
return -EINVAL;
}
mutex_lock(&sync_dev.lock);
get_task_comm(task_comm, current);
obj = spec_fence_init_locked(obj, task_comm);
if (!obj) {
pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm);
ret = -EEXIST;
goto end;
}
file->private_data = obj;
end:
mutex_unlock(&sync_dev.lock);
return ret;
}
static int spec_sync_release(struct inode *inode, struct file *file)
{
int ret = 0;
struct sync_device *obj = file->private_data;
mutex_lock(&sync_dev.lock);
if (!atomic_read(&obj->device_available)) {
pr_err("no device to release!!\n");
ret = -ENODEV;
goto end;
}
atomic_dec(&obj->device_available);
if (!atomic_read(&obj->device_available))
clear_fence_array_tracker(true);
end:
mutex_unlock(&sync_dev.lock);
return ret;
}
static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg)
{
uint32_t version = obj->version;
if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t)))
return -EFAULT;
return 0;
}
static int spec_sync_create_array(struct fence_create_data *f)
{
int fd = get_unused_fd_flags(O_CLOEXEC);
struct sync_file *sync_file;
struct dma_fence_array *fence_array;
struct fence_array_node *node;
struct dma_fence **fences;
struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence;
bool signal_any;
int i, ret = 0;
if (fd < 0) {
pr_err("failed to get_unused_fd_flags\n");
return fd;
}
if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) {
pr_err("invalid arguments num_fences:%d\n", f->num_fences);
ret = -ERANGE;
goto error_args;
}
fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO);
if (!fences) {
ret = -ENOMEM;
goto error_args;
}
for (i = 0; i < f->num_fences; i++) {
fences[i] = &dummy_fence_p->fence;
/*
 * Increase the dummy-fence's refcount here. We must do this since any call
 * to fence-array release while the dummy-fences are the children of the
 * fence-array will decrement the dummy-fence refcount. Therefore, to prevent
 * the release of the dummy-fence, we must keep an extra refcount for every
 * time that fence-array->release can decrement its children's refcount. The
 * extra refcount is dropped implicitly when dma_fence_put(&fence_array->base)
 * is called.
 */
dma_fence_get(&dummy_fence_p->fence);
}
signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true;
fence_array = dma_fence_array_create(f->num_fences, fences,
dma_fence_context_alloc(1), 0, signal_any);
if (!fence_array) {
/* fence-array create failed, remove extra refcounts */
for (i = 0; i < f->num_fences; i++)
dma_fence_put(&dummy_fence_p->fence);
kfree(fences);
ret = -EINVAL;
goto error_args;
}
/* Set the enable-signal bit so that signaling is not done during wait */
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags);
set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags);
sync_file = sync_file_create(&fence_array->base);
if (!sync_file) {
pr_err("sync_file_create fail\n");
ret = -EINVAL;
goto err;
}
node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL);
if (!node) {
fput(sync_file->file);
ret = -ENOMEM;
goto err;
}
fd_install(fd, sync_file->file);
node->fence_array = fence_array;
mutex_lock(&sync_dev.l_lock);
list_add_tail(&node->list, &sync_dev.fence_array_list);
mutex_unlock(&sync_dev.l_lock);
pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences);
return fd;
err:
dma_fence_put(&fence_array->base);
error_args:
put_unused_fd(fd);
return ret;
}
static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg)
{
struct fence_create_data f;
int fd;
if (copy_from_user(&f, (void __user *)arg, sizeof(f)))
return -EFAULT;
fd = spec_sync_create_array(&f);
if (fd < 0)
return fd;
f.out_bind_fd = fd;
if (copy_to_user((void __user *)arg, &f, sizeof(f)))
return -EFAULT;
return 0;
}
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
int ret;
/* Check if fence-array is a speculative fence */
if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) {
pr_err("invalid fence!\n");
return -EINVAL;
} else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) {
/* This fence-array is already bound, just return success */
return 0;
}
/* Wait for the fence-array bind */
ret = wait_event_timeout(sync_dev.wait_queue,
test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags),
msecs_to_jiffies(timeout_ms));
if (!ret) {
pr_err("timed out waiting for bind fence-array %d\n", timeout_ms);
ret = -ETIMEDOUT;
} else {
ret = 0;
}
return ret;
}
EXPORT_SYMBOL(spec_sync_wait_bind_array);
static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info)
{
struct dma_fence_array *fence_array;
struct dma_fence *fence = NULL;
struct dma_fence *user_fence = NULL;
int *user_fds, ret = 0, i;
u32 num_fences;
fence = sync_file_get_fence(sync_bind_info->out_bind_fd);
if (!fence) {
pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd);
return -EINVAL;
}
if (dma_fence_is_signaled(fence)) {
pr_err("spec fence is already signaled, out_fd:%d\n",
sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto end;
}
fence_array = container_of(fence, struct dma_fence_array, base);
if (!sanitize_fence_array(fence_array)) {
pr_err("spec fence not found in the registered list out_fd:%d\n",
sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto end;
}
num_fences = fence_array->num_fences;
for (i = 0; i < num_fences; i++) {
if (!(fence_array->fences[i]->context == DUMMY_CONTEXT &&
fence_array->fences[i]->seqno == DUMMY_SEQNO)) {
pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n",
sync_bind_info->out_bind_fd, dma_fence_get_status(fence),
fence->flags);
ret = -EINVAL;
goto end;
}
}
user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL);
if (!user_fds) {
ret = -ENOMEM;
goto end;
}
if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds,
num_fences * sizeof(int))) {
ret = -EFAULT;
goto out;
}
spin_lock(fence->lock);
for (i = 0; i < num_fences; i++) {
user_fence = sync_file_get_fence(user_fds[i]);
if (!user_fence) {
pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n",
user_fds[i], sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto bind_invalid;
}
fence_array->fences[i] = user_fence;
/*
* At this point the fence-array fully contains valid fences and no more the
* dummy-fence, therefore, we must release the extra refcount that the
* creation of the speculative fence added to the dummy-fence.
*/
dma_fence_put(&sync_dev.dummy_fence->fence);
pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd,
i, user_fds[i], fence_array->fences[i]->error);
}
clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
spin_unlock(fence->lock);
dma_fence_enable_sw_signaling(&fence_array->base);
clear_fence_array_tracker(false);
bind_invalid:
set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags);
wake_up_all(&sync_dev.wait_queue);
if (ret) {
dma_fence_set_error(fence, -EINVAL);
spin_unlock(fence->lock);
dma_fence_signal(fence);
clear_fence_array_tracker(false);
}
out:
kfree(user_fds);
end:
dma_fence_put(fence);
return ret;
}
static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg)
{
struct fence_bind_data sync_bind_info;
if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data)))
return -EFAULT;
if (sync_bind_info.out_bind_fd < 0) {
pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd);
return -EINVAL;
}
return spec_sync_bind_array(&sync_bind_info);
}
static long spec_sync_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct sync_device *obj = file->private_data;
int ret = 0;
switch (cmd) {
case SPEC_SYNC_IOC_CREATE_FENCE:
ret = spec_sync_ioctl_create_fence(obj, arg);
break;
case SPEC_SYNC_IOC_BIND:
ret = spec_sync_ioctl_bind(obj, arg);
break;
case SPEC_SYNC_IOC_GET_VER:
ret = spec_sync_ioctl_get_ver(obj, arg);
break;
default:
ret = -ENOTTY;
}
return ret;
}
const struct file_operations spec_sync_fops = {
.owner = THIS_MODULE,
.open = spec_sync_open,
.release = spec_sync_release,
.unlocked_ioctl = spec_sync_ioctl,
};
static int spec_sync_register_device(void)
{
struct dummy_spec_fence *dummy_fence_p = NULL;
int ret;
sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME);
if (IS_ERR(sync_dev.dev_class)) {
pr_err("%s: class_create fail.\n", __func__);
goto res_err;
}
ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME);
if (ret) {
pr_err("%s: alloc_chrdev_region fail.\n", __func__);
goto alloc_chrdev_region_err;
}
sync_dev.dev = device_create(sync_dev.dev_class, NULL,
sync_dev.dev_num,
&sync_dev, DRV_NAME);
if (IS_ERR(sync_dev.dev)) {
pr_err("%s: device_create fail.\n", __func__);
goto device_create_err;
}
sync_dev.cdev = cdev_alloc();
if (sync_dev.cdev == NULL) {
pr_err("%s: cdev_alloc fail.\n", __func__);
goto cdev_alloc_err;
}
cdev_init(sync_dev.cdev, &spec_sync_fops);
sync_dev.cdev->owner = THIS_MODULE;
ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1);
if (ret) {
pr_err("%s: cdev_add fail.\n", __func__);
goto cdev_add_err;
}
sync_dev.version = DRV_VERSION;
mutex_init(&sync_dev.lock);
mutex_init(&sync_dev.l_lock);
INIT_LIST_HEAD(&sync_dev.fence_array_list);
init_waitqueue_head(&sync_dev.wait_queue);
dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL);
if (!dummy_fence_p) {
ret = -ENOMEM;
goto cdev_add_err;
}
spin_lock_init(&dummy_fence_p->lock);
dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock,
DUMMY_CONTEXT, DUMMY_SEQNO);
sync_dev.dummy_fence = dummy_fence_p;
return 0;
cdev_add_err:
cdev_del(sync_dev.cdev);
cdev_alloc_err:
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
device_create_err:
unregister_chrdev_region(sync_dev.dev_num, 1);
alloc_chrdev_region_err:
class_destroy(sync_dev.dev_class);
res_err:
return -ENODEV;
}
static int __init spec_sync_init(void)
{
int ret = 0;
ret = spec_sync_register_device();
if (ret) {
pr_err("%s: speculative sync driver register fail.\n", __func__);
return ret;
}
return ret;
}
static void __exit spec_sync_deinit(void)
{
cdev_del(sync_dev.cdev);
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
unregister_chrdev_region(sync_dev.dev_num, 1);
class_destroy(sync_dev.dev_class);
dma_fence_put(&sync_dev.dummy_fence->fence);
}
module_init(spec_sync_init);
module_exit(spec_sync_deinit);
MODULE_DESCRIPTION("QCOM Speculative Sync Driver");
MODULE_LICENSE("GPL v2");