Add 'qcom/opensource/synx-kernel/' from commit '2657c18a7869feec83f4383bf72623b8d6a2ef18'

git-subtree-dir: qcom/opensource/synx-kernel
git-subtree-mainline: fe7b3b613f
git-subtree-split: 2657c18a78
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/synx-kernel
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
David Wronek 2024-10-06 16:45:32 +02:00
commit 51ff30338b
33 changed files with 12646 additions and 0 deletions


@@ -0,0 +1,5 @@
cc_library_headers {
name: "qti_synx_kernel_headers",
export_include_dirs: ["include/uapi/synx/media"],
vendor_available: true
}


@@ -0,0 +1,82 @@
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
ifneq (,$(call is-board-platform-in-list2,volcano))
TARGET_SYNX_ENABLE := false
endif
ifneq (,$(call is-board-platform-in-list2,pitti))
TARGET_SYNX_ENABLE := false
endif
ifeq ($(TARGET_SYNX_ENABLE),true)
SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel
# Build synx-driver.ko
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SYNX_ROOT=$(SYNX_BLD_DIR)
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
LOCAL_MODULE_KO_DIRS := msm/synx/synx-driver.ko msm/synx/ipclite.ko msm/synx/test/ipclite_test.ko
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := synx-driver-symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
#LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
# Include kp_module.ko in the /vendor/lib/modules (vendor.img)
# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
LOCAL_MODULE := synx-driver.ko
LOCAL_MODULE_KBUILD_NAME := msm/synx/synx-driver.ko
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
LOCAL_MODULE := ipclite.ko
LOCAL_MODULE_KBUILD_NAME := msm/synx/ipclite.ko
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
LOCAL_MODULE := ipclite_test.ko
LOCAL_MODULE_KBUILD_NAME := msm/synx/test/ipclite_test.ko
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
# print out variables
$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
$(info DLKM_DIR = $(DLKM_DIR))
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif # End of check for TARGET_SYNX_ENABLE


@@ -0,0 +1,27 @@
package(
default_visibility = [
"//visibility:public",
],
)
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
driver_header_globs = [
"include/uapi/synx/**/*.h",
"include/uapi/synx/media/**/*.h",
"msm/synx/**/*.h",
]
ddk_headers(
name = "synx_headers",
hdrs = glob(driver_header_globs),
includes = [
"include/uapi/synx",
"include/uapi/synx/media",
"msm/synx",
],
)
load(":pineapple.bzl", "define_pineapple")
define_pineapple()


@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
CONFIG_BUILD_VENDORSI := true
# auto-detect subdirs
obj-y += msm/


@@ -0,0 +1,11 @@
KBUILD_OPTIONS+= SYNX_ROOT=$(KERNEL_SRC)/$(M)
all:
$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
modules_install:
$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions


@@ -0,0 +1,5 @@
ifeq ($(CONFIG_QGKI),y)
export TARGET_SYNX_ENABLE=y
else
export TARGET_SYNX_ENABLE=m
endif


@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define TARGET_SYNX_ENABLE 1


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __DT_BINDINGS_IPCLITE_SIGNALS_H
#define __DT_BINDINGS_IPCLITE_SIGNALS_H
/* Signal IDs for COMPUTE_L0 protocol */
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MSG 0
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MEM_INIT 1
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_VERSION 2
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_TEST 3
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_SSR 4
#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_DEBUG 5
#endif


@@ -0,0 +1,326 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __UAPI_SYNX_H__
#define __UAPI_SYNX_H__
#include <linux/types.h>
#include <linux/ioctl.h>
/* Size of opaque payload sent to kernel for safekeeping until signal time */
#define SYNX_USER_PAYLOAD_SIZE 4
#define SYNX_MAX_WAITING_SYNX 16
#define SYNX_CALLBACK_RESULT_SUCCESS 2
#define SYNX_CALLBACK_RESULT_FAILED 3
#define SYNX_CALLBACK_RESULT_CANCELED 4
/**
* struct synx_info - Sync object creation information
*
* @name : Optional string representation of the synx object
* @synx_obj : Sync object returned after creation in kernel
*/
struct synx_info {
char name[64];
__s32 synx_obj;
};
/**
* struct synx_userpayload_info - Payload info from user space
*
* @synx_obj: Sync object for which payload has to be registered for
* @reserved: Reserved
* @payload: Pointer to user payload
*/
struct synx_userpayload_info {
__s32 synx_obj;
__u32 reserved;
__u64 payload[SYNX_USER_PAYLOAD_SIZE];
};
/**
* struct synx_signal - Sync object signaling struct
*
* @synx_obj : Sync object to be signaled
* @synx_state : State of the synx object to which it should be signaled
*/
struct synx_signal {
__s32 synx_obj;
__u32 synx_state;
};
/**
* struct synx_merge - Merge information for synx objects
*
* @synx_objs : Pointer to synx object array to merge
* @num_objs : Number of objects in the array
* @merged : Merged synx object
*/
struct synx_merge {
__u64 synx_objs;
__u32 num_objs;
__s32 merged;
};
/**
* struct synx_wait - Sync object wait information
*
* @synx_obj : Sync object to wait on
* @reserved : Reserved
* @timeout_ms : Timeout in milliseconds
*/
struct synx_wait {
__s32 synx_obj;
__u32 reserved;
__u64 timeout_ms;
};
/**
* struct synx_external_desc - info of external sync object
*
* @type : Synx type
* @reserved : Reserved
* @id : Sync object id
*
*/
struct synx_external_desc {
__u32 type;
__u32 reserved;
__s32 id[2];
};
/**
* struct synx_bind - info for binding two synx objects
*
* @synx_obj : Synx object
* @Reserved : Reserved
* @ext_sync_desc : External synx to bind to
*
*/
struct synx_bind {
__s32 synx_obj;
__u32 reserved;
struct synx_external_desc ext_sync_desc;
};
/**
* struct synx_addrefcount - info for refcount increment
*
* @synx_obj : Synx object
* @count : Count to increment
*
*/
struct synx_addrefcount {
__s32 synx_obj;
__u32 count;
};
/**
* struct synx_id_info - info for import and export of a synx object
*
* @synx_obj : Synx object to be exported
* @secure_key : Secure key created in export and used in import
* @new_synx_obj : Synx object created in import
*
*/
struct synx_id_info {
__s32 synx_obj;
__u32 secure_key;
__s32 new_synx_obj;
__u32 padding;
};
/**
* struct synx_fence_desc - info of external fence object
*
* @type : Fence type
* @reserved : Reserved
* @id : Fence object id
*
*/
struct synx_fence_desc {
__u32 type;
__u32 reserved;
__s32 id[2];
};
/**
* struct synx_create - Sync object creation information
*
* @name : Optional string representation of the synx object
* @synx_obj : Synx object allocated
* @flags : Create flags
* @desc : External fence desc
*/
struct synx_create_v2 {
char name[64];
__u32 synx_obj;
__u32 flags;
struct synx_fence_desc desc;
};
/**
* struct synx_userpayload_info - Payload info from user space
*
* @synx_obj : Sync object for which payload has to be registered for
* @reserved : Reserved
* @payload : Pointer to user payload
*/
struct synx_userpayload_info_v2 {
__u32 synx_obj;
__u32 reserved;
__u64 payload[SYNX_USER_PAYLOAD_SIZE];
};
/**
* struct synx_signal - Sync object signaling struct
*
* @synx_obj : Sync object to be signaled
* @synx_state : State of the synx object to which it should be signaled
* @reserved : Reserved
*/
struct synx_signal_v2 {
__u32 synx_obj;
__u32 synx_state;
__u64 reserved;
};
/**
* struct synx_merge - Merge information for synx objects
*
* @synx_objs : Pointer to synx object array to merge
* @num_objs : Number of objects in the array
* @merged : Merged synx object
* @flags : Merge flags
* @reserved : Reserved
*/
struct synx_merge_v2 {
__u64 synx_objs;
__u32 num_objs;
__u32 merged;
__u32 flags;
__u32 reserved;
};
/**
* struct synx_wait - Sync object wait information
*
* @synx_obj : Sync object to wait on
* @reserved : Reserved
* @timeout_ms : Timeout in milliseconds
*/
struct synx_wait_v2 {
__u32 synx_obj;
__u32 reserved;
__u64 timeout_ms;
};
/**
* struct synx_external_desc - info of external sync object
*
* @type : Synx type
* @reserved : Reserved
* @id : Sync object id
*
*/
struct synx_external_desc_v2 {
__u64 id;
__u32 type;
__u32 reserved;
};
/**
* struct synx_bind - info for binding two synx objects
*
* @synx_obj : Synx object
* @Reserved : Reserved
* @ext_sync_desc : External synx to bind to
*
*/
struct synx_bind_v2 {
__u32 synx_obj;
__u32 reserved;
struct synx_external_desc_v2 ext_sync_desc;
};
/**
* struct synx_import_info - import info
*
* @synx_obj : Synx handle to be imported
* @flags : Import flags
* @new_synx_obj : Synx object created in import
* @reserved : Reserved
* @desc : External fence descriptor
*/
struct synx_import_info {
__u32 synx_obj;
__u32 flags;
__u32 new_synx_obj;
__u32 reserved;
struct synx_fence_desc desc;
};
/**
* struct synx_import_arr_info - import list info
*
* @list : List of synx_import_info
* @num_objs : No of fences to import
*/
struct synx_import_arr_info {
__u64 list;
__u32 num_objs;
};
/**
* struct synx_fence_fd - get fd for synx fence
*
* @synx_obj : Synx handle
* @fd : fd for synx handle fence
*/
struct synx_fence_fd {
__u32 synx_obj;
__s32 fd;
};
/**
* struct synx_private_ioctl_arg - Sync driver ioctl argument
*
* @id : IOCTL command id
* @size : Size of command payload
* @result : Result of command execution
* @reserved : Reserved
* @ioctl_ptr : Pointer to user data
*/
struct synx_private_ioctl_arg {
__u32 id;
__u32 size;
__u32 result;
__u32 reserved;
__u64 ioctl_ptr;
};
#define SYNX_PRIVATE_MAGIC_NUM 's'
#define SYNX_PRIVATE_IOCTL_CMD \
_IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg)
#define SYNX_CREATE 0
#define SYNX_RELEASE 1
#define SYNX_SIGNAL 2
#define SYNX_MERGE 3
#define SYNX_REGISTER_PAYLOAD 4
#define SYNX_DEREGISTER_PAYLOAD 5
#define SYNX_WAIT 6
#define SYNX_BIND 7
#define SYNX_ADDREFCOUNT 8
#define SYNX_GETSTATUS 9
#define SYNX_IMPORT 10
#define SYNX_EXPORT 11
#define SYNX_IMPORT_ARR 12
#define SYNX_GETFENCE_FD 13
#endif /* __UAPI_SYNX_H__ */
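
For context, a minimal userspace sketch of driving this ioctl interface (illustrative only, not part of this commit; the device node path and the header name "synx.h" are assumptions):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "synx.h" /* this UAPI header, name assumed */

/* Wrap a command payload in synx_private_ioctl_arg and issue it */
static int synx_cmd(int fd, __u32 id, void *payload, __u32 size)
{
	struct synx_private_ioctl_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.id = id;                               /* e.g. SYNX_CREATE */
	arg.size = size;                           /* payload struct size */
	arg.ioctl_ptr = (__u64)(uintptr_t)payload; /* user pointer */
	if (ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg) < 0)
		return -1;
	return (int)arg.result;                    /* filled by the driver */
}

int main(void)
{
	struct synx_create_v2 create = { .name = "demo-fence" };
	int fd = open("/dev/synx_device", O_RDWR); /* assumed node name */

	if (fd < 0)
		return 1;
	if (synx_cmd(fd, SYNX_CREATE, &create, sizeof(create)) == 0)
		printf("created synx handle 0x%x\n", create.synx_obj);
	close(fd);
	return 0;
}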


@@ -0,0 +1,38 @@
LINUXINCLUDE += -I$(SYNX_ROOT)/include \
-I$(SYNX_ROOT)/include/uapi \
-I$(SYNX_ROOT)/include/uapi/synx/media
ccflags-y += -I$(SYNX_ROOT)/msm/synx/
# Add flag to compile the actual mmrm implementation instead of the stub version.
# To follow up with the mmrm team on whether techpack users need to define this long term.
#KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
# ported from Android.mk
$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
ifeq ($(CONFIG_ARCH_WAIPIO), y)
$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
# include $(SYNX_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_WAIPIO=1
ccflags-y += -DCONFIG_SYNX_WAIPIO=1
endif
ifeq ($(CONFIG_ARCH_KALAMA), y)
$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
# include $(SYNX_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_KALAMA=1
ccflags-y += -DCONFIG_SYNX_KALAMA=1
endif
ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
# include $(SYNX_ROOT)/config/pineapple.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_PINEAPPLE=1
ccflags-y += -DCONFIG_SYNX_PINEAPPLE=1
endif
obj-m += synx-driver.o
obj-m += synx/ipclite.o
obj-m += synx/test/ipclite_test.o
synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o


@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
CONFIG_DEBUGFS_SYNX = false
obj-m += synx/ipclite.o
obj-m += synx-driver.o
synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o
synx-driver-$(CONFIG_DEBUGFS_SYNX) += synx/synx_debugfs_util.o

File diff suppressed because it is too large


@@ -0,0 +1,443 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/soc/qcom,ipcc.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include "ipclite_client.h"
/* version related entries */
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#define IPCMEM_INIT_COMPLETED 0x1
#define ACTIVE_CHANNEL 0x1
#define IPCMEM_TOC_SIZE (4*1024)
#define IPCMEM_TOC_VAR_OFFSET 0x100
#define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL
/* IPCC signal info */
#define IPCLITE_MSG_SIGNAL 0
#define IPCLITE_MEM_INIT_SIGNAL 1
#define IPCLITE_VERSION_SIGNAL 2
#define IPCLITE_TEST_SIGNAL 3
#define IPCLITE_SSR_SIGNAL 4
#define IPCLITE_DEBUG_SIGNAL 5
#define MAX_CHANNEL_SIGNALS 6
/** Flag definitions for the entries */
#define IPCMEM_FLAGS_ENABLE_READ_PROTECTION (0x01)
#define IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION (0x02)
#define IPCMEM_FLAGS_ENABLE_RW_PROTECTION \
(IPCMEM_FLAGS_ENABLE_READ_PROTECTION | \
IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION)
#define IPCMEM_FLAGS_IGNORE_PARTITION (0x00000004)
/*Hardcoded macro to identify local host on each core*/
#define LOCAL_HOST IPCMEM_APPS
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
/* queue related entries */
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
/* debug related entries */
#define IPCLITE_DEBUG_INFO_SIZE 256
#define IPCLITE_CORE_DBG_LABEL "APSS:"
#define IPCLITE_LOG_MSG_SIZE 100
#define IPCLITE_LOG_BUF_SIZE 512
#define IPCLITE_DBG_LABEL_SIZE 5
#define IPCLITE_SIGNAL_LABEL_SIZE 10
#define PREV_INDEX 2
#define ADD_OFFSET(x, y) ((void *)((size_t)(x) + (y)))
/* IPCLite Logging Mechanism */
#define IPCLITE_OS_LOG(__level, __fmt, arg...) \
do { \
if (ipclite_debug_level & __level) { \
if (ipclite_debug_control & IPCLITE_DMESG_LOG) \
pr_info(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
ipclite_dbg_label[__level], ## arg); \
if (ipclite_debug_control & IPCLITE_INMEM_LOG) \
ipclite_inmem_log(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
ipclite_dbg_label[__level], ## arg); \
} \
} while (0)
/* IPCLite Debug enable status */
#define IS_DEBUG_CONFIG(ipclite_debug) (ipclite_debug_control & ipclite_debug)
/* IPCLite Feature enable status */
#define IS_FEATURE_CONFIG(ipclite_feature) (feature_mask & ipclite_feature)
/* Global Atomic status */
#define ATOMIC_HW_MUTEX_ACQUIRE \
(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_acquire())
#define ATOMIC_HW_MUTEX_RELEASE \
(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_release())
/* API Structure */
struct ipclite_api_list {
int (*init)(struct platform_device *pdev);
int32_t (*register_client)(IPCLite_Client cb_func_ptr, void *priv);
int32_t (*register_test_client)(IPCLite_Client cb_func_ptr, void *priv);
int32_t (*msg_send)(int32_t proc_id, uint64_t data);
int32_t (*test_msg_send)(int32_t proc_id, uint64_t data);
int32_t (*partition_info)(struct global_region_info *global_ipcmem);
void (*recover)(enum ipcmem_host_type core_id);
} api_list_t;
/**
* enum ipclite_channel_status - channel status
*
* INACTIVE : Channel uninitialized or init failed
* IN_PROGRESS : Channel init passed, awaiting confirmation from remote host
* ACTIVE : Channel init passed in local and remote host, thus active
*/
enum ipclite_channel_status {
INACTIVE = 0,
IN_PROGRESS = 1,
ACTIVE = 2,
};
enum ipclite_feature_mask {
IPCLITE_GLOBAL_ATOMIC = 0x0001ULL,
IPCLITE_TEST_SUITE = 0x0002ULL,
};
enum ipclite_debug_level {
IPCLITE_ERR = 0x0001,
IPCLITE_WARN = 0x0002,
IPCLITE_INFO = 0x0004,
IPCLITE_DBG = 0x0008,
};
enum ipclite_debug_control {
IPCLITE_DMESG_LOG = 0x0001,
IPCLITE_DBG_STRUCT = 0x0002,
IPCLITE_INMEM_LOG = 0x0004,
};
enum ipclite_debug_dump {
IPCLITE_DUMP_DBG_STRUCT = 0x0001,
IPCLITE_DUMP_INMEM_LOG = 0x0002,
IPCLITE_DUMP_SSR = 0x0004,
};
static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = {
[IPCLITE_ERR] = "err",
[IPCLITE_WARN] = "warn",
[IPCLITE_INFO] = "info",
[IPCLITE_DBG] = "dbg"
};
/**
* IPCMEM Debug Structure Definitions
* - Present in Local Memory
*/
struct ipclite_debug_info_host {
uint32_t numsig_sent; //no. of signals sent from the core
uint32_t numsig_recv; //no. of signals received on the core
uint32_t tx_wr_index; //write index of tx queue
uint32_t tx_rd_index; //read index of tx queue
uint32_t rx_wr_index; //write index of rx queue
uint32_t rx_rd_index; //read index of rx queue
uint32_t num_intr; //no. of interrupts received on the core
uint32_t prev_tx_wr_index[PREV_INDEX]; //previous write index of tx queue
uint32_t prev_tx_rd_index[PREV_INDEX]; //previous read index of tx queue
uint32_t prev_rx_wr_index[PREV_INDEX]; //previous write index of rx queue
uint32_t prev_rx_rd_index[PREV_INDEX]; //previous read index of rx queue
};
struct ipclite_debug_info_overall {
uint32_t total_numsig_sent; //total no. of signals sent
uint32_t total_numsig_recv; //total no. of signals received
uint32_t last_sent_host_id; //last signal sent to host
uint32_t last_recv_host_id; //last signal received from host
uint32_t last_sigid_sent; //last sent signal id
uint32_t last_sigid_recv; //last received signal id
};
struct ipclite_debug_info {
uint32_t debug_version;
uint32_t debug_level;
uint32_t debug_control;
uint32_t debug_dump;
uint32_t debug_log_index;
};
struct ipclite_debug_inmem_buf {
char IPCLITELog[IPCLITE_LOG_BUF_SIZE][IPCLITE_LOG_MSG_SIZE];
};
struct ipclite_debug_struct {
struct ipclite_debug_info_overall dbg_info_overall;
struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS];
};
/**
* IPCMEM TOC Structure Definitions
* - Present in toc in shared memory
*/
struct ipcmem_host_info {
uint32_t hwlock_owner;
uint32_t configured_host;
};
struct ipcmem_partition_entry {
uint32_t base_offset; /*partition offset from IPCMEM base*/
uint32_t size; /*partition size*/
uint32_t flags; /*partition flags if required*/
uint32_t host0; /*subsystem 0 who can access this partition*/
uint32_t host1; /*subsystem 1 who can access this partition*/
uint32_t reserved; /*legacy partition active status*/
};
struct ipcmem_partition_info {
uint32_t num_entries; /* Number of channel partitions */
uint32_t entry_size; /* Size of partition_entry structure */
};
struct ipcmem_offsets {
uint32_t host_info;
uint32_t global_entry;
uint32_t partition_info;
uint32_t partition_entry;
uint32_t debug;
uint32_t reserved; /*Padded for 64-bit alignment*/
};
/**
* Any change in TOC header size can only be accommodated with a
* major version change, as it is not backward compatible.
*/
struct ipcmem_toc_header {
uint32_t magic_number; /*Checksum of TOC*/
uint32_t init_done; /*TOC initialization status*/
uint32_t major_version;
uint32_t minor_version;
uint64_t feature_mask;
uint32_t reserved[6]; /*Padded for future use and 64-bit alignment*/
};
/**
* struct ipcmem_toc - Table of contents in ipcmem
*
* @hdr : Header to check for toc integrity, version and features
* @offsets : List of offsetted structures and partition entries
* available in the toc data region (ipcmem_toc_data)
*/
struct ipcmem_toc {
struct ipcmem_toc_header hdr;
struct ipcmem_offsets offsets;
/* ---------------------------------------
* ipcmem_toc_data @ 256-byte offset
* struct ipcmem_host_info host_info;
* struct ipcmem_partition_entry global_entry;
* struct ipcmem_partition_info partition_info;
* struct ipcmem_partition_entry partition_entry[num_entries];
* ---------------------------------------
*/
};
/**
* IPCMEM Partition Structure Definitions
* - Present in partitions in shared memory
*/
struct global_partition_header {
uint32_t partition_type;
uint32_t region_offset;
uint32_t region_size;
};
struct ipcmem_global_partition {
struct global_partition_header hdr;
};
struct ipcmem_partition_header {
uint32_t type; /*partition type*/
uint32_t desc_offset; /*descriptor offset*/
uint32_t desc_size; /*descriptor size*/
uint32_t fifo0_offset; /*fifo 0 offset*/
uint32_t fifo0_size; /*fifo 0 size*/
uint32_t fifo1_offset; /*fifo 1 offset*/
uint32_t fifo1_size; /*fifo 1 size*/
uint32_t status; /*partition status*/
};
struct ipcmem_partition {
struct ipcmem_partition_header hdr;
};
/**
* IPCMEM Helper Structure Definitions
* - Present in local memory
* - Can have pointers to toc and partitions in shared memory
*/
/*Pointers to offsetted structures in TOC*/
struct ipcmem_toc_data {
struct ipcmem_host_info *host_info;
struct ipcmem_partition_entry *global_entry;
struct ipcmem_partition_info *partition_info;
struct ipcmem_partition_entry *partition_entry;
};
struct ipcmem_region {
u64 aux_base;
void __iomem *virt_base;
uint32_t size;
};
struct ipclite_mem {
struct ipcmem_toc *toc;
struct ipcmem_toc_data toc_data;
struct ipcmem_region mem;
struct ipcmem_global_partition *global_partition;
struct ipcmem_partition **partition;
};
/**
* IPCLite Structure Definitions
* - Present in local memory
* - Can have pointers to partitions in shared memory
*/
struct ipclite_fifo {
uint32_t length;
__le32 *tail;
__le32 *head;
void *fifo;
size_t (*avail)(struct ipclite_fifo *fifo);
void (*peak)(struct ipclite_fifo *fifo,
void *data, size_t count);
void (*advance)(struct ipclite_fifo *fifo,
size_t count, uint32_t core_id);
void (*write)(struct ipclite_fifo *fifo,
const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id);
void (*reset)(struct ipclite_fifo *fifo);
};
struct ipclite_irq_info {
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
int irq;
int signal_id;
char irqname[32];
};
struct ipclite_client {
IPCLite_Client callback;
void *priv_data;
int reg_complete;
};
struct ipclite_channel {
uint32_t remote_pid;
struct ipclite_fifo *tx_fifo;
struct ipclite_fifo *rx_fifo;
spinlock_t tx_lock;
struct ipclite_irq_info irq_info[MAX_CHANNEL_SIGNALS];
struct ipclite_client client;
uint32_t channel_version;
uint32_t version_finalised;
uint32_t *gstatus_ptr;
uint32_t status;
};
/*Single structure that defines everything about IPCLite*/
struct ipclite_info {
struct device *dev;
struct ipclite_channel channel[IPCMEM_NUM_HOSTS];
struct ipclite_mem ipcmem;
struct hwspinlock *hwlock;
unsigned long hw_mutex_flags;
};
/*Default partition parameters*/
#define DEFAULT_PARTITION_TYPE 0x0
#define DEFAULT_PARTITION_STATUS INACTIVE
#define DEFAULT_PARTITION_HDR_SIZE 1024
#define DEFAULT_DESCRIPTOR_OFFSET 1024
#define DEFAULT_DESCRIPTOR_SIZE (3*1024)
#define DEFAULT_FIFO0_OFFSET (4*1024)
#define DEFAULT_FIFO0_SIZE (8*1024)
#define DEFAULT_FIFO1_OFFSET (12*1024)
#define DEFAULT_FIFO1_SIZE (8*1024)
#define DEFAULT_PARTITION_SIZE (32*1024)
#define DEFAULT_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION
/*Loopback partition parameters*/
#define LOOPBACK_PARTITION_TYPE 0x1
/*Global partition parameters*/
#define GLOBAL_PARTITION_TYPE 0xFF
#define GLOBAL_PARTITION_HDR_SIZE (4*1024)
#define GLOBAL_REGION_OFFSET (4*1024)
#define GLOBAL_REGION_SIZE (124*1024)
#define GLOBAL_PARTITION_SIZE (128*1024)
#define GLOBAL_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION
/*Debug partition parameters*/
#define DEBUG_PARTITION_SIZE (64*1024)
const struct ipcmem_partition_header default_partition_hdr = {
DEFAULT_PARTITION_TYPE,
DEFAULT_DESCRIPTOR_OFFSET,
DEFAULT_DESCRIPTOR_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO1_OFFSET,
DEFAULT_FIFO1_SIZE,
DEFAULT_PARTITION_STATUS,
};
/* TX and RX FIFO point to same location for such loopback partition type
* (FIFO0 offset = FIFO1 offset)
*/
const struct ipcmem_partition_header loopback_partition_hdr = {
LOOPBACK_PARTITION_TYPE,
DEFAULT_DESCRIPTOR_OFFSET,
DEFAULT_DESCRIPTOR_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_PARTITION_STATUS,
};
const struct global_partition_header global_partition_hdr = {
GLOBAL_PARTITION_TYPE,
GLOBAL_REGION_OFFSET,
GLOBAL_REGION_SIZE,
};
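
To make the TOC layout above concrete, here is a minimal sketch of validating a freshly mapped IPCMEM region against the header (illustrative only, not part of this commit; the mapping step and the error codes via <linux/errno.h> are assumptions):

/* Sketch: validate an IPCMEM TOC once the shared region is mapped.
 * `base` is assumed to be the kernel virtual address of the region.
 */
static int ipcmem_toc_sanity_check(void *base)
{
	struct ipcmem_toc *toc = base;
	struct ipcmem_host_info *host_info;

	/* TOC initialization must have completed before the region is used */
	if (toc->hdr.init_done != IPCMEM_INIT_COMPLETED)
		return -EAGAIN;

	/* A major version mismatch implies an incompatible header layout */
	if (toc->hdr.major_version != MAJOR_VERSION)
		return -EINVAL;

	/* The offsetted TOC data begins at IPCMEM_TOC_VAR_OFFSET; individual
	 * structure offsets are read from the TOC itself.
	 */
	host_info = ADD_OFFSET(base, toc->offsets.host_info);

	return host_info->configured_host ? 0 : -ENODEV;
}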


@@ -0,0 +1,205 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __IPCLITE_CLIENT_H__
#define __IPCLITE_CLIENT_H__
typedef atomic_t ipclite_atomic_uint32_t;
typedef atomic_t ipclite_atomic_int32_t;
/**
* A list of hosts supported in IPCMEM
*/
enum ipcmem_host_type {
IPCMEM_APPS = 0, /**< Apps Processor */
IPCMEM_MODEM = 1, /**< Modem processor */
IPCMEM_LPASS = 2, /**< Audio processor */
IPCMEM_SLPI = 3, /**< Sensor processor */
IPCMEM_GPU = 4, /**< Graphics processor */
IPCMEM_CDSP = 5, /**< Compute DSP processor */
IPCMEM_CVP = 6, /**< Computer Vision processor */
IPCMEM_CAM = 7, /**< Camera processor */
IPCMEM_VPU = 8, /**< Video processor */
IPCMEM_NUM_HOSTS = 9, /**< Max number of hosts in target */
IPCMEM_GLOBAL_HOST = 0xFE, /**< Global Host */
IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */
};
struct global_region_info {
void *virt_base;
uint32_t size;
};
typedef int (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv);
/**
* ipclite_msg_send() - Sends message to remote client.
*
* @proc_id : Identifier for remote client or subsystem.
* @data : 64 bit message value.
*
* @return Zero on successful send, negative on failure.
*/
int ipclite_msg_send(int32_t proc_id, uint64_t data);
/**
* ipclite_register_client() - Registers client callback with framework.
*
* @cb_func_ptr : Client callback function to be called on message receive.
* @priv : Private data required by client for handling callback.
*
* @return Zero on successful registration, negative on failure.
*/
int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
/**
* ipclite_test_msg_send() - Sends message to remote client.
*
* @proc_id : Identifier for remote client or subsystem.
* @data : 64 bit message value.
*
* @return Zero on successful send, negative on failure.
*/
int ipclite_test_msg_send(int32_t proc_id, uint64_t data);
/**
* ipclite_register_test_client() - Registers client callback with framework.
*
* @cb_func_ptr : Client callback function to be called on message receive.
* @priv : Private data required by client for handling callback.
*
* @return Zero on successful registration, negative on failure.
*/
int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
/**
* get_global_partition_info() - Gets info about IPCMEM's global partitions.
*
* @global_ipcmem : Pointer to global_region_info structure.
*
* @return Zero on success, negative on failure.
*/
int get_global_partition_info(struct global_region_info *global_ipcmem);
/**
* ipclite_recover() - Recovers ipclite when any core undergoes SSR
*
* @core_id : Core id of the core which underwent SSR.
*
* @return None.
*/
void ipclite_recover(enum ipcmem_host_type core_id);
/**
* ipclite_hw_mutex_acquire() - Locks the hw mutex reserved for ipclite.
*
* @return Zero on successful acquire, negative on failure.
*/
int ipclite_hw_mutex_acquire(void);
/**
* ipclite_hw_mutex_release() - Unlocks the hw mutex reserved for ipclite.
*
* @return Zero on successful release, negative on failure.
*/
int ipclite_hw_mutex_release(void);
/**
* ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
/**
* ipclite_atomic_init_i32() - Initializes the global memory with int32_t value.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data);
/**
* ipclite_global_atomic_store_u32() - Writes uint32_t value to global memory.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
/**
* ipclite_global_atomic_store_i32() - Writes int32_t value to global memory.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data);
/**
* ipclite_global_atomic_load_u32() - Reads the value from global memory.
*
* @addr : Pointer to global memory
*
* @return uint32_t value.
*/
uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_atomic_load_i32() - Reads the value from global memory.
*
* @addr : Pointer to global memory
*
* @return int32_t value.
*/
int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr);
/**
* ipclite_global_test_and_set_bit() - Sets a bit in global memory.
*
* @nr : Bit position to set.
* @addr : Pointer to global memory
*
* @return previous value.
*/
uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_test_and_clear_bit() - Clears a bit in global memory.
*
* @nr : Bit position to clear.
* @addr : Pointer to global memory
*
* @return previous value.
*/
uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_atomic_inc() - Increments an atomic variable by one.
*
* @addr : Pointer to global memory
*
* @return previous value.
*/
int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr);
/**
* ipclite_global_atomic_dec() - Decrements an atomic variable by one.
*
* @addr : Pointer to global variable
*
* @return previous value.
*/
int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr);
#endif
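
As a usage sketch of the client API above (a hypothetical kernel client; the callback body, host choice, and message value are illustrative, and <linux/printk.h> is assumed for pr_info):

/* Callback matching the IPCLite_Client typedef above */
static int demo_msg_handler(uint32_t proc_id, int64_t data, void *priv)
{
	pr_info("ipclite demo: msg 0x%llx from host %u\n",
		(unsigned long long)data, proc_id);
	return 0;
}

static int demo_ipclite_setup(void)
{
	int ret;

	/* Register for incoming IPCLite messages on this host */
	ret = ipclite_register_client(demo_msg_handler, NULL);
	if (ret)
		return ret;

	/* Send a 64-bit payload to the CDSP host */
	return ipclite_msg_send(IPCMEM_CDSP, 0x1234abcdULL);
}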

File diff suppressed because it is too large


@@ -0,0 +1,590 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_API_H__
#define __SYNX_API_H__
#include <linux/list.h>
#include <synx_header.h>
#include "synx_err.h"
#define SYNX_NO_TIMEOUT ((u64)-1)
/**
* SYNX_INVALID_HANDLE : Clients can assign this value to a synx handle variable
* when it does not hold a valid synx handle
*/
#define SYNX_INVALID_HANDLE 0
/* synx object states */
#define SYNX_STATE_INVALID 0 // Invalid synx object
#define SYNX_STATE_ACTIVE 1 // Synx object has not been signaled
#define SYNX_STATE_SIGNALED_ERROR 3 // Synx object signaled with error
#define SYNX_STATE_SIGNALED_EXTERNAL 5 // Synx object was signaled by external dma client.
#define SYNX_STATE_SIGNALED_SSR 6 // Synx object signaled with SSR
#define SYNX_STATE_TIMEOUT 7 // Callback status for synx object in case of timeout
/**
* enum synx_create_flags - Flags passed during synx_create call.
*
* SYNX_CREATE_LOCAL_FENCE : Instructs the framework to create local synx object,
* for local synchronization i.e. within same core.
* SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object
* for global synchronization i.e. across supported core.
* SYNX_CREATE_DMA_FENCE : Create a synx object by wrapping the provided dma fence.
* Need to pass the dma_fence ptr through fence variable
* if this flag is set. (NOT SUPPORTED)
* SYNX_CREATE_CSL_FENCE : Create a synx object with provided csl fence.
* Establishes interop with the csl fence through
* bind operations. (NOT SUPPORTED)
*/
enum synx_create_flags {
SYNX_CREATE_LOCAL_FENCE = 0x01,
SYNX_CREATE_GLOBAL_FENCE = 0x02,
SYNX_CREATE_DMA_FENCE = 0x04,
SYNX_CREATE_CSL_FENCE = 0x08,
SYNX_CREATE_MAX_FLAGS = 0x10,
};
/**
* enum synx_init_flags - Session initialization flag
* SYNX_INIT_DEFAULT : Initialization flag to be passed
* when initializing session
* SYNX_INIT_MAX : Used for internal checks
*/
enum synx_init_flags {
SYNX_INIT_DEFAULT = 0x00,
SYNX_INIT_MAX = 0x01,
};
/**
* enum synx_import_flags - Import flags
*
* SYNX_IMPORT_LOCAL_FENCE : Instructs the framework to create local synx object,
* for local synchronization i.e. within same core.
* SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object,
* for global synchronization i.e. across supported core.
* SYNX_IMPORT_SYNX_FENCE : Import native Synx handle for synchronization.
* Need to pass the Synx handle ptr through fence variable
* if this flag is set. Client must pass:
* a. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_LOCAL_FENCE
* to import a synx handle as local synx handle.
* b. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_GLOBAL_FENCE
* to import a synx handle as global synx handle.
* SYNX_IMPORT_DMA_FENCE : Import dma fence and create Synx handle for interop.
* Need to pass the dma_fence ptr through fence variable
* if this flag is set. Client must pass:
* a. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_LOCAL_FENCE
* to import a dma fence and create local synx handle
* for interop.
* b. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_GLOBAL_FENCE
* to import a dma fence and create global synx handle
* for interop.
* SYNX_IMPORT_EX_RELEASE : Flag to inform relaxed invocation where release call
* need not be called by client on this handle after import.
* (NOT SUPPORTED)
*/
enum synx_import_flags {
SYNX_IMPORT_LOCAL_FENCE = 0x01,
SYNX_IMPORT_GLOBAL_FENCE = 0x02,
SYNX_IMPORT_SYNX_FENCE = 0x04,
SYNX_IMPORT_DMA_FENCE = 0x08,
SYNX_IMPORT_EX_RELEASE = 0x10,
};
/**
* enum synx_signal_status - Signal status
*
* SYNX_STATE_SIGNALED_SUCCESS : Signal success
* SYNX_STATE_SIGNALED_CANCEL : Signal cancellation
* SYNX_STATE_SIGNALED_MAX : Clients can send custom notification
* beyond the max value (only positive)
*/
enum synx_signal_status {
SYNX_STATE_SIGNALED_SUCCESS = 2,
SYNX_STATE_SIGNALED_CANCEL = 4,
SYNX_STATE_SIGNALED_MAX = 64,
};
/**
* synx_callback - Callback invoked by external fence
*
* The external fence dispatches the registered callback to notify
* the synx framework of the signal.
*/
typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
/**
* synx_user_callback - Callback function registered by clients
*
* User callback registered for non-blocking wait. Dispatched when the
* synx object is signaled or times out, with the status of the synx object.
*/
typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
/**
* struct bind_operations - Function pointers that need to be defined
* to achieve bind functionality for external fence with synx obj
*
* @register_callback : Function to register with external sync object
* @deregister_callback : Function to deregister with external sync object
* @enable_signaling : Function to enable the signaling on the external
* sync object (optional)
* @signal : Function to signal the external sync object
*/
struct bind_operations {
int (*register_callback)(synx_callback cb_func,
void *userdata, s32 sync_obj);
int (*deregister_callback)(synx_callback cb_func,
void *userdata, s32 sync_obj);
int (*enable_signaling)(s32 sync_obj);
int (*signal)(s32 sync_obj, u32 status);
};
/**
* synx_bind_client_type : External fence supported for bind (NOT SUPPORTED)
*
* SYNX_TYPE_CSL : Camera CSL fence
* SYNX_MAX_BIND_TYPES : Used for internal checks
*/
enum synx_bind_client_type {
SYNX_TYPE_CSL = 0,
SYNX_MAX_BIND_TYPES,
};
/**
* struct synx_register_params - External registration parameters (NOT SUPPORTED)
*
* @ops : Bind operations struct
* @name : External client name
* Only first 64 bytes are accepted, rest will be ignored
* @type : Synx bind client type
*/
struct synx_register_params {
struct bind_operations ops;
char *name;
enum synx_bind_client_type type;
};
/**
* struct synx_queue_desc - Memory descriptor of the queue allocated by
* the fence driver for each client during
* registration. (Clients need not pass any pointer
* in synx_initialize_params; it is for future
* use.)
*
* @vaddr : CPU virtual address of the queue.
* @dev_addr : Physical address of the memory object.
* @size : Size of the memory.
* @mem_data : Internal pointer with the attributes of the allocation.
*/
struct synx_queue_desc {
void *vaddr;
u64 dev_addr;
u64 size;
void *mem_data;
};
/**
* enum synx_client_id : Unique identifier of the supported clients
*
* @SYNX_CLIENT_NATIVE : Native Client
* @SYNX_CLIENT_GFX_CTX0 : GFX Client 0
* @SYNX_CLIENT_DPU_CTL0 : DPU Client 0
* @SYNX_CLIENT_DPU_CTL1 : DPU Client 1
* @SYNX_CLIENT_DPU_CTL2 : DPU Client 2
* @SYNX_CLIENT_DPU_CTL3 : DPU Client 3
* @SYNX_CLIENT_DPU_CTL4 : DPU Client 4
* @SYNX_CLIENT_DPU_CTL5 : DPU Client 5
* @SYNX_CLIENT_EVA_CTX0 : EVA Client 0
* @SYNX_CLIENT_VID_CTX0 : Video Client 0
* @SYNX_CLIENT_NSP_CTX0 : NSP Client 0
* @SYNX_CLIENT_IFE_CTX0 : IFE Client 0
* @SYNX_CLIENT_ICP_CTX0 : ICP Client 0
*/
enum synx_client_id {
SYNX_CLIENT_NATIVE = 0,
SYNX_CLIENT_GFX_CTX0,
SYNX_CLIENT_DPU_CTL0,
SYNX_CLIENT_DPU_CTL1,
SYNX_CLIENT_DPU_CTL2,
SYNX_CLIENT_DPU_CTL3,
SYNX_CLIENT_DPU_CTL4,
SYNX_CLIENT_DPU_CTL5,
SYNX_CLIENT_EVA_CTX0,
SYNX_CLIENT_VID_CTX0,
SYNX_CLIENT_NSP_CTX0,
SYNX_CLIENT_IFE_CTX0,
SYNX_CLIENT_ICP_CTX0,
SYNX_CLIENT_MAX,
};
/**
* struct synx_session - Client session identifier
*
* @type : Session type.
* Internal Member. (Do not access/modify)
* @client : Pointer to client session
* Internal Member. (Do not access/modify)
*/
struct synx_session {
u32 type;
void *client;
};
/**
* struct synx_initialization_params - Session params
*
* @name : Client session name
* Only first 64 bytes are accepted, rest will be ignored
* @ptr : Memory descriptor of queue allocated by fence during
* device register. (filled by function)
* @id : Client identifier
* @flags : Synx initialization flags
*/
struct synx_initialization_params {
const char *name;
struct synx_queue_desc *ptr;
enum synx_client_id id;
enum synx_init_flags flags;
};
/**
* struct synx_create_params - Synx creation parameters
*
* @name : Optional parameter associating a name with the synx
* object for debug purposes
* Only first 64 bytes are accepted,
* rest will be ignored
* @h_synx : Pointer to synx object handle (filled by function)
* @fence : Pointer to external dma fence or csl fence. (NOT SUPPORTED)
* @flags : Synx flags for customization
*/
struct synx_create_params {
const char *name;
u32 *h_synx;
void *fence;
enum synx_create_flags flags;
};
/**
* enum synx_merge_flags - Handle merge flags
*
* SYNX_MERGE_LOCAL_FENCE : Create local composite synx object. To be passed along
* with SYNX_MERGE_NOTIFY_ON_ALL.
* SYNX_MERGE_GLOBAL_FENCE : Create global composite synx object. To be passed along
* with SYNX_MERGE_NOTIFY_ON_ALL.
* SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects.
* Clients must pass:
* a. SYNX_MERGE_LOCAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
* to create local composite synx object and notify
* it when all child synx objects are signaled.
* b. SYNX_MERGE_GLOBAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
* to create global composite synx object and notify
* it when all child synx objects are signaled.
* SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object. (NOT SUPPORTED)
*/
enum synx_merge_flags {
SYNX_MERGE_LOCAL_FENCE = 0x01,
SYNX_MERGE_GLOBAL_FENCE = 0x02,
SYNX_MERGE_NOTIFY_ON_ALL = 0x04,
SYNX_MERGE_NOTIFY_ON_ANY = 0x08,
};
/**
* struct synx_merge_params - Synx merge parameters
*
* @h_synxs : Pointer to an array of synx handles to be merged
* @flags : Merge flags
* @num_objs : Number of synx handles to be merged (in array h_synxs).
* @h_merged_obj : Merged synx handle (filled by function)
*/
struct synx_merge_params {
u32 *h_synxs;
enum synx_merge_flags flags;
u32 num_objs;
u32 *h_merged_obj;
};
/**
* enum synx_import_type - Import type
*
* SYNX_IMPORT_INDV_PARAMS : Import filled with synx_import_indv_params struct
* SYNX_IMPORT_ARR_PARAMS : Import filled with synx_import_arr_params struct
*/
enum synx_import_type {
SYNX_IMPORT_INDV_PARAMS = 0x01,
SYNX_IMPORT_ARR_PARAMS = 0x02,
};
/**
* struct synx_import_indv_params - Synx import indv parameters
*
* @new_h_synx : Pointer to new synx object
* (filled by the function)
* The new handle(s) should be used by the importing
* process for all synx api operations and
* for sharing with FW cores.
* @flags : Synx import flags
* @fence : Pointer to DMA fence fd or synx handle.
*/
struct synx_import_indv_params {
u32 *new_h_synx;
enum synx_import_flags flags;
void *fence;
};
/**
* struct synx_import_arr_params - Synx import arr parameters
*
* @list : List of synx_import_indv_params
* @num_fences : Number of fences or synx handles to be imported
*/
struct synx_import_arr_params {
struct synx_import_indv_params *list;
u32 num_fences;
};
/**
* struct synx_import_params - Synx import parameters
*
* @type : Import params type filled by client
* @indv : Params to import an individual handle or fence
* @arr : Params to import an array of handles or fences
*/
struct synx_import_params {
enum synx_import_type type;
union {
struct synx_import_indv_params indv;
struct synx_import_arr_params arr;
};
};
/**
* struct synx_callback_params - Synx callback parameters
*
* @h_synx : Synx object handle
* @cb_func : Pointer to callback func to be invoked.
* @userdata : Opaque pointer passed back with callback as data
* @cancel_cb_func : Pointer to callback to ack cancellation
* @timeout_ms : Timeout in ms. SYNX_NO_TIMEOUT if no timeout.
*/
struct synx_callback_params {
u32 h_synx;
synx_user_callback_t cb_func;
void *userdata;
synx_user_callback_t cancel_cb_func;
u64 timeout_ms;
};
/* Kernel APIs */
/**
* synx_register_ops - Register operations for external synchronization (NOT SUPPORTED)
*
* Register with synx for enabling external synchronization through bind
*
* @param params : Pointer to register params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if params are invalid.
* -SYNX_NOMEM will be returned if bind ops cannot be registered due to
* insufficient memory.
* -SYNX_ALREADY will be returned if type already in use.
*/
int synx_register_ops(const struct synx_register_params *params);
/**
* synx_deregister_ops - De-register external synchronization operations (NOT SUPPORTED)
*
* @param params : Pointer to register params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if record not found.
*/
int synx_deregister_ops(const struct synx_register_params *params);
/**
* synx_initialize - Initializes a new client session
*
* @param params : Pointer to session init params
*
* @return Client session pointer on success. NULL or error in case of failure.
*/
struct synx_session *synx_initialize(struct synx_initialization_params *params);
/**
* synx_uninitialize - Destroys the client session
*
* @param session : Session ptr (returned from synx_initialize)
*
* @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
*/
int synx_uninitialize(struct synx_session *session);
/**
* synx_create - Creates a synx object
*
* Creates a new synx obj and returns the handle to the client. There can be
* a maximum of 4095 global or local synx handles across
* sessions.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to create params
*
* @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
*/
int synx_create(struct synx_session *session, struct synx_create_params *params);
/**
* synx_async_wait - Registers a callback with a synx object
*
* Clients can register a maximum of 64 callback functions per
* synx session. Registered callback functions should perform minimal computation.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Callback params.
* cancel_cb_func in callback params is optional with this API.
*
* @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
*/
int synx_async_wait(struct synx_session *session, struct synx_callback_params *params);
/**
* synx_cancel_async_wait - De-registers a callback with a synx object
*
* This API will cancel one instance of callback function (mapped
* with userdata and h_synx) provided in cb_func of callback params.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Callback params
*
* @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
*/
int synx_cancel_async_wait(struct synx_session *session,
struct synx_callback_params *params);
/**
* synx_signal - Signals a synx object with the status argument.
*
* This function will signal the synx object referenced by h_synx
* and invoke any external binding synx objs.
* The status parameter will indicate whether the entity
* performing the signaling wants to convey an error case or a success case.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
* @param status : Status of signaling.
* Clients can send custom signaling status
* beyond SYNX_STATE_SIGNALED_MAX.
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_signal(struct synx_session *session, u32 h_synx,
enum synx_signal_status status);
/**
* synx_merge - Merges multiple synx objects
*
* This function will merge multiple synx objects into a synx group.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Merge params
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_merge(struct synx_session *session, struct synx_merge_params *params);
/**
* synx_wait - Waits for a synx object synchronously
*
* Does a wait on the synx object identified by h_synx for a maximum
* of timeout_ms milliseconds. Must not be called from interrupt context as
* this API can sleep.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle to be waited upon
* @param timeout_ms : Timeout in ms
*
* @return Status of synx object if handle is signaled. -SYNX_INVAL if synx object
* is in bad state or arguments are invalid, -SYNX_TIMEOUT if wait times out.
*/
int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms);
/**
* synx_get_status - Returns the status of the synx object.
*
* This API should not be used in polling mode to know if the handle
* is signaled or not.
* Clients need to explicitly wait using synx_wait() or synx_async_wait()
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
*
* @return Status of the synx object
*/
int synx_get_status(struct synx_session *session, u32 h_synx);
/**
* synx_import - Imports (looks up) synx object from given handle or fence
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to import params
*
* @return Status of operation. Negative in case of failure, SYNX_SUCCESS otherwise.
*/
int synx_import(struct synx_session *session, struct synx_import_params *params);
/**
* synx_get_fence - Get the native fence backing the synx object
*
* The synx framework takes an additional reference on the dma fence and returns the
* native fence. Clients must release this additional reference explicitly by calling kref_put.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
*
* @return Fence pointer in case of success and NULL in case of failure.
*/
void *synx_get_fence(struct synx_session *session, u32 h_synx);
/**
* synx_release - Release the synx object.
*
* Every created, imported or merged synx object should be released.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle to be destroyed
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_release(struct synx_session *session, u32 h_synx);
/**
* synx_recover - Recover any possible handle leaks
*
* This function should be called on HW hang/reset to
* recover the shared synx handles. It cleans up
* synx handles owned by the subsystem under hang/reset, and avoids
* potential resource leaks.
*
* The function does not destroy the session, but only
* recovers synx handles belonging to the session.
* The synx session remains active, and clients
* need to destroy the session explicitly through
* the synx_uninitialize API.
*
* All the unsignaled handles owned/imported by the core at the time of reset
* will be signaled by synx framework on behalf of hung core with SYNX_STATE_SIGNALED_SSR.
*
* @param id : Client ID of core to recover
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_recover(enum synx_client_id id);
#endif /* __SYNX_API_H__ */
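
Tying the API above together, a hedged lifecycle sketch for a hypothetical kernel client (the client ID, names, and timeout are illustrative; IS_ERR_OR_NULL assumes <linux/err.h>; SYNX_SUCCESS comes from synx_err.h included above):

/* Sketch only: create, signal, wait on, and release one synx object */
static int demo_synx_roundtrip(void)
{
	struct synx_initialization_params init_params = {
		.name = "demo-client",
		.id = SYNX_CLIENT_NATIVE,
		.flags = SYNX_INIT_DEFAULT,
	};
	struct synx_session *session;
	u32 h_synx = SYNX_INVALID_HANDLE;
	struct synx_create_params create_params = {
		.name = "demo-obj",
		.h_synx = &h_synx,
		.flags = SYNX_CREATE_LOCAL_FENCE,
	};
	int ret;

	session = synx_initialize(&init_params);
	if (IS_ERR_OR_NULL(session))
		return -ENODEV;

	ret = synx_create(session, &create_params);
	if (ret != SYNX_SUCCESS)
		goto out;

	/* Normally another context signals; done inline here for brevity */
	synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);

	/* Returns the signaled status (here SYNX_STATE_SIGNALED_SUCCESS) */
	ret = synx_wait(session, h_synx, 100);

	synx_release(session, h_synx);
out:
	synx_uninitialize(session);
	return ret;
}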


@@ -0,0 +1,203 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include "synx_api.h"
#include "synx_debugfs.h"
#include "synx_util.h"
#include "synx_global.h"
#include "synx_debugfs_util.h"
#define MAX_DBG_BUF_SIZE (64 * SYNX_MAX_OBJS)
#ifdef ENABLE_DEBUGFS
#define MAX_HELP_BUF_SIZE (4096)
#define BUF_SIZE 16
#endif
struct dentry *my_direc;
u32 lower_handle_id = GLOBAL_HANDLE_STARTING_ID, upper_handle_id = GLOBAL_HANDLE_STARTING_ID;
long synx_columns = STATUS_COLUMN | ID_COLUMN | REF_CNT_COLUMN |
NUM_CHILD_COLUMN | SUBSCRIBERS_COLUMN | WAITERS_COLUMN | PARENTS_COLUMN | GLOBAL_SHARED_MEM;
EXPORT_SYMBOL(synx_columns);
int synx_debug = SYNX_ERR | SYNX_WARN |
SYNX_INFO;
EXPORT_SYMBOL(synx_debug);
void populate_bound_rows(
struct synx_coredata *row, char *cur, char *end)
{
int j;
for (j = 0; j < row->num_bound_synxs; j++)
SYNX_CONSOLE_LOG(cur, end, "\n\tID: %d",
row->bound_synxs[j].external_desc.id);
}
static ssize_t synx_table_read(struct file *file,
char *buf,
size_t count,
loff_t *ppos)
{
struct synx_device *dev = file->private_data;
struct error_node *err_node, *err_node_tmp;
char *dbuf, *cur, *end;
ssize_t len = 0;
dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
if (!dbuf)
return -ENOMEM;
cur = dbuf;
end = cur + MAX_DBG_BUF_SIZE;
#ifdef ENABLE_DEBUGFS
SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID start value : %d", lower_handle_id);
SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID end value : %d\n", upper_handle_id);
if (synx_columns & GLOBAL_HASHTABLE)
synx_debugfs_util_print_hash_table(&cur, &end, true);
if (synx_columns & LOCAL_HASHTABLE)
synx_debugfs_util_print_hash_table(&cur, &end, false);
if (synx_columns & CLIENT_HASHTABLE)
synx_debugfs_util_print_client_table(&cur, &end);
if (synx_columns & GLOBAL_SHARED_MEM)
synx_debugfs_util_print_global_shared_memory(&cur, &end);
if (synx_columns & DMA_FENCE_MAP)
synx_debugfs_util_print_dma_fence(&cur, &end);
#endif
if (synx_columns & ERROR_CODES && !list_empty(&dev->error_list)) {
SYNX_CONSOLE_LOG(cur, end, "\nError(s): ");
mutex_lock(&dev->error_lock);
list_for_each_entry_safe(
err_node, err_node_tmp,
&dev->error_list, node) {
SYNX_CONSOLE_LOG(cur, end, "\n\tTime: %s - ", err_node->timestamp);
SYNX_CONSOLE_LOG(cur, end, "ID: %d - ", err_node->h_synx);
SYNX_CONSOLE_LOG(cur, end, "Code: %d", err_node->error_code);
list_del(&err_node->node);
kfree(err_node);
}
mutex_unlock(&dev->error_lock);
}
len = simple_read_from_buffer(buf, count, ppos,
dbuf, cur - dbuf);
kfree(dbuf);
return len;
}
#ifdef ENABLE_DEBUGFS
static ssize_t synx_table_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
u32 stat = -1;
u32 i = 0, base = 10, num = 0;
bool invalid_val = false;
char *kbuffer = kzalloc(BUF_SIZE, GFP_KERNEL);
if (!kbuffer)
return -ENOMEM;
stat = copy_from_user(kbuffer, buf, BUF_SIZE);
if (stat != 0) {
kfree(kbuffer);
return -EFAULT;
}
if (kbuffer[i] == '0' && (kbuffer[i+1] == 'x' || kbuffer[i+1] == 'X')) {
base = 16;
i += 2;
}
for ( ; (i < BUF_SIZE / 2 && kbuffer[i] != '-' && kbuffer[i] != '\n'); i++)
SYNX_READ_CHAR(kbuffer, num, base, i);
if (!invalid_val)
lower_handle_id = num;
if (kbuffer[i] == '-') {
num = 0;
i++;
for ( ; i < BUF_SIZE && kbuffer[i] != '\n'; i++)
SYNX_READ_CHAR(kbuffer, num, base, i);
if (!invalid_val)
upper_handle_id = num;
} else if (kbuffer[i] == '\n')
upper_handle_id = lower_handle_id;
kfree(kbuffer);
return count;
}
#endif
static const struct file_operations synx_table_fops = {
.owner = THIS_MODULE,
.read = synx_table_read,
#ifdef ENABLE_DEBUGFS
.write = synx_table_write,
#endif
.open = simple_open,
};
#ifdef ENABLE_DEBUGFS
static ssize_t synx_help_read(struct file *file,
char *buf,
size_t count,
loff_t *ppos)
{
char *dbuf, *cur, *end;
ssize_t len = 0;
dbuf = kzalloc(MAX_HELP_BUF_SIZE, GFP_KERNEL);
if (!dbuf)
return -ENOMEM;
cur = dbuf;
end = cur + MAX_HELP_BUF_SIZE;
synx_debugfs_util_load_help_content(&cur, &end);
len = simple_read_from_buffer(buf, count, ppos, dbuf, cur - dbuf);
kfree(dbuf);
return len;
}
static const struct file_operations synx_help_fops = {
.owner = THIS_MODULE,
.read = synx_help_read,
};
#endif
struct dentry *synx_init_debugfs_dir(struct synx_device *dev)
{
struct dentry *dir = NULL;
dir = debugfs_create_dir("synx_debug", NULL);
if (!dir) {
dprintk(SYNX_ERR, "Failed to create debugfs for synx\n");
return NULL;
}
debugfs_create_u32("debug_level", 0644, dir, &synx_debug);
debugfs_create_ulong("column_level", 0644, dir, &synx_columns);
if (!debugfs_create_file("synx_table",
0644, dir, dev, &synx_table_fops)) {
dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n");
return NULL;
}
#ifdef ENABLE_DEBUGFS
if (!debugfs_create_file("help",
0444, dir, dev, &synx_help_fops)) {
dprintk(SYNX_ERR, "Failed to create debugfs help file for synx\n");
return NULL;
}
#endif
return dir;
}
void synx_remove_debugfs_dir(struct synx_device *dev)
{
debugfs_remove_recursive(dev->debugfs_root);
}
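
The write handler above accepts a "lower-upper" handle range; with a leading 0x, the base carries over to the second number, so it is written without its own prefix (note the handler is compiled only when ENABLE_DEBUGFS is defined). A hedged userspace sketch, assuming the standard debugfs mount point:

#include <fcntl.h>
#include <unistd.h>

/* Illustrative: select handles 0x100..0x1ff for the next read of the
 * synx_table node created by synx_init_debugfs_dir() above.
 */
static void demo_set_handle_range(void)
{
	int fd = open("/sys/kernel/debug/synx_debug/synx_table", O_WRONLY);

	if (fd >= 0) {
		write(fd, "0x100-1ff\n", 10);
		close(fd);
	}
}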


@@ -0,0 +1,144 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_DEBUGFS_H__
#define __SYNX_DEBUGFS_H__
#include <linux/debugfs.h>
#include <linux/delay.h>
#include "synx_private.h"
//#define ENABLE_DEBUGFS
#define STATE_NAME_SPACE (4)
enum synx_debug_level {
SYNX_ERR = 0x0001,
SYNX_WARN = 0x0002,
SYNX_INFO = 0x0004,
SYNX_DBG = 0x0008,
SYNX_VERB = 0x0010,
SYNX_IPCL = 0x0020,
SYNX_GSM = 0x0040,
SYNX_MEM = 0x0080,
SYNX_ALL = SYNX_ERR | SYNX_WARN | SYNX_INFO |
SYNX_DBG | SYNX_IPCL | SYNX_GSM | SYNX_MEM,
};
enum synx_columns_level {
NAME_COLUMN = 0x00000001,
ID_COLUMN = 0x00000002,
BOUND_COLUMN = 0x00000004,
STATUS_COLUMN = 0x00000008,
FENCE_COLUMN = 0x00000010,
COREDATA_COLUMN = 0x00000020,
GLOBAL_IDX_COLUMN = 0x00000040,
REL_CNT_COLUMN = 0x00000080,
MAP_CNT_COLUMN = 0x00000100,
REF_CNT_COLUMN = 0x00000200,
NUM_CHILD_COLUMN = 0x00000400,
SUBSCRIBERS_COLUMN = 0x00000800,
WAITERS_COLUMN = 0x00001000,
PARENTS_COLUMN = 0x00002000,
CLIENT_ID_COLUMN = 0x00004000,
LOCAL_HASHTABLE = 0x00010000,
GLOBAL_HASHTABLE = 0x00020000,
CLIENT_HASHTABLE = 0x00040000,
GLOBAL_SHARED_MEM = 0x00080000,
DMA_FENCE_MAP = 0x00100000,
CSL_FENCE_MAP = 0x00200000,
ERROR_CODES = 0x00008000,
};
#ifndef SYNX_DBG_LABEL
#define SYNX_DBG_LABEL "synx"
#endif
#define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: "
extern int synx_debug;
extern u32 lower_handle_id, upper_handle_id;
extern long synx_columns;
static inline char *synx_debug_str(int level)
{
switch (level) {
case SYNX_ERR:
return "err";
case SYNX_WARN:
return "warn";
case SYNX_INFO:
return "info";
case SYNX_DBG:
return "dbg";
case SYNX_VERB:
return "verb";
case SYNX_IPCL:
return "ipcl";
case SYNX_GSM:
return "gmem";
case SYNX_MEM:
return "mem";
default:
return "???";
}
}
#define dprintk(__level, __fmt, arg...) \
do { \
if (synx_debug & __level) { \
pr_info(SYNX_DBG_TAG "%s: %d: " __fmt, \
synx_debug_str(__level), __func__, \
__LINE__, ## arg); \
} \
} while (0)
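/*
 * Usage sketch (illustrative only): with synx_debug set to
 * SYNX_ERR | SYNX_DBG through the debug_level debugfs node, the first
 * message below is printed while the second is filtered out:
 *
 *	dprintk(SYNX_ERR, "failed to lock entry %d\n", idx);
 *	dprintk(SYNX_VERB, "entry %d locked\n", idx);
 */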
#define SYNX_CONSOLE_LOG(__cur, __end, \
__fmt_string, arg...) \
do { \
if ((__end - __cur) * (sizeof(char *)) \
- strlen(__fmt_string) <= STATE_NAME_SPACE) \
dprintk(SYNX_DBG, __fmt_string, ## arg); \
else \
__cur += scnprintf(__cur, __end - __cur, \
__fmt_string, ## arg); \
} while (0)
#define SYNX_READ_CHAR(__buf, __num, \
__base, __pos) \
do { \
if (__buf[__pos] >= '0' && \
__buf[__pos] <= '9') \
__num = __num * __base + \
(__buf[__pos] - '0'); \
else if (__buf[__pos] >= 'a' && \
__buf[__pos] <= 'f') \
__num = __num * __base + \
(__buf[__pos] - 'a' + 10); \
else if (__buf[__pos] >= 'A' && \
__buf[__pos] <= 'F') \
__num = __num * __base + \
(__buf[__pos] - 'A' + 10); \
else \
invalid_val = true; \
} while (0)
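/*
 * Note: SYNX_READ_CHAR relies on a bool named invalid_val being in scope
 * at the expansion site; it is set when a non-hex character is seen.
 * A hypothetical caller, shown for illustration only:
 *
 *	bool invalid_val = false;
 *	u32 num = 0, pos;
 *	for (pos = 0; pos < len && !invalid_val; pos++)
 *		SYNX_READ_CHAR(buf, num, 16, pos);
 */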
/**
* synx_init_debugfs_dir - Initializes debugfs
*
* @param dev : Pointer to synx device structure
*/
struct dentry *synx_init_debugfs_dir(struct synx_device *dev);
/**
* synx_remove_debugfs_dir - Removes debugfs
*
* @param dev : Pointer to synx device structure
*/
void synx_remove_debugfs_dir(struct synx_device *dev);
#endif /* __SYNX_DEBUGFS_H__ */

View File

@ -0,0 +1,497 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include "synx_debugfs.h"
#include "synx_debugfs_util.h"
#include "synx_util.h"
#include "synx_private.h"
#include "synx_global.h"
#define MAX_CUSTOM_STATUS ((1UL << 32) - 1)
char *synx_debugfs_util_get_state_name(u32 status)
{
char *state;
if (status == 0)
state = "INV";
else if (status == 1)
state = "ACT";
else if (status == 2)
state = "SUC";
else if (status == 3)
state = "ERR";
else if (status == 4)
state = "CAN";
else if (status == 5)
state = "EXT";
else if (status == 6)
state = "SSR";
else if (status > 64 && status <= MAX_CUSTOM_STATUS)
state = "CUS";
else
state = "???";
return state;
}
static int synx_debugfs_util_get_client_data(struct synx_client *client)
{
if (IS_ERR_OR_NULL(client))
return -SYNX_NOENT;
kref_get(&client->refcount);
return SYNX_SUCCESS;
}
static void synx_debugfs_util_put_client_data(struct synx_client *client)
{
if (!IS_ERR_OR_NULL(client))
kref_put(&client->refcount, synx_client_destroy);
}
static int synx_debugfs_util_get_handle(struct synx_handle_coredata *handle_coredata)
{
if (IS_ERR_OR_NULL(handle_coredata))
return -SYNX_NOENT;
kref_get(&handle_coredata->refcount);
return SYNX_SUCCESS;
}
static void synx_debugfs_util_put_handle(struct synx_handle_coredata *handle_coredata)
{
if (!IS_ERR_OR_NULL(handle_coredata))
kref_put(&handle_coredata->refcount, synx_util_destroy_handle);
}
static int synx_debugfs_util_get_CSL_fence_entry(struct synx_entry_64 *entry)
{
if (IS_ERR_OR_NULL(entry))
return -SYNX_NOENT;
kref_get(&entry->refcount);
return SYNX_SUCCESS;
}
static void synx_debugfs_util_put_CSL_fence_entry(struct synx_entry_64 *entry)
{
if (!IS_ERR_OR_NULL(entry))
kref_put(&entry->refcount, synx_util_destroy_data);
}
bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry,
u32 idx)
{
int i;
if (!entry || entry->handle != idx)
return false;
if (entry->status || entry->handle || entry->refcount ||
entry->num_child || entry->subscribers || entry->waiters)
return true;
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (entry->parents[i])
return true;
}
return false;
}
static bool synx_debugfs_util_is_valid_dma_handle_range(struct synx_fence_entry *fence_entry)
{
if ((fence_entry->g_handle >= lower_handle_id &&
fence_entry->g_handle <= upper_handle_id) ||
(fence_entry->l_handle >= lower_handle_id &&
fence_entry->l_handle <= upper_handle_id))
return true;
return false;
}
static void synx_debugfs_util_print_map_column_values(char **cur,
char **end,
struct synx_map_entry *entry)
{
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s", synx_debugfs_util_get_state_name
(synx_util_get_object_status(entry->synx_obj)));
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t %x", entry->key);
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t %d", kref_read(&entry->refcount));
if (synx_columns & BOUND_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t %d", entry->synx_obj->num_bound_synxs);
if (synx_columns & GLOBAL_IDX_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t %d", entry->synx_obj->global_idx);
if (synx_columns & MAP_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t %d", entry->synx_obj->map_count);
SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
}
void synx_debugfs_util_print_hash_table(char **cur,
char **end,
bool is_global)
{
struct synx_map_entry *map_entry = NULL;
struct synx_coredata *synx_obj = NULL;
u32 key;
if (is_global)
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t-------------GLOBAL MAP TABLE------------\n");
else
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t-------------LOCAL MAP TABLE------------\n");
SYNX_CONSOLE_LOG(*cur, *end, "\n\t\t");
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |");
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |");
if (synx_columns & BOUND_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| NUM BOUND |");
if (synx_columns & GLOBAL_IDX_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| GLOBAL INDEX |");
if (synx_columns & MAP_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| MAP CNT |");
SYNX_CONSOLE_LOG(*cur, *end, "\n");
for (key = lower_handle_id; key <= upper_handle_id; key++) {
map_entry = synx_util_get_map_entry(key);
if (IS_ERR_OR_NULL(map_entry) || IS_ERR_OR_NULL(map_entry->synx_obj) ||
(is_global ^ synx_util_is_global_handle(key))) {
synx_util_release_map_entry(map_entry);
continue;
}
synx_obj = map_entry->synx_obj;
synx_util_get_object(synx_obj);
mutex_lock(&synx_obj->obj_lock);
synx_debugfs_util_print_map_column_values(cur, end, map_entry);
mutex_unlock(&synx_obj->obj_lock);
synx_util_put_object(synx_obj);
synx_util_release_map_entry(map_entry);
}
}
void synx_debugfs_util_print_dma_fence(char **cur,
char **end)
{
struct synx_fence_entry *curr = NULL;
struct hlist_node *tmp;
struct dma_fence *fence_entry = NULL;
u32 map_itr;
SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------DMA FENCE MAP TABLE------------\n");
if (synx_columns & FENCE_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| DMA FENCE |");
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |");
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |");
SYNX_CONSOLE_LOG(*cur, *end, "\n");
spin_lock_bh(&synx_dev->native->fence_map_lock);
hash_for_each_safe(synx_dev->native->fence_map, map_itr, tmp, curr, node) {
if (IS_ERR_OR_NULL(curr))
continue;
fence_entry = (struct dma_fence *)curr->key;
dma_fence_get(fence_entry);
if (synx_debugfs_util_is_valid_dma_handle_range(curr)) {
if (synx_columns & FENCE_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t%p", fence_entry);
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s",
synx_debugfs_util_get_state_name
(__fence_state(fence_entry, false)));
if (synx_columns & ID_COLUMN) {
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->g_handle);
SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr->l_handle);
}
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
kref_read(&(fence_entry)->refcount));
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t-------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end,
"-----------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
}
dma_fence_put(fence_entry);
}
spin_unlock_bh(&synx_dev->native->fence_map_lock);
}
void synx_debugfs_util_print_csl_fence(char **cur,
char **end)
{
u32 itr;
int rc = SYNX_SUCCESS;
struct synx_entry_64 *curr = NULL;
struct hlist_node *tmp;
struct synx_map_entry *map_entry = NULL;
SYNX_CONSOLE_LOG(*cur, *end, "\n\t------------- CSL FENCE MAP TABLE------------\n");
if (synx_columns & FENCE_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| CSL FENCE |");
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |");
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |");
SYNX_CONSOLE_LOG(*cur, *end, "\n");
spin_lock_bh(&synx_dev->native->csl_map_lock);
hash_for_each_safe(synx_dev->native->csl_fence_map, itr, tmp, curr, node) {
rc = synx_debugfs_util_get_CSL_fence_entry(curr);
if (rc) {
spin_unlock_bh(&synx_dev->native->csl_map_lock);
return;
}
if (curr->data[0] >= lower_handle_id && curr->data[0] <= upper_handle_id) {
if (synx_columns & FENCE_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "%p", curr->key);
if (synx_columns & STATUS_COLUMN) {
map_entry = synx_util_get_map_entry(curr->data[0]);
if (!IS_ERR_OR_NULL(map_entry) &&
!IS_ERR_OR_NULL(map_entry->synx_obj)) {
SYNX_CONSOLE_LOG(*cur, *end, "\t%s",
synx_debugfs_util_get_state_name
(synx_util_get_object_status(map_entry->synx_obj)));
synx_util_release_map_entry(map_entry);
}
} //TODO : Update status field of CSL Fence with updated structure
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->data[0]);
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t%d", kref_read(&curr->refcount));
SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
}
synx_debugfs_util_put_CSL_fence_entry(curr);
}
spin_unlock_bh(&synx_dev->native->csl_map_lock);
}
void synx_debugfs_util_print_global_shared_memory(char **cur,
char **end)
{
struct synx_global_coredata synx_global_entry;
u32 i, idx;
/* Column heading setup */
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t ------------- GLOBAL SHARED MEMORY ------------\n\t");
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |");
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |");
if (synx_columns & NUM_CHILD_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| NUM CHILD |");
if (synx_columns & SUBSCRIBERS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| SUBSCRIBERS |");
if (synx_columns & WAITERS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| WAITERS |");
if (synx_columns & PARENTS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| PARENTS |");
SYNX_CONSOLE_LOG(*cur, *end, "\n");
for (idx = lower_handle_id ; idx <= upper_handle_id ; idx++) {
if (!synx_fetch_global_shared_memory_handle_details(idx, &synx_global_entry) ||
!synx_debugfs_util_is_valid_global_shared_memory_entry(&synx_global_entry, idx))
continue;
if (synx_columns & STATUS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t %s",
synx_debugfs_util_get_state_name(synx_global_entry.status));
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%x", synx_global_entry.handle);
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.refcount);
if (synx_columns & NUM_CHILD_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.num_child);
if (synx_columns & SUBSCRIBERS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t%d", synx_global_entry.subscribers);
if (synx_columns & WAITERS_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.waiters);
if (synx_columns & PARENTS_COLUMN) {
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_global_entry.parents[i])
SYNX_CONSOLE_LOG(*cur, *end, " %2u",
synx_global_entry.parents[i]);
}
}
SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
}
}
void synx_debugfs_util_print_client_table(char **cur,
char **end)
{
int rc = SYNX_SUCCESS;
struct synx_client *curr;
struct hlist_node *tmp;
struct hlist_node *tmp2;
struct synx_handle_coredata *curr2 = NULL;
u32 client_map_itr, handle_map_itr;
SYNX_CONSOLE_LOG(*cur, *end, "\n\t ------------- CLIENT MAP TABLE------------\n");
if (synx_columns & CLIENT_ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| CLIENT ID |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "|CLIENT REF COUNT|");
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE ID |");
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "|REF COUNT|");
if (synx_columns & REL_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "|REL COUNT|");
SYNX_CONSOLE_LOG(*cur, *end, "\n");
spin_lock_bh(&synx_dev->native->metadata_map_lock);
hash_for_each_safe(synx_dev->native->client_metadata_map,
client_map_itr, tmp, curr, node) {
rc = synx_debugfs_util_get_client_data(curr);
if (rc)
goto bail;
spin_lock_bh(&curr->handle_map_lock);
hash_for_each_safe(curr->handle_map,
handle_map_itr, tmp2, curr2, node) {
rc = synx_debugfs_util_get_handle(curr2);
if (rc) {
spin_unlock_bh(&curr->handle_map_lock);
synx_debugfs_util_put_client_data(curr);
goto bail;
}
if (curr2->key >= lower_handle_id && curr2->key <= upper_handle_id) {
if (synx_columns & CLIENT_ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t%u", curr->id);
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
kref_read(&curr->refcount));
if (synx_columns & ID_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr2->key);
if (synx_columns & REF_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
kref_read(&curr2->refcount));
if (synx_columns & REL_CNT_COLUMN)
SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr2->rel_count);
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t-------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end,
"-----------------------------------------");
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
}
synx_debugfs_util_put_handle(curr2);
}
spin_unlock_bh(&curr->handle_map_lock);
synx_debugfs_util_put_client_data(curr);
}
bail:
spin_unlock_bh(&synx_dev->native->metadata_map_lock);
}
void synx_debugfs_util_load_help_content(char **cur,
char **end)
{
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tSynx tables Supported for debugfs with the column names:");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tGLOBAL/LOCAL MAP COLUMNS : STATUS, ID, REF_CNT, BOUND,");
SYNX_CONSOLE_LOG(*cur, *end, "\tGLOBAL INDEX, MAP CNT\n");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tGLOBAL SHARED MEMORY COLUMNS : STATUS, ID,");
SYNX_CONSOLE_LOG(*cur, *end,
"REF_CNT, NUM_CHILD, \tSUBSCRIBERS, WAITERS, PARENTS");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tCLIENT MAP COLUMNS : CLIENT_ID, REF_CNT, REL_CNT, ID");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tDMA FENCE COLUMNS: STATUS, ID, REF_CNT, DMA FENCE");
SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tINSTRUCTIONS TO BE FOLLOWED:");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tTO PRINT CHOOSE THE COLUMNS ACCORDINGLY AND ADD UP THE");
SYNX_CONSOLE_LOG(*cur, *end,
"\tHEXADECIMAL VALUES & PASS THE ADDED UP VALUES FOR COLUMN ALONG");
SYNX_CONSOLE_LOG(*cur, *end, "WITH TABLE SELECTION VALUE AS SHOWN BELOW:");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tSet Below Values for Column selection\n");
SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tNAME_COLUMN = 0x0001");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tID_COLUMN = 0x0002");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tBOUND_COLUMN = 0x0004");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tSTATUS_COLUMN = 0x0008");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tFENCE_COLUMN = 0x0010");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tCOREDATA_COLUMN = 0x0020");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_IDX_COLUMN = 0x0040");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tREL_CNT_COLUMN = 0x0080");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tMAP_CNT_COLUMN = 0x0100");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tREF_CNT_COLUMN = 0x0200");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tNUM_CHILD_COLUMN = 0x0400");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUBSCRIBERS_COLUMN= 0x0800");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tWAITERS_COLUMN = 0x1000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tPARENTS_COLUMN = 0x2000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_ID_COLUMN = 0x4000");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tSet Below Values for Table selection\n");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tLOCAL_HASHTABLE = 0x00010000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_HASHTABLE = 0x00020000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_HASHTABLE = 0x00040000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_SHARED_MEM = 0x00080000");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tDMA_FENCE_MAP = 0x00100000\n");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tExample : To select Global map & all its columns :");
SYNX_CONSOLE_LOG(*cur, *end, "\n\t echo 0x2034E>column_level");
SYNX_CONSOLE_LOG(*cur, *end, "\n\t Last four digits in hexadecimal flag");
SYNX_CONSOLE_LOG(*cur, *end, " is dedicated for setting columns,");
SYNX_CONSOLE_LOG(*cur, *end,
"\tuser can even set \"FFFF\" to set all columns");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\t Instead of passing 0x2034E, \tuser can even pass");
SYNX_CONSOLE_LOG(*cur, *end, " 0x2FFFF to fetch the same");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tUser can set Handle Range with echo command as shown below\n");
SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 1048577-1048580>synx_table");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tFor single handle : echo \"1048577\">synx_table");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tHandle range can be set in hexadecimal values as shown below:");
SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 0x100001-10000f>synx_table");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tSingle handle : echo 0x100001>synx_table");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\n\tTo print info on console : cat synx_table");
SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tHandle states :");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tACT : SYNX_STATE_ACTIVE");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tINV : SYNX_STATE_INVALID");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tERR : SYNX_STATE_SIGNALED_ERROR");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUC : SYNX_STATE_SIGNALED_SUCCESS");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tCAN : SYNX_STATE_SIGNALED_CANCELLED");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tEXT : SYNX_STATE_SIGNALED_EXTERNAL");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tSSR : SYNX_STATE_SIGNALED_SSR\n");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tCUS : CUSTOM SIGNAL");
SYNX_CONSOLE_LOG(*cur, *end, "\n\t??? : UNKNOWN / UNDEFINED");
SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tAdditional information:");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tNo need to set handle ID range and column or table selection");
SYNX_CONSOLE_LOG(*cur, *end, "\tvalues again if once it is already set");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tSimply using cat synx_table command user can print the data");
SYNX_CONSOLE_LOG(*cur, *end, "\tfor same table with same set of columns");
SYNX_CONSOLE_LOG(*cur, *end, "\n\tTo print all tables and all");
SYNX_CONSOLE_LOG(*cur, *end, "columns set column level value to 0x1fffff");
SYNX_CONSOLE_LOG(*cur, *end,
"\n\tCurrently we do not support CSL fence\n\n");
}

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_DEBUGFS_UTIL_H__
#define __SYNX_DEBUGFS_UTIL_H__
#include "synx_api.h"
#include "synx_private.h"
#define GLOBAL_HANDLE_STARTING_ID (1048577)
/* DMA FENCE print function */
void synx_debugfs_util_print_dma_fence(char **cur, char **end);
/* CSL FENCE print function */
void synx_debugfs_util_print_csl_fence(char **cur, char **end);
/* GLOBAL & LOCAL MAP print function */
void synx_debugfs_util_print_hash_table(char **cur, char **end, bool flag);
/* GLOBAL SHARED MEMORY print function */
void synx_debugfs_util_print_global_shared_memory(char **cur, char **end);
/* CLIENT MAP print function */
void synx_debugfs_util_print_client_table(char **cur, char **end);
/* Function to get SYNX State Name */
char *synx_debugfs_util_get_state_name(u32 status);
/* Function for loading content of the help option for debugfs v2 */
void synx_debugfs_util_load_help_content(char **cur, char **end);
/* Function to check whether a global shared memory entry is valid */
bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry,
u32 idx);
#endif /* __SYNX_DEBUGFS_UTIL_H__ */

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_ERR_H__
#define __SYNX_ERR_H__
#include <linux/err.h>
/**
* Error codes returned from framework
*
* Return codes are mapped to platform specific
* return values.
*/
#define SYNX_SUCCESS 0
#define SYNX_NOMEM ENOMEM
#define SYNX_NOSUPPORT EOPNOTSUPP
#define SYNX_NOPERM EPERM
#define SYNX_TIMEOUT ETIMEDOUT
#define SYNX_ALREADY EALREADY
#define SYNX_NOENT ENOENT
#define SYNX_INVALID EINVAL
#define SYNX_BUSY EBUSY
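/*
 * Note: these codes are defined as positive values; callers in this driver
 * return them negated and treat any non-zero return as failure. A sketch
 * (illustrative only, using synx_global_get_ref() from synx_global.c):
 *
 *	rc = synx_global_get_ref(idx);
 *	if (rc)
 *		return rc;	// e.g. -SYNX_NOENT
 */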
#endif /* __SYNX_ERR_H__ */

View File

@ -0,0 +1,916 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/hwspinlock.h>
#include <linux/string.h>
#include "synx_debugfs.h"
#include "synx_global.h"
static struct synx_shared_mem synx_gmem;
static struct hwspinlock *synx_hwlock;
static u32 synx_gmem_lock_owner(u32 idx)
{
/*
* subscribers field of global table index 0 is used to
* maintain synx gmem lock owner data.
* core updates the field after acquiring the lock and
* before releasing the lock appropriately.
*/
return synx_gmem.table[0].subscribers;
}
static void synx_gmem_lock_owner_set(u32 idx)
{
synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
}
static void synx_gmem_lock_owner_clear(u32 idx)
{
if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
dprintk(SYNX_WARN, "reset lock owned by core %u\n",
synx_gmem.table[0].subscribers);
synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
}
static int synx_gmem_lock(u32 idx, unsigned long *flags)
{
int rc;
if (!synx_hwlock)
return -SYNX_INVALID;
rc = hwspin_lock_timeout_irqsave(
synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
if (!rc)
synx_gmem_lock_owner_set(idx);
return rc;
}
static void synx_gmem_unlock(u32 idx, unsigned long *flags)
{
synx_gmem_lock_owner_clear(idx);
hwspin_unlock_irqrestore(synx_hwlock, flags);
}
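/*
 * Access pattern used throughout this file (shown for clarity, illustrative
 * only): every read or update of a global table entry is bracketed by the
 * hwspinlock helpers above.
 *
 *	rc = synx_gmem_lock(idx, &flags);
 *	if (rc)
 *		return rc;
 *	synx_g_obj = &synx_gmem.table[idx];
 *	... read or update *synx_g_obj ...
 *	synx_gmem_unlock(idx, &flags);
 */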
static void synx_global_print_data(
struct synx_global_coredata *synx_g_obj,
const char *func)
{
int i = 0;
dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
func, synx_g_obj->status,
synx_g_obj->handle, synx_g_obj->refcount);
dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
func, synx_g_obj->subscribers, synx_g_obj->waiters,
synx_g_obj->num_child);
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
if (synx_g_obj->parents[i])
dprintk(SYNX_VERB, "%s: parents %u:%u",
func, i, synx_g_obj->parents[i]);
}
bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle,
struct synx_global_coredata *synx_global_entry)
{
int rc = SYNX_SUCCESS;
u32 idx;
unsigned long flags;
struct synx_global_coredata *entry;
if (!synx_gmem.table) {
dprintk(SYNX_VERB, "synx_gmem is NULL\n");
return false;
}
idx = synx_handle & SYNX_HANDLE_INDEX_MASK;
if (!synx_is_valid_idx(idx))
return false;
rc = synx_gmem_lock(idx, &flags);
if (rc) {
dprintk(SYNX_VERB, "Failed to lock entry %d\n", idx);
return false;
}
entry = &synx_gmem.table[idx];
memcpy(synx_global_entry, entry, sizeof(struct synx_global_coredata));
synx_gmem_unlock(idx, &flags);
return true;
}
int synx_global_dump_shared_memory(void)
{
int rc = SYNX_SUCCESS, idx;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_INVALID;
/* Print bitmap memory */
for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
__func__, idx, synx_gmem.bitmap[idx]);
synx_gmem_unlock(idx, &flags);
}
/* Print table memory */
for (idx = 0;
idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
idx++) {
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
synx_gmem_unlock(idx, &flags);
}
return rc;
}
static int synx_gmem_init(void)
{
if (!synx_gmem.table)
return -SYNX_NOMEM;
synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
if (!synx_hwlock) {
dprintk(SYNX_ERR, "hwspinlock request failed\n");
return -SYNX_NOMEM;
}
/* zero idx not allocated for clients */
ipclite_global_test_and_set_bit(0,
(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));
return SYNX_SUCCESS;
}
u32 synx_global_map_core_id(enum synx_core_id id)
{
u32 host_id;
switch (id) {
case SYNX_CORE_APSS:
host_id = IPCMEM_APPS; break;
case SYNX_CORE_NSP:
host_id = IPCMEM_CDSP; break;
case SYNX_CORE_IRIS:
host_id = IPCMEM_VPU; break;
case SYNX_CORE_EVA:
host_id = IPCMEM_CVP; break;
case SYNX_CORE_ICP:
host_id = IPCMEM_CAM; break;
default:
host_id = IPCMEM_NUM_HOSTS;
dprintk(SYNX_ERR, "invalid core id\n");
}
return host_id;
}
int synx_global_alloc_index(u32 *idx)
{
int rc = SYNX_SUCCESS;
u32 prev, index;
const u32 size = SYNX_GLOBAL_MAX_OBJS;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(idx))
return -SYNX_INVALID;
do {
index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
if (index >= size) {
rc = -SYNX_NOMEM;
break;
}
prev = ipclite_global_test_and_set_bit(index % 32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32));
if ((prev & (1UL << (index % 32))) == 0) {
*idx = index;
dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
break;
}
} while (true);
return rc;
}
int synx_global_init_coredata(u32 h_synx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
synx_g_obj->parents[0] != 0) {
dprintk(SYNX_ERR,
"entry not cleared for idx %u,\n"
"synx_g_obj->status %d,\n"
"synx_g_obj->refcount %d,\n"
"synx_g_obj->subscribers %d,\n"
"synx_g_obj->handle %u,\n"
"synx_g_obj->parents[0] %d\n",
idx, synx_g_obj->status,
synx_g_obj->refcount,
synx_g_obj->subscribers,
synx_g_obj->handle,
synx_g_obj->parents[0]);
synx_gmem_unlock(idx, &flags);
return -SYNX_INVALID;
}
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
/* set status to active */
synx_g_obj->status = SYNX_STATE_ACTIVE;
synx_g_obj->refcount = 1;
synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
synx_g_obj->handle = h_synx;
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
static int synx_global_get_waiting_cores_locked(
struct synx_global_coredata *synx_g_obj,
bool *cores)
{
int i;
synx_global_print_data(synx_g_obj, __func__);
for (i = 0; i < SYNX_CORE_MAX; i++) {
if (synx_g_obj->waiters & (1UL << i)) {
cores[i] = true;
dprintk(SYNX_VERB,
"waiting for handle %u/n",
synx_g_obj->handle);
}
}
/* clear waiter list so signals are not repeated */
synx_g_obj->waiters = 0;
return SYNX_SUCCESS;
}
int synx_global_get_waiting_cores(u32 idx, bool *cores)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_get_waiting_cores_locked(synx_g_obj, cores);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->waiters |= (1UL << id);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_get_subscribed_cores(u32 idx, bool *cores)
{
int i;
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_CORE_MAX; i++)
if (synx_g_obj->subscribers & (1UL << i))
cores[i] = true;
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_fetch_handle_details(u32 idx, u32 *h_synx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
*h_synx = synx_g_obj->handle;
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->subscribers |= (1UL << id);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->subscribers &= ~(1UL << id);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
u32 synx_global_get_parents_num(u32 idx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 i, count = 0;
if (!synx_gmem.table)
return 0;
if (!synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return 0;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] != 0)
count++;
}
synx_gmem_unlock(idx, &flags);
return count;
}
static int synx_global_get_parents_locked(
struct synx_global_coredata *synx_g_obj, u32 *parents)
{
u32 i;
if (!synx_g_obj || !parents)
return -SYNX_NOMEM;
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
parents[i] = synx_g_obj->parents[i];
return SYNX_SUCCESS;
}
int synx_global_get_parents(u32 idx, u32 *parents)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table || !parents)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
rc = synx_global_get_parents_locked(synx_g_obj, parents);
synx_gmem_unlock(idx, &flags);
return rc;
}
u32 synx_global_get_status(u32 idx)
{
int rc;
unsigned long flags;
u32 status = SYNX_STATE_ACTIVE;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return 0;
if (!synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return 0;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0)
status = synx_g_obj->status;
synx_gmem_unlock(idx, &flags);
return status;
}
u32 synx_global_test_status_set_wait(u32 idx,
enum synx_core_id id)
{
int rc;
unsigned long flags;
u32 status;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return 0;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return 0;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
status = synx_g_obj->status;
/* if handle is still ACTIVE */
if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) {
synx_g_obj->waiters |= (1UL << id);
status = SYNX_STATE_ACTIVE;
} else
dprintk(SYNX_DBG, "handle %u already signaled %u",
synx_g_obj->handle, synx_g_obj->status);
synx_gmem_unlock(idx, &flags);
return status;
}
static int synx_global_update_status_core(u32 idx,
u32 status)
{
u32 i, p_idx;
int rc;
bool clear = false;
unsigned long flags;
uint64_t data;
struct synx_global_coredata *synx_g_obj;
u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
bool wait_cores[SYNX_CORE_MAX] = {false};
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
/* prepare for cross core signaling */
data = synx_g_obj->handle;
data <<= 32;
if (synx_g_obj->num_child != 0) {
/* composite handle */
synx_g_obj->num_child--;
if (synx_g_obj->status == SYNX_STATE_ACTIVE ||
(status > SYNX_STATE_SIGNALED_SUCCESS &&
status <= SYNX_STATE_SIGNALED_MAX))
synx_g_obj->status = status;
if (synx_g_obj->num_child == 0) {
data |= synx_g_obj->status;
synx_global_get_waiting_cores_locked(synx_g_obj,
wait_cores);
synx_global_get_parents_locked(synx_g_obj, h_parents);
/* release ref held by constituting handles */
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0,
sizeof(*synx_g_obj));
clear = true;
}
} else {
/* pending notification from handles */
data = 0;
dprintk(SYNX_DBG,
"Child notified parent handle %u, pending %u\n",
synx_g_obj->handle, synx_g_obj->num_child);
}
} else {
synx_g_obj->status = status;
data |= synx_g_obj->status;
synx_global_get_waiting_cores_locked(synx_g_obj,
wait_cores);
synx_global_get_parents_locked(synx_g_obj, h_parents);
}
synx_gmem_unlock(idx, &flags);
if (clear) {
ipclite_global_test_and_clear_bit(idx%32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM,
"cleared global idx %u\n", idx);
}
/* notify waiting clients on signal */
if (data) {
/* notify wait client */
/* In case of SSR, someone might be waiting on the same core.
 * In other cases, the synx_signal API takes care of signaling
 * handles on the same core, so no interrupt needs to be sent.
 */
if (status == SYNX_STATE_SIGNALED_SSR)
i = 0;
else
i = 1;
for (; i < SYNX_CORE_MAX ; i++) {
if (!wait_cores[i])
continue;
dprintk(SYNX_DBG,
"invoking ipc signal handle %u, status %u\n",
synx_g_obj->handle, synx_g_obj->status);
if (ipclite_msg_send(
synx_global_map_core_id(i),
data))
dprintk(SYNX_ERR,
"ipc signaling %llu to core %u failed\n",
data, i);
}
}
/* handle parent notifications */
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
p_idx = h_parents[i];
if (p_idx == 0)
continue;
synx_global_update_status_core(p_idx, status);
}
return SYNX_SUCCESS;
}
int synx_global_update_status(u32 idx, u32 status)
{
int rc = -SYNX_INVALID;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->num_child != 0) {
/* composite handle cannot be signaled */
goto fail;
} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
rc = -SYNX_ALREADY;
goto fail;
}
synx_gmem_unlock(idx, &flags);
return synx_global_update_status_core(idx, status);
fail:
synx_gmem_unlock(idx, &flags);
return rc;
}
int synx_global_get_ref(u32 idx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
if (synx_g_obj->handle && synx_g_obj->refcount)
synx_g_obj->refcount++;
else
rc = -SYNX_NOENT;
synx_gmem_unlock(idx, &flags);
return rc;
}
void synx_global_put_ref(u32 idx)
{
int rc;
bool clear = false;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return;
if (!synx_is_valid_idx(idx))
return;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
clear = true;
}
synx_gmem_unlock(idx, &flags);
if (clear) {
ipclite_global_test_and_clear_bit(idx%32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
}
}
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
{
int rc = -SYNX_INVALID;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 i, j = 0;
u32 idx;
u32 num_child = 0;
u32 parent_status = SYNX_STATE_ACTIVE;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(p_idx))
return -SYNX_INVALID;
if (num_list == 0)
return SYNX_SUCCESS;
while (j < num_list) {
idx = idx_list[j];
if (!synx_is_valid_idx(idx))
goto fail;
rc = synx_gmem_lock(idx, &flags);
if (rc)
goto fail;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] == 0) {
synx_g_obj->parents[i] = p_idx;
break;
}
}
if (synx_g_obj->status == SYNX_STATE_ACTIVE)
num_child++;
else if (synx_g_obj->status >
SYNX_STATE_SIGNALED_SUCCESS &&
synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX)
parent_status = synx_g_obj->status;
else if (parent_status == SYNX_STATE_ACTIVE)
parent_status = synx_g_obj->status;
if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0)
num_child++;
dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n",
synx_g_obj->status, parent_status);
synx_gmem_unlock(idx, &flags);
if (i >= SYNX_GLOBAL_MAX_PARENTS) {
rc = -SYNX_NOMEM;
goto fail;
}
j++;
}
rc = synx_gmem_lock(p_idx, &flags);
if (rc)
goto fail;
synx_g_obj = &synx_gmem.table[p_idx];
synx_g_obj->num_child += num_child;
if (synx_g_obj->num_child != 0)
synx_g_obj->refcount++;
synx_g_obj->status = parent_status;
synx_global_print_data(synx_g_obj, __func__);
synx_gmem_unlock(p_idx, &flags);
return SYNX_SUCCESS;
fail:
/* unwind the parent links added to the entries processed so far */
while (j--) {
idx = idx_list[j];
if (synx_gmem_lock(idx, &flags))
continue;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] == p_idx) {
synx_g_obj->parents[i] = 0;
break;
}
}
synx_gmem_unlock(idx, &flags);
}
return rc;
}
int synx_global_recover(enum synx_core_id core_id)
{
int rc = SYNX_SUCCESS;
u32 idx = 0;
const u32 size = SYNX_GLOBAL_MAX_OBJS;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
bool update;
int *clear_idx = NULL;
if (!synx_gmem.table)
return -SYNX_NOMEM;
clear_idx = kcalloc(SYNX_GLOBAL_MAX_OBJS, sizeof(int), GFP_KERNEL);
if (!clear_idx)
return -SYNX_NOMEM;
ipclite_recover(synx_global_map_core_id(core_id));
/* recover synx gmem lock if it was owned by core in ssr */
if (synx_gmem_lock_owner(0) == core_id) {
synx_gmem_lock_owner_clear(0);
hwspin_unlock_raw(synx_hwlock);
}
idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
size, idx + 1);
while (idx < size) {
update = false;
rc = synx_gmem_lock(idx, &flags);
if (rc)
goto free;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->refcount &&
synx_g_obj->subscribers & (1UL << core_id)) {
synx_g_obj->subscribers &= ~(1UL << core_id);
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
clear_idx[idx] = 1;
} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
update = true;
}
}
synx_gmem_unlock(idx, &flags);
if (update)
synx_global_update_status(idx,
SYNX_STATE_SIGNALED_SSR);
idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
size, idx + 1);
}
for (idx = 1; idx < size; idx++) {
if (clear_idx[idx]) {
ipclite_global_test_and_clear_bit(idx % 32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM, "released global idx %u\n", idx);
}
}
free:
kfree(clear_idx);
return rc;
}
int synx_global_mem_init(void)
{
int rc;
int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32;
struct global_region_info mem_info;
rc = get_global_partition_info(&mem_info);
if (rc) {
dprintk(SYNX_ERR, "error setting up global shared memory\n");
return rc;
}
memset(mem_info.virt_base, 0, mem_info.size);
dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
mem_info.virt_base, mem_info.size);
synx_gmem.bitmap = (u32 *)mem_info.virt_base;
synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
synx_gmem.table =
(struct synx_global_coredata *)(synx_gmem.locks + 2);
dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
synx_gmem.bitmap, synx_gmem.table);
return synx_gmem_init();
}
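/*
 * Resulting shared-region layout, per the pointer arithmetic above
 * (illustrative summary): the allocation bitmap occupies
 * SYNX_GLOBAL_MAX_OBJS / 32 (= 128) u32 words, followed by two u32 lock
 * words, followed by the array of synx_global_coredata entries.
 */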

View File

@ -0,0 +1,305 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_SHARED_MEM_H__
#define __SYNX_SHARED_MEM_H__
#include "synx_err.h"
#include "ipclite_client.h"
#include <synx_header.h>
/**
* enum synx_core_id - Synx core IDs
*
* SYNX_CORE_APSS : APSS core
* SYNX_CORE_NSP : NSP core
* SYNX_CORE_EVA : EVA core
* SYNX_CORE_IRIS : IRIS core
* SYNX_CORE_ICP : ICP core
*/
enum synx_core_id {
SYNX_CORE_APSS = 0,
SYNX_CORE_NSP,
SYNX_CORE_EVA,
SYNX_CORE_IRIS,
SYNX_CORE_ICP,
SYNX_CORE_MAX,
};
/* synx handle encoding */
#define SYNX_HANDLE_INDEX_BITS 16
#define SYNX_HANDLE_CORE_BITS 4
#define SYNX_HANDLE_GLOBAL_FLAG_BIT 1
#define SYNX_GLOBAL_SHARED_LOCKS 1
#define SYNX_GLOBAL_MAX_OBJS 4096
#define SYNX_GLOBAL_MAX_PARENTS 4
#define SYNX_HANDLE_INDEX_MASK ((1UL<<SYNX_HANDLE_INDEX_BITS)-1)
#define SHRD_MEM_DUMP_NUM_BMAP_WORDS 10
#define NUM_CHAR_BIT 8
/* spin lock timeout (ms) */
#define SYNX_HWSPIN_TIMEOUT 500
#define SYNX_HWSPIN_ID 10
/* dma fence states */
#define SYNX_DMA_FENCE_STATE_MAX 4096
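/*
 * Handle layout implied by the bit widths above (illustration only):
 * bits [15:0] carry the global table index, bits [19:16] the core id, and
 * bit 20 the global flag. The first global handle, 0x100001 (1048577),
 * therefore decodes to index 1 with the global flag set.
 */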
/**
 * struct synx_global_coredata - Synx global object, used for bookkeeping
* of all metadata associated with each individual global entry
*
* @status : Synx signaling status
* @handle : Handle of global entry
* @refcount : References owned by each core
* @num_child : Count of children pending signal (for composite handle)
* @subscribers : Cores owning reference on this object
* @waiters : Cores waiting for notification
* @parents : Composite global coredata index of parent entities
* Can be part of SYNX_GLOBAL_MAX_PARENTS composite entries.
*/
struct synx_global_coredata {
u32 status;
u32 handle;
u16 refcount;
u16 num_child;
u16 subscribers;
u16 waiters;
u16 parents[SYNX_GLOBAL_MAX_PARENTS];
};
/**
* struct synx_shared_mem - Synx global shared memory descriptor
*
 * @bitmap : Bitmap for allocating entries from the table
* @locks : Array of locks for exclusive access to table entries
* @table : Array of Synx global entries
*/
struct synx_shared_mem {
u32 *bitmap;
u32 *locks;
struct synx_global_coredata *table;
};
static inline bool synx_is_valid_idx(u32 idx)
{
if (idx < SYNX_GLOBAL_MAX_OBJS)
return true;
return false;
}
/**
* synx_global_mem_init - Initialize global shared memory
*
* @return Zero on success, negative error on failure.
*/
int synx_global_mem_init(void);
/**
* synx_global_map_core_id - Map Synx core ID to IPC Lite host
*
* @param id : Core Id to map
*
* @return IPC host ID.
*/
u32 synx_global_map_core_id(enum synx_core_id id);
/**
* synx_global_alloc_index - Allocate new global entry
*
* @param idx : Pointer to global table index (filled by function)
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_alloc_index(u32 *idx);
/**
 * synx_global_init_coredata - Initialize the global entry for the given handle
*
* @param h_synx : Synx global handle
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_init_coredata(u32 h_synx);
/**
 * synx_global_get_waiting_cores - Get list of all the waiting cores on a global entry
*
* Will fill the cores array with TRUE if core is waiting, and
* false if not. Indexed through enum synx_core_id.
*
* @param idx : Global entry index
* @param cores : Array of boolean variables, one each for supported core.
* Array should contain SYNX_CORE_MAX entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_waiting_cores(u32 idx, bool *cores);
/**
* synx_global_set_waiting_core - Set core as a waiting core on global entry
*
* @param idx : Global entry index
* @param id : Core to be set as waiter
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_set_waiting_core(u32 idx, enum synx_core_id id);
/**
 * synx_global_get_subscribed_cores - Get list of all the subscribed cores on a global entry
*
* Will fill the cores array with TRUE if core is subscribed, and
* false if not. Indexed through enum synx_core_id.
*
* @param idx : Global entry index
* @param cores : Array of boolean variables, one each for supported core.
* Array should contain SYNX_CORE_MAX entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_subscribed_cores(u32 idx, bool *cores);
/**
* synx_global_set_subscribed_core - Set core as a subscriber core on global entry
*
* @param idx : Global entry index
* @param id : Core to be added as subscriber
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id);
/**
* synx_global_clear_subscribed_core - Clear core as a subscriber core on global entry
*
* @param idx : Global entry index
 * @param id : Core to be removed as subscriber
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id);
/**
* synx_global_get_status - Get status of the global entry
*
* @param idx : Global entry index
*
* @return Global entry status
*/
u32 synx_global_get_status(u32 idx);
/**
 * synx_global_test_status_set_wait - Check status and add core as waiter if not signaled
 *
 * This tests the status and registers the waiter in one atomic operation,
 * to avoid racing with a signal: if the status check and the waiter
 * registration were done as two separate operations, a signal arriving
 * between them could be missed and the IPC notification never sent.
*
* @param idx : Global entry index
* @param id : Core to be set as waiter (if unsignaled)
*
* @return Status of global entry idx.
*/
u32 synx_global_test_status_set_wait(u32 idx,
enum synx_core_id id);
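/*
 * A minimal wait-path sketch (illustrative only; handle_signaled is a
 * hypothetical helper): the single atomic test-and-set below is what
 * closes the lost-wakeup window described above.
 *
 *	status = synx_global_test_status_set_wait(idx, SYNX_CORE_APSS);
 *	if (status != SYNX_STATE_ACTIVE)
 *		handle_signaled(status);
 *	// otherwise the core is now registered as a waiter and is
 *	// notified over IPC once the handle is signaled
 */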
/**
* synx_global_update_status - Update status of the global entry
*
* Function also updates the parent composite handles
* about the signaling.
*
* @param idx : Global entry index
* @param status : Signaling status
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_update_status(u32 idx, u32 status);
/**
* synx_global_get_ref - Get additional reference on global entry
*
* @param idx : Global entry index
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_ref(u32 idx);
/**
* synx_global_put_ref - Release reference on global entry
*
* @param idx : Global entry index
*/
void synx_global_put_ref(u32 idx);
/**
* synx_global_get_parents - Get the global entry index of all composite parents
*
* @param idx : Global entry index whose parents are requested
* @param parents : Array of global entry index of composite handles
 * Filled by the function. Array should contain at least
* SYNX_GLOBAL_MAX_PARENTS entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_parents(u32 idx, u32 *parents);
/**
* synx_global_merge - Merge handles to form global handle
*
 * Links each constituent entry to the composite parent; essential for merge.
*
* @param idx_list : List of global indexes to merge
* @param num_list : Number of handles in the list to merge
 * @param p_idx : Global entry index allocated for composite handle
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx);
/**
* synx_global_recover - Recover handles subscribed by specific core
*
* @param id : Core ID to clean up
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_recover(enum synx_core_id id);
/**
* synx_global_clean_cdsp_mem - Release handles created/used by CDSP
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_clean_cdsp_mem(void);
/**
* synx_global_dump_shared_memory - Prints the top entries of
* bitmap and table in global shared memory.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_dump_shared_memory(void);
/**
* synx_global_fetch_handle_details - Fetches the synx handle from
* global shared memory.
*
 * @param idx : Global entry index whose handle is requested.
 * @param h_synx : Pointer to the handle, filled by the function.
 *
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_fetch_handle_details(u32 idx, u32 *h_synx);
/* Function to fetch global shared memory entry */
bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle,
struct synx_global_coredata *synx_global_entry);
#endif /* __SYNX_SHARED_MEM_H__ */

View File

@ -0,0 +1,249 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_PRIVATE_H__
#define __SYNX_PRIVATE_H__
#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/hashtable.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include "synx_api.h"
#include "synx_global.h"
#define SYNX_MAX_OBJS SYNX_GLOBAL_MAX_OBJS
#define SYNX_NAME "synx"
#define SYNX_DEVICE_NAME "synx_device"
#define SYNX_WQ_CB_NAME "hiprio_synx_cb_queue"
#define SYNX_WQ_CB_THREADS 4
#define SYNX_WQ_CLEANUP_NAME "hiprio_synx_cleanup_queue"
#define SYNX_WQ_CLEANUP_THREADS 2
#define SYNX_MAX_NUM_BINDINGS 8
#define SYNX_OBJ_HANDLE_SHIFT SYNX_HANDLE_INDEX_BITS
#define SYNX_OBJ_CORE_ID_SHIFT (SYNX_OBJ_HANDLE_SHIFT+SYNX_HANDLE_CORE_BITS)
#define SYNX_OBJ_GLOBAL_FLAG_SHIFT (SYNX_OBJ_CORE_ID_SHIFT+SYNX_HANDLE_GLOBAL_FLAG_BIT)
#define SYNX_OBJ_HANDLE_MASK GENMASK_ULL(SYNX_OBJ_HANDLE_SHIFT-1, 0)
#define SYNX_OBJ_CORE_ID_MASK GENMASK_ULL(SYNX_OBJ_CORE_ID_SHIFT-1, SYNX_OBJ_HANDLE_SHIFT)
#define SYNX_OBJ_GLOBAL_FLAG_MASK \
GENMASK_ULL(SYNX_OBJ_GLOBAL_FLAG_SHIFT-1, SYNX_OBJ_CORE_ID_SHIFT)
#define MAX_TIMESTAMP_SIZE 32
#define SYNX_OBJ_NAME_LEN 64
#define SYNX_PAYLOAD_WORDS 4
#define SYNX_CREATE_IM_EX_RELEASE SYNX_CREATE_MAX_FLAGS
#define SYNX_CREATE_MERGED_FENCE (SYNX_CREATE_MAX_FLAGS << 1)
#define SYNX_MAX_REF_COUNTS 100
struct synx_bind_desc {
struct synx_external_desc_v2 external_desc;
void *external_data;
};
struct error_node {
char timestamp[32];
u64 session;
u32 client_id;
u32 h_synx;
s32 error_code;
struct list_head node;
};
struct synx_entry_32 {
u32 key;
void *data;
struct hlist_node node;
};
struct synx_entry_64 {
u64 key;
u32 data[2];
struct kref refcount;
struct hlist_node node;
};
struct synx_map_entry {
struct synx_coredata *synx_obj;
struct kref refcount;
u32 flags;
u32 key;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_fence_entry {
u32 g_handle;
u32 l_handle;
u64 key;
struct hlist_node node;
};
struct synx_kernel_payload {
u32 h_synx;
u32 status;
void *data;
synx_user_callback_t cb_func;
synx_user_callback_t cancel_cb_func;
};
struct synx_cb_data {
struct synx_session *session;
u32 idx;
u32 h_synx;
u32 status;
struct timer_list synx_timer;
u64 timeout;
struct work_struct cb_dispatch;
struct list_head node;
};
struct synx_client_cb {
bool is_valid;
u32 idx;
struct synx_client *client;
struct synx_kernel_payload kernel_cb;
struct list_head node;
};
struct synx_registered_ops {
char name[SYNX_OBJ_NAME_LEN];
struct bind_operations ops;
enum synx_bind_client_type type;
bool valid;
};
struct synx_cleanup_cb {
void *data;
struct work_struct cb_dispatch;
};
enum synx_signal_handler {
SYNX_SIGNAL_FROM_CLIENT = 0x1,
SYNX_SIGNAL_FROM_FENCE = 0x2,
SYNX_SIGNAL_FROM_IPC = 0x4,
SYNX_SIGNAL_FROM_CALLBACK = 0x8,
};
struct synx_signal_cb {
u32 handle;
u32 status;
u64 ext_sync_id;
struct synx_coredata *synx_obj;
enum synx_signal_handler flag;
struct dma_fence_cb fence_cb;
struct work_struct cb_dispatch;
};
struct synx_coredata {
char name[SYNX_OBJ_NAME_LEN];
struct dma_fence *fence;
struct mutex obj_lock;
struct kref refcount;
u32 type;
u32 status;
u32 num_bound_synxs;
struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
struct list_head reg_cbs_list;
u32 global_idx;
u32 map_count;
struct synx_signal_cb *signal_cb;
};
struct synx_client;
struct synx_device;
struct synx_handle_coredata {
struct synx_client *client;
struct synx_coredata *synx_obj;
void *map_entry;
struct kref refcount;
u32 key;
u32 rel_count;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_client {
u32 type;
bool active;
struct synx_device *device;
char name[SYNX_OBJ_NAME_LEN];
u64 id;
u64 dma_context;
struct kref refcount;
struct mutex event_q_lock;
struct list_head event_q;
wait_queue_head_t event_wq;
DECLARE_BITMAP(cb_bitmap, SYNX_MAX_OBJS);
struct synx_client_cb cb_table[SYNX_MAX_OBJS];
DECLARE_HASHTABLE(handle_map, 8);
spinlock_t handle_map_lock;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_native {
spinlock_t metadata_map_lock;
DECLARE_HASHTABLE(client_metadata_map, 8);
spinlock_t fence_map_lock;
DECLARE_HASHTABLE(fence_map, 10);
spinlock_t global_map_lock;
DECLARE_HASHTABLE(global_map, 10);
spinlock_t local_map_lock;
DECLARE_HASHTABLE(local_map, 8);
spinlock_t csl_map_lock;
DECLARE_HASHTABLE(csl_fence_map, 8);
DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
};
struct synx_cdsp_ssr {
u64 ssrcnt;
void *handle;
struct notifier_block nb;
};
struct synx_device {
struct cdev cdev;
dev_t dev;
struct class *class;
struct synx_native *native;
struct workqueue_struct *wq_cb;
struct workqueue_struct *wq_cleanup;
struct mutex vtbl_lock;
struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
struct dentry *debugfs_root;
struct list_head error_list;
struct mutex error_lock;
struct synx_cdsp_ssr cdsp_ssr;
};
int synx_signal_core(struct synx_coredata *synx_obj,
u32 status,
bool cb_signal,
s32 ext_sync_id);
int synx_ipc_callback(uint32_t client_id,
int64_t data, void *priv);
void synx_signal_handler(struct work_struct *cb_dispatch);
int synx_native_release_core(struct synx_client *session,
u32 h_synx);
int synx_bind(struct synx_session *session,
u32 h_synx,
struct synx_external_desc_v2 external_sync);
#endif /* __SYNX_PRIVATE_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,188 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_UTIL_H__
#define __SYNX_UTIL_H__
#include "synx_api.h"
#include "synx_private.h"
extern struct synx_device *synx_dev;
u32 __fence_state(struct dma_fence *fence, bool locked);
void synx_util_destroy_coredata(struct kref *kref);
extern void synx_fence_callback(struct dma_fence *fence,
struct dma_fence_cb *cb);
extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
u32 status);
static inline bool synx_util_is_valid_bind_type(u32 type)
{
if (type < SYNX_MAX_BIND_TYPES)
return true;
return false;
}
static inline bool synx_util_is_global_handle(u32 h_synx)
{
return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false;
}
static inline u32 synx_util_get_object_type(
struct synx_coredata *synx_obj)
{
return synx_obj ? synx_obj->type : 0;
}
static inline bool synx_util_is_merged_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
(synx_obj->type & SYNX_CREATE_MERGED_FENCE))
return true;
return false;
}
static inline bool synx_util_is_global_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
(synx_obj->type & SYNX_CREATE_GLOBAL_FENCE))
return true;
return false;
}
static inline bool synx_util_is_external_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
!(synx_obj->type & SYNX_CREATE_MERGED_FENCE) &&
(synx_obj->type & SYNX_CREATE_DMA_FENCE))
return true;
return false;
}
static inline u32 synx_util_map_params_to_type(u32 flags)
{
if (flags & SYNX_CREATE_CSL_FENCE)
return SYNX_TYPE_CSL;
return SYNX_MAX_BIND_TYPES;
}
static inline u32 synx_util_global_idx(u32 h_synx)
{
return (h_synx & SYNX_OBJ_HANDLE_MASK);
}
/* coredata memory functions */
void synx_util_get_object(struct synx_coredata *synx_obj);
void synx_util_put_object(struct synx_coredata *synx_obj);
void synx_util_object_destroy(struct synx_coredata *synx_obj);
static inline struct synx_coredata *synx_util_obtain_object(
struct synx_handle_coredata *synx_data)
{
if (IS_ERR_OR_NULL(synx_data))
return NULL;
return synx_data->synx_obj;
}
/* global/local map functions */
struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
    u32 h_synx, u32 flags);
struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
void synx_util_release_map_entry(struct synx_map_entry *map_entry);
void synx_util_destroy_map_entry(struct kref *kref);

/* fence map functions */
int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx,
    u32 global);
u32 synx_util_get_fence_entry(u64 key, u32 global);
void synx_util_release_fence_entry(u64 key);

/* coredata initialize functions */
int synx_util_init_coredata(struct synx_coredata *synx_obj,
    struct synx_create_params *params,
    struct dma_fence_ops *ops,
    u64 dma_context);
int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
    struct dma_fence **fences,
    struct synx_merge_params *params,
    u32 num_objs,
    u64 dma_context);

/* handle related functions */
int synx_alloc_global_handle(u32 *new_synx);
int synx_alloc_local_handle(u32 *new_synx);
long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size);
int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj,
    u32 *new_h_synx,
    void *map_entry);
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx);

/* callback related functions */
int synx_util_alloc_cb_entry(struct synx_client *client,
    struct synx_kernel_payload *data,
    u32 *cb_idx);
int synx_util_clear_cb_entry(struct synx_client *client,
    struct synx_client_cb *cb);
void synx_util_default_user_callback(u32 h_synx, int status, void *data);
void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state);
void synx_util_cb_dispatch(struct work_struct *cb_dispatch);

/* external fence functions */
int synx_util_activate(struct synx_coredata *synx_obj);
int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx);

/* merge related helper functions */
s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs);
int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs,
    struct dma_fence ***fences,
    u32 *fence_cnt);

/* coredata status functions */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj);
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj);

/* client handle map related functions */
struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client,
    u32 h_synx);
void synx_util_release_handle(struct synx_handle_coredata *synx_data);
int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id,
    u32 type, struct synx_handle_coredata **handle);
void synx_client_destroy(struct kref *kref);
void synx_util_destroy_handle(struct kref *kref);

/* client memory handler functions */
struct synx_client *synx_get_client(struct synx_session *session);
void synx_put_client(struct synx_client *client);

/* error log functions */
void synx_util_generate_timestamp(char *timestamp, size_t size);
void synx_util_log_error(u32 id, u32 h_synx, s32 err);

/* external fence map functions */
int synx_util_save_data(void *fence, u32 flags, u32 data);
struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type);
void synx_util_remove_data(void *fence, u32 type);

/* misc */
void synx_util_destroy_data(struct kref *kref);
void synx_util_map_import_params_to_create(
    struct synx_import_indv_params *params,
    struct synx_create_params *c_params);

struct bind_operations *synx_util_get_bind_ops(u32 type);
u32 synx_util_map_client_id_to_core(enum synx_client_id id);
int synx_get_child_coredata(struct synx_coredata *synx_obj,
    struct synx_coredata ***child_synx_obj, int *num_fences);

#endif /* __SYNX_UTIL_H__ */
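The handle helpers above (synx_util_is_global_handle, synx_util_global_idx) treat h_synx as a packed word: a global-scope flag plus a table index, with the masks defined in synx_private.h. Below is a self-contained, user-space sketch of that packing using made-up mask values, just to make the bit layout concrete; the driver's real masks and the layout produced by synx_encode_handle() may differ.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real SYNX_OBJ_GLOBAL_FLAG_MASK and
 * SYNX_OBJ_HANDLE_MASK live in synx_private.h and may differ. */
#define DEMO_GLOBAL_FLAG_MASK 0x08000000u
#define DEMO_HANDLE_MASK      0x00ffffffu

static bool demo_is_global_handle(uint32_t h_synx)
{
    return (h_synx & DEMO_GLOBAL_FLAG_MASK) != 0;
}

static uint32_t demo_global_idx(uint32_t h_synx)
{
    return h_synx & DEMO_HANDLE_MASK;
}

int main(void)
{
    /* A "global" handle whose index into the handle table is 42 */
    uint32_t h_synx = DEMO_GLOBAL_FLAG_MASK | 42u;

    printf("global=%d idx=%u\n",
        demo_is_global_handle(h_synx), demo_global_idx(h_synx));
    return 0;
}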

File diff suppressed because it is too large

View File

@ -0,0 +1,118 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "../ipclite_client.h"
#include "../ipclite.h"
/* General testing related configurations */
#define IPCLITE_TEST_MAX_THREADS 5
#define IPCLITE_TEST_HEADER 0xaa
#define IPCLITE_TEST_ALL_CORES GENMASK(IPCMEM_NUM_HOSTS - 1, 0)
/* Synx Usecase related definitions */
#define NUM_HANDLES 4096
#define BITMAP_SIZE (NUM_HANDLES/32)
#define BITS(x) (sizeof(x)*8)
struct handle_t {
    int handle_bitmap[BITMAP_SIZE];
    int handle_data[NUM_HANDLES];
};

/* Flags for Pass, Fail, Start, and Stop */
#define IPCLITE_TEST_PASS 2
#define IPCLITE_TEST_FAIL 1
#define IPCLITE_TEST_START 2
#define IPCLITE_TEST_STOP 1

/* List of Cases Available for Testing */
enum ipclite_test_type {
    PING = 1,
    NEGATIVE = 2,
    GLOBAL_ATOMIC = 3,
    DEBUG = 4,
    SSR = 5,
    HW_MUTEX = 6,
};

/* List of sysfs parameters */
enum ipclite_test_param {
    TEST_CASE = 1,
    SENDER_LIST = 2,
    RECEIVER_LIST = 3,
    NUM_PINGS = 4,
    WAIT = 5,
    NUM_ITR = 6,
    NUM_THREADS = 7,
    ENABLED_CORES = 8,
};

/* List of subtests for HW Mutex Test */
enum ipclite_test_hw_mutex_subtest {
    HW_MUTEX_RELEASE = 1,
};

/* List of messages for SSR Testing */
enum ipclite_test_ssr_subtest {
    SSR_CRASHING = 1,
    SSR_WAKEUP = 2,
};

/* List of subtests for Global Atomics Testing */
enum ipclite_test_global_atomics_subtest {
    GLOBAL_ATOMICS_INC = 1,
    GLOBAL_ATOMICS_DEC = 2,
    GLOBAL_ATOMICS_INC_DEC = 3,
    GLOBAL_ATOMICS_SET_CLR = 4,
};

/* Types of pings and replies to be sent and received */
enum ipclite_test_ping {
    PING_SEND = 10,
    PING_REPLY = 11,
};

static char core_name[IPCMEM_NUM_HOSTS][13] = {
    "IPCMEM_APPS",
    "IPCMEM_MODEM",
    "IPCMEM_LPASS",
    "IPCMEM_SLPI",
    "IPCMEM_GPU",
    "IPCMEM_CDSP",
    "IPCMEM_CVP",
    "IPCMEM_CAM",
    "IPCMEM_VPU"
};

struct ipclite_test_params {
    int wait;
    int num_pings;
    int num_itr;
    int selected_senders;
    int selected_receivers;
    int selected_test_case;
    int enabled_cores;
    int num_thread;
    int num_senders;
    int num_receivers;
};

struct ipclite_test_data {
    int pings_sent[IPCMEM_NUM_HOSTS];
    int pings_received[IPCMEM_NUM_HOSTS];
    int client_id;
    struct global_region_info *global_memory;
    struct ipclite_test_params test_params;
    int ssr_client;
};

struct ipclite_thread_data {
    struct task_struct *thread;
    void *data;
    wait_queue_head_t wq;
    bool run;
};

static int ipclite_test_callback_fn(unsigned int client_id, long long msg, void *d);
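struct ipclite_thread_data pairs a kthread with a wait queue and a run flag, the usual kernel idiom for parking a worker until a callback hands it work. A minimal sketch of that idiom follows, under the assumption that the test threads use it this way; demo_thread_fn and demo_wake are illustrative names, not the module's actual functions.

#include <linux/kthread.h>
#include <linux/wait.h>

static int demo_thread_fn(void *arg)
{
    struct ipclite_thread_data *t = arg;

    while (!kthread_should_stop()) {
        /* Sleep until woken with work to do, or until stopped */
        wait_event_interruptible(t->wq,
            t->run || kthread_should_stop());
        if (kthread_should_stop())
            break;
        t->run = false;
        /* ... perform one unit of ping/atomic test work ... */
    }
    return 0;
}

/* Typically called from an IPC callback to hand work to the thread */
static void demo_wake(struct ipclite_thread_data *t)
{
    t->run = true;
    wake_up_interruptible(&t->wq);
}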

View File

@ -0,0 +1,16 @@
load(":synx_modules.bzl", "synx_modules")
load(":synx_module_build.bzl", "define_consolidate_gki_modules")
def define_pineapple():
    define_consolidate_gki_modules(
        target = "pineapple",
        registry = synx_modules,
        modules = [
            "synx-driver",
            "ipclite",
            "ipclite_test",
        ],
        config_options = [
            "TARGET_SYNX_ENABLE",
        ],
    )

View File

@ -0,0 +1,28 @@
# Build synx kernel driver
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
ifneq (,$(call is-board-platform-in-list2,volcano))
TARGET_SYNX_ENABLE := false
endif
ifneq (,$(call is-board-platform-in-list2,pitti))
TARGET_SYNX_ENABLE := false
endif
ifeq ($(TARGET_SYNX_ENABLE), true)
ifneq (,$(call is-board-platform-in-list2,$(TARGET_BOARD_PLATFORM)))
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite_test.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko
#BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko
endif
endif

View File

@ -0,0 +1,18 @@
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
ifneq (,$(call is-board-platform-in-list2,volcano))
TARGET_SYNX_ENABLE := false
endif
ifneq (,$(call is-board-platform-in-list2,pitti))
TARGET_SYNX_ENABLE := false
endif
ifeq ($(TARGET_SYNX_ENABLE), true)
PRODUCT_PACKAGES += synx-driver.ko
endif

View File

@ -0,0 +1,128 @@
load(
    "//build/kernel/kleaf:kernel.bzl",
    "ddk_module",
    "ddk_submodule",
    "kernel_module",
    "kernel_modules_install",
)
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")

def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps):
    processed_config_srcs = {}
    processed_config_deps = {}

    for config_src_name in config_srcs:
        config_src = config_srcs[config_src_name]
        if type(config_src) == "list":
            processed_config_srcs[config_src_name] = {True: config_src}
        else:
            processed_config_srcs[config_src_name] = config_src

    for config_deps_name in config_deps:
        config_dep = config_deps[config_deps_name]
        if type(config_dep) == "list":
            processed_config_deps[config_deps_name] = {True: config_dep}
        else:
            processed_config_deps[config_deps_name] = config_dep

    module = struct(
        name = name,
        path = path,
        srcs = srcs,
        config_srcs = processed_config_srcs,
        config_option = config_option,
        deps = deps,
        config_deps = processed_config_deps,
    )
    module_map[name] = module

def _get_config_choices(map, options):
    choices = []
    for option in map:
        choices.extend(map[option].get(option in options, []))
    return choices

def _get_kernel_build_options(modules, config_options):
    all_options = {option: True for option in config_options}
    all_options = all_options | {module.config_option: True for module in modules if module.config_option}
    return all_options

def _get_kernel_build_module_srcs(module, options, formatter):
    srcs = module.srcs + _get_config_choices(module.config_srcs, options)
    module_path = "{}/".format(module.path) if module.path else ""
    globbed_srcs = native.glob(["{}{}".format(module_path, formatter(src)) for src in srcs])
    return globbed_srcs

def _get_kernel_build_module_deps(module, options, formatter):
    deps = module.deps + _get_config_choices(module.config_deps, options)
    deps = [formatter(dep) for dep in deps]
    return deps

def create_module_registry(hdrs = []):
    module_map = {}

    def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}):
        _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps)

    return struct(
        register = register,
        get = module_map.get,
        hdrs = hdrs,
        module_map = module_map,
    )

def define_target_variant_modules(target, variant, registry, modules, config_options = []):
    kernel_build = "{}_{}".format(target, variant)
    kernel_build_label = "//msm-kernel:{}".format(kernel_build)
    modules = [registry.get(module_name) for module_name in modules]
    options = _get_kernel_build_options(modules, config_options)
    build_print = lambda message: print("{}: {}".format(kernel_build, message))
    formatter = lambda s: s.replace("%b", kernel_build).replace("%t", target)
    headers = ["//msm-kernel:all_headers"] + registry.hdrs
    all_module_rules = []

    for module in modules:
        rule_name = "{}_{}".format(kernel_build, module.name)
        module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
        if not module_srcs:
            continue

        ddk_submodule(
            name = rule_name,
            srcs = module_srcs,
            out = "{}.ko".format(module.name),
            deps = headers + _get_kernel_build_module_deps(module, options, formatter),
            local_defines = options.keys(),
        )
        all_module_rules.append(rule_name)

    ddk_module(
        name = "{}_modules".format(kernel_build),
        kernel_build = kernel_build_label,
        deps = all_module_rules,
    )

    copy_to_dist_dir(
        name = "{}_modules_dist".format(kernel_build),
        data = [":{}_modules".format(kernel_build)],
        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(kernel_build),
        flat = True,
        wipe_dist_dir = False,
        allow_duplicate_filenames = False,
        mode_overrides = {"**/*": "644"},
    )

def define_consolidate_gki_modules(target, registry, modules, config_options = []):
    define_target_variant_modules(target, "consolidate", registry, modules, config_options)
    define_target_variant_modules(target, "gki", registry, modules, config_options)

View File

@ -0,0 +1,33 @@
load(":synx_module_build.bzl", "create_module_registry")
SYNX_KERNEL_ROOT = "synx-kernel"
synx_modules = create_module_registry([":synx_headers"])
register_synx_module = synx_modules.register
register_synx_module(
    name = "synx-driver",
    path = "msm",
    srcs = [
        "synx/synx.c",
        "synx/synx_global.c",
        "synx/synx_util.c",
        "synx/synx_debugfs.c",
        "synx/synx_debugfs_util.c",
    ],
)

register_synx_module(
    name = "ipclite",
    path = "msm",
    srcs = [
        "synx/ipclite.c",
    ],
)

register_synx_module(
    name = "ipclite_test",
    path = "msm",
    srcs = [
        "synx/test/ipclite_test.c",
    ],
)