From c9856ccfeb83c41f26fd135ac38fef18c925e0bc Mon Sep 17 00:00:00 2001 From: Gerrit SelfHelp Service Account Date: Fri, 6 May 2022 17:27:21 -0700 Subject: [PATCH 01/42] Initial empty repository From 25cb61693a7e955847ccb644a310e581a8aed3c5 Mon Sep 17 00:00:00 2001 From: NITIN LAXMIDAS NAIK Date: Wed, 13 Jul 2022 12:14:48 -0700 Subject: [PATCH 02/42] msm: synx: build script as DLKM for Vendor SI added mk and Kbuild script to support building synx driver as external module for Vendor SI Change-Id: Ib66325d115ca46e6b61de1e168e85d09419f73e2 Signed-off-by: NITIN LAXMIDAS NAIK --- Android.bp | 5 + Android.mk | 53 + Kbuild | 6 + Makefile | 11 + config/pineapplesynx.conf | 5 + config/pineapplesynxconf.h | 6 + include/uapi/synx/media/synx_header.h | 326 +++ msm/Kbuild | 37 + msm/Makefile | 5 + msm/synx/ipclite.c | 1030 ++++++++++ msm/synx/ipclite.h | 321 +++ msm/synx/ipclite_client.h | 191 ++ msm/synx/synx.c | 2636 +++++++++++++++++++++++++ msm/synx/synx_api.h | 542 +++++ msm/synx/synx_debugfs.c | 145 ++ msm/synx/synx_debugfs.h | 94 + msm/synx/synx_err.h | 27 + msm/synx/synx_global.c | 819 ++++++++ msm/synx/synx_global.h | 284 +++ msm/synx/synx_private.h | 245 +++ msm/synx/synx_util.c | 1525 ++++++++++++++ msm/synx/synx_util.h | 181 ++ synx_kernel_board.mk | 19 + synx_kernel_product.mk | 12 + 24 files changed, 8525 insertions(+) create mode 100644 Android.bp create mode 100644 Android.mk create mode 100644 Kbuild create mode 100644 Makefile create mode 100644 config/pineapplesynx.conf create mode 100644 config/pineapplesynxconf.h create mode 100644 include/uapi/synx/media/synx_header.h create mode 100644 msm/Kbuild create mode 100644 msm/Makefile create mode 100644 msm/synx/ipclite.c create mode 100644 msm/synx/ipclite.h create mode 100644 msm/synx/ipclite_client.h create mode 100644 msm/synx/synx.c create mode 100644 msm/synx/synx_api.h create mode 100644 msm/synx/synx_debugfs.c create mode 100644 msm/synx/synx_debugfs.h create mode 100644 msm/synx/synx_err.h create mode 100644 msm/synx/synx_global.c create mode 100644 msm/synx/synx_global.h create mode 100644 msm/synx/synx_private.h create mode 100644 msm/synx/synx_util.c create mode 100644 msm/synx/synx_util.h create mode 100644 synx_kernel_board.mk create mode 100644 synx_kernel_product.mk diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000000..e55ada9644 --- /dev/null +++ b/Android.bp @@ -0,0 +1,5 @@ +cc_library_headers { + name: "qti_synx_kernel_headers", + export_include_dirs: ["include/uapi/synx/media"], + vendor_available: true +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..89c39caf25 --- /dev/null +++ b/Android.mk @@ -0,0 +1,53 @@ +TARGET_SYNX_ENABLE := false +ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true) + ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true) + TARGET_SYNX_ENABLE := true + endif +else +TARGET_SYNX_ENABLE := true +endif + +ifeq ($(TARGET_SYNX_ENABLE),true) +SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel + + +# Build synx-driver.ko +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := SYNX_ROOT=$(SYNX_BLD_DIR) +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +########################################################### + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := synx-driver-symvers 
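+# Publishes the Module.symvers produced by this build (see
+# LOCAL_MODULE_KBUILD_NAME below) so that other external modules can resolve
+# symbols exported by synx-driver.ko (presumably consumed via
+# KBUILD_EXTRA_SYMBOLS in their own Kbuild files).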
+LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +# Include kp_module.ko in the /vendor/lib/modules (vendor.img) +# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) +include $(DLKM_DIR)/Build_external_kernelmodule.mk + +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES)) +LOCAL_MODULE := synx-driver.ko +LOCAL_MODULE_KBUILD_NAME := msm/synx-driver.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +# print out variables +$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) +$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY)) +$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES)) +$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES)) +$(info DLKM_DIR = $(DLKM_DIR)) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk + + +endif # End of check for TARGET_SYNX_ENABLE diff --git a/Kbuild b/Kbuild new file mode 100644 index 0000000000..b7d7589836 --- /dev/null +++ b/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +CONFIG_BUILD_VENDORSI := true + +# auto-detect subdirs +obj-y +=msm/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..1dfe010ed4 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +KBUILD_OPTIONS+= SYNX_ROOT=$(KERNEL_SRC)/$(M) + +all: + $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS) +modules_install: + $(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/config/pineapplesynx.conf b/config/pineapplesynx.conf new file mode 100644 index 0000000000..9224352c1c --- /dev/null +++ b/config/pineapplesynx.conf @@ -0,0 +1,5 @@ +ifeq ($(CONFIG_QGKI),y) +export TARGET_SYNX_ENABLE=y +else +export TARGET_SYNX_ENABLE=m +endif diff --git a/config/pineapplesynxconf.h b/config/pineapplesynxconf.h new file mode 100644 index 0000000000..bd455f1455 --- /dev/null +++ b/config/pineapplesynxconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define TARGET_SYNX_ENABLE 1 diff --git a/include/uapi/synx/media/synx_header.h b/include/uapi/synx/media/synx_header.h new file mode 100644 index 0000000000..8c30adec95 --- /dev/null +++ b/include/uapi/synx/media/synx_header.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __UAPI_SYNX_H__ +#define __UAPI_SYNX_H__ + +#include +#include + +/* Size of opaque payload sent to kernel for safekeeping until signal time */ +#define SYNX_USER_PAYLOAD_SIZE 4 + +#define SYNX_MAX_WAITING_SYNX 16 + +#define SYNX_CALLBACK_RESULT_SUCCESS 2 +#define SYNX_CALLBACK_RESULT_FAILED 3 +#define SYNX_CALLBACK_RESULT_CANCELED 4 + +/** + * struct synx_info - Sync object creation information + * + * @name : Optional string representation of the synx object + * @synx_obj : Sync object returned after creation in kernel + */ +struct synx_info { + char name[64]; + __s32 synx_obj; +}; + +/** + * struct synx_userpayload_info - Payload info from user space + * + * @synx_obj: Sync object for which payload has to be registered for + * @reserved: Reserved + * @payload: Pointer to user payload + */ +struct synx_userpayload_info { + __s32 synx_obj; + __u32 reserved; + __u64 payload[SYNX_USER_PAYLOAD_SIZE]; +}; + +/** + * struct synx_signal - Sync object signaling struct + * + * @synx_obj : Sync object to be signaled + * @synx_state : State of the synx object to which it should be signaled + */ +struct synx_signal { + __s32 synx_obj; + __u32 synx_state; +}; + +/** + * struct synx_merge - Merge information for synx objects + * + * @synx_objs : Pointer to synx object array to merge + * @num_objs : Number of objects in the array + * @merged : Merged synx object + */ +struct synx_merge { + __u64 synx_objs; + __u32 num_objs; + __s32 merged; +}; + +/** + * struct synx_wait - Sync object wait information + * + * @synx_obj : Sync object to wait on + * @reserved : Reserved + * @timeout_ms : Timeout in milliseconds + */ +struct synx_wait { + __s32 synx_obj; + __u32 reserved; + __u64 timeout_ms; +}; + +/** + * struct synx_external_desc - info of external sync object + * + * @type : Synx type + * @reserved : Reserved + * @id : Sync object id + * + */ +struct synx_external_desc { + __u32 type; + __u32 reserved; + __s32 id[2]; +}; + +/** + * struct synx_bind - info for binding two synx objects + * + * @synx_obj : Synx object + * @Reserved : Reserved + * @ext_sync_desc : External synx to bind to + * + */ +struct synx_bind { + __s32 synx_obj; + __u32 reserved; + struct synx_external_desc ext_sync_desc; +}; + +/** + * struct synx_addrefcount - info for refcount increment + * + * @synx_obj : Synx object + * @count : Count to increment + * + */ +struct synx_addrefcount { + __s32 synx_obj; + __u32 count; +}; + +/** + * struct synx_id_info - info for import and export of a synx object + * + * @synx_obj : Synx object to be exported + * @secure_key : Secure key created in export and used in import + * @new_synx_obj : Synx object created in import + * + */ +struct synx_id_info { + __s32 synx_obj; + __u32 secure_key; + __s32 new_synx_obj; + __u32 padding; +}; + +/** + * struct synx_fence_desc - info of external fence object + * + * @type : Fence type + * @reserved : Reserved + * @id : Fence object id + * + */ +struct synx_fence_desc { + __u32 type; + __u32 reserved; + __s32 id[2]; +}; + +/** + * struct synx_create - Sync object creation information + * + * @name : Optional string representation of the synx object + * @synx_obj : Synx object allocated + * @flags : Create flags + * @desc : External fence desc + */ +struct synx_create_v2 { + char name[64]; + __u32 synx_obj; + __u32 flags; + struct synx_fence_desc desc; +}; + +/** + * struct synx_userpayload_info - Payload info from user space + * + * @synx_obj : Sync object for which payload has to be registered for + * @reserved : Reserved + * @payload : 
Pointer to user payload + */ +struct synx_userpayload_info_v2 { + __u32 synx_obj; + __u32 reserved; + __u64 payload[SYNX_USER_PAYLOAD_SIZE]; +}; + +/** + * struct synx_signal - Sync object signaling struct + * + * @synx_obj : Sync object to be signaled + * @synx_state : State of the synx object to which it should be signaled + * @reserved : Reserved + */ +struct synx_signal_v2 { + __u32 synx_obj; + __u32 synx_state; + __u64 reserved; +}; + +/** + * struct synx_merge - Merge information for synx objects + * + * @synx_objs : Pointer to synx object array to merge + * @num_objs : Number of objects in the array + * @merged : Merged synx object + * @flags : Merge flags + * @reserved : Reserved + */ +struct synx_merge_v2 { + __u64 synx_objs; + __u32 num_objs; + __u32 merged; + __u32 flags; + __u32 reserved; +}; + +/** + * struct synx_wait - Sync object wait information + * + * @synx_obj : Sync object to wait on + * @reserved : Reserved + * @timeout_ms : Timeout in milliseconds + */ +struct synx_wait_v2 { + __u32 synx_obj; + __u32 reserved; + __u64 timeout_ms; +}; + +/** + * struct synx_external_desc - info of external sync object + * + * @type : Synx type + * @reserved : Reserved + * @id : Sync object id + * + */ +struct synx_external_desc_v2 { + __u64 id; + __u32 type; + __u32 reserved; +}; + +/** + * struct synx_bind - info for binding two synx objects + * + * @synx_obj : Synx object + * @Reserved : Reserved + * @ext_sync_desc : External synx to bind to + * + */ +struct synx_bind_v2 { + __u32 synx_obj; + __u32 reserved; + struct synx_external_desc_v2 ext_sync_desc; +}; + +/** + * struct synx_import_info - import info + * + * @synx_obj : Synx handle to be imported + * @flags : Import flags + * @new_synx_obj : Synx object created in import + * @reserved : Reserved + * @desc : External fence descriptor + */ +struct synx_import_info { + __u32 synx_obj; + __u32 flags; + __u32 new_synx_obj; + __u32 reserved; + struct synx_fence_desc desc; +}; + +/** + * struct synx_import_arr_info - import list info + * + * @list : List of synx_import_info + * @num_objs : No of fences to import + */ +struct synx_import_arr_info { + __u64 list; + __u32 num_objs; +}; + +/** + * struct synx_fence_fd - get fd for synx fence + * + * @synx_obj : Synx handle + * @fd : fd for synx handle fence + */ +struct synx_fence_fd { + __u32 synx_obj; + __s32 fd; +}; + +/** + * struct synx_private_ioctl_arg - Sync driver ioctl argument + * + * @id : IOCTL command id + * @size : Size of command payload + * @result : Result of command execution + * @reserved : Reserved + * @ioctl_ptr : Pointer to user data + */ +struct synx_private_ioctl_arg { + __u32 id; + __u32 size; + __u32 result; + __u32 reserved; + __u64 ioctl_ptr; +}; + +#define SYNX_PRIVATE_MAGIC_NUM 's' + +#define SYNX_PRIVATE_IOCTL_CMD \ + _IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg) + +#define SYNX_CREATE 0 +#define SYNX_RELEASE 1 +#define SYNX_SIGNAL 2 +#define SYNX_MERGE 3 +#define SYNX_REGISTER_PAYLOAD 4 +#define SYNX_DEREGISTER_PAYLOAD 5 +#define SYNX_WAIT 6 +#define SYNX_BIND 7 +#define SYNX_ADDREFCOUNT 8 +#define SYNX_GETSTATUS 9 +#define SYNX_IMPORT 10 +#define SYNX_EXPORT 11 +#define SYNX_IMPORT_ARR 12 +#define SYNX_GETFENCE_FD 13 + +#endif /* __UAPI_SYNX_H__ */ diff --git a/msm/Kbuild b/msm/Kbuild new file mode 100644 index 0000000000..3e03ecf95f --- /dev/null +++ b/msm/Kbuild @@ -0,0 +1,37 @@ +LINUXINCLUDE += -I$(SYNX_ROOT)/include \ + -I$(SYNX_ROOT)/include/uapi \ + -I$(SYNX_ROOT)/include/uapi/synx/media + +ccflags-y += -I$(SYNX_ROOT)/msm/synx/ + 
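+# SYNX_ROOT and BOARD_PLATFORM are expected to arrive via KBUILD_OPTIONS set up
+# by the top-level Makefile and Android.mk; an illustrative out-of-tree build
+# invocation (paths below are placeholders) would look like:
+#   make -C <kernel-src> M=<synx-kernel dir> modules \
+#        SYNX_ROOT=<synx-kernel dir> BOARD_PLATFORM=<target board>
+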
+# add flag to compile mmrm actual implementatio instead of stub version. +# to follow up with mmrm team if techpack users need to define this for long term? +#KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM + +# ported from Android.mk +$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS)) + +ifeq ($(CONFIG_ARCH_WAIPIO), y) +$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO)) +# include $(SYNX_ROOT)/config/waipio.mk +KBUILD_CPPFLAGS += -DCONFIG_SYNX_WAIPIO=1 +ccflags-y += -DCONFIG_SYNX_WAIPIO=1 +endif + +ifeq ($(CONFIG_ARCH_KALAMA), y) +$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA)) +# include $(SYNX_ROOT)/config/waipio.mk +KBUILD_CPPFLAGS += -DCONFIG_SYNX_KALAMA=1 +ccflags-y += -DCONFIG_SYNX_KALAMA=1 +endif + +ifeq ($(CONFIG_ARCH_PINEAPPLE), y) +$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE)) +# include $(SYNX_ROOT)/config/pineapple.mk +KBUILD_CPPFLAGS += -DCONFIG_SYNX_PINEAPPLE=1 +ccflags-y += -DCONFIG_SYNX_PINEAPPLE=1 +endif + +obj-m += synx-driver.o +obj-m += synx/ipclite.o +synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o diff --git a/msm/Makefile b/msm/Makefile new file mode 100644 index 0000000000..aff0d93920 --- /dev/null +++ b/msm/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-m += synx/ipclite.o +obj-m += synx-driver.o +synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c new file mode 100644 index 0000000000..03ff780f69 --- /dev/null +++ b/msm/synx/ipclite.c @@ -0,0 +1,1030 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include "ipclite_client.h" +#include "ipclite.h" + +#define VMID_HLOS 3 +#define VMID_SSC_Q6 5 +#define VMID_ADSP_Q6 6 +#define VMID_CDSP 30 +#define GLOBAL_ATOMICS_ENABLED 1 +#define GLOBAL_ATOMICS_DISABLED 0 + +static struct ipclite_info *ipclite; +static struct ipclite_client synx_client; +static struct ipclite_client test_client; +struct ipclite_hw_mutex_ops *ipclite_hw_mutex; + +u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; + +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 + +void ipclite_hwlock_reset(enum ipcmem_host_type core_id) +{ + /* verify and reset the hw mutex lock */ + if (core_id == ipclite->ipcmem.toc->global_atomic_hwlock_owner) { + ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + hwspin_unlock_raw(ipclite->hwlock); + } +} +EXPORT_SYMBOL(ipclite_hwlock_reset); + +static void ipclite_hw_mutex_acquire(void) +{ + int32_t ret; + + if (ipclite != NULL) { + if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) { + ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, + HWSPINLOCK_TIMEOUT, + &ipclite->ipclite_hw_mutex->flags); + if (ret) + pr_err("Hw mutex lock acquire failed\n"); + + ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_APPS; + + pr_debug("Hw mutex lock acquired\n"); + } + } +} + +static void ipclite_hw_mutex_release(void) +{ + if (ipclite != NULL) { + if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) { + ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + hwspin_unlock_irqrestore(ipclite->hwlock, + &ipclite->ipclite_hw_mutex->flags); + 
pr_debug("Hw mutex lock release\n"); + } + } +} + +void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data) +{ + atomic_set(addr, data); + pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr)); +} +EXPORT_SYMBOL(ipclite_atomic_init_u32); + +void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data) +{ + atomic_set(addr, data); + pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr)); +} +EXPORT_SYMBOL(ipclite_atomic_init_i32); + +void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data) +{ + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + atomic_set(addr, data); + pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); +} +EXPORT_SYMBOL(ipclite_global_atomic_store_u32); + +void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data) +{ + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + atomic_set(addr, data); + pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); +} +EXPORT_SYMBOL(ipclite_global_atomic_store_i32); + +uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr) +{ + uint32_t ret; + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_read(addr); + pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_atomic_load_u32); + +int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr) +{ + int32_t ret; + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_read(addr); + pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(int32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_atomic_load_i32); + +uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr) +{ + uint32_t ret; + uint32_t mask = (1 << nr); + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_fetch_or(mask, addr); + pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_test_and_set_bit); + +uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr) +{ + uint32_t ret; + uint32_t mask = (1 << nr); + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_fetch_and(~mask, addr); + pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_test_and_clear_bit); + +int32_t 
ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr) +{ + int32_t ret = 0; + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_fetch_add(1, addr); + pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_atomic_inc); + +int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr) +{ + int32_t ret = 0; + + /* callback to acquire hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->acquire(); + + ret = atomic_fetch_sub(1, addr); + pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr)); + + /* callback to release hw mutex lock if atomic support is not enabled */ + ipclite->ipclite_hw_mutex->release(); + + return ret; +} +EXPORT_SYMBOL(ipclite_global_atomic_dec); + +static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo) +{ + size_t len; + u32 head; + u32 tail; + + head = le32_to_cpu(*rx_fifo->head); + tail = le32_to_cpu(*rx_fifo->tail); + pr_debug("head=%d, tail=%d\n", head, tail); + if (head < tail) + len = rx_fifo->length - tail + head; + else + len = head - tail; + + if (WARN_ON_ONCE(len > rx_fifo->length)) + len = 0; + pr_debug("len=%d\n", len); + return len; +} + +static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo, + void *data, size_t count) +{ + size_t len; + u32 tail; + + tail = le32_to_cpu(*rx_fifo->tail); + + if (WARN_ON_ONCE(tail > rx_fifo->length)) + return; + + if (tail >= rx_fifo->length) + tail -= rx_fifo->length; + + len = min_t(size_t, count, rx_fifo->length - tail); + if (len) + memcpy_fromio(data, rx_fifo->fifo + tail, len); + + if (len != count) + memcpy_fromio(data + len, rx_fifo->fifo, (count - len)); +} + +static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, + size_t count) +{ + u32 tail; + + tail = le32_to_cpu(*rx_fifo->tail); + + tail += count; + if (tail >= rx_fifo->length) + tail %= rx_fifo->length; + + *rx_fifo->tail = cpu_to_le32(tail); +} + +static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo) +{ + u32 head; + u32 tail; + u32 avail; + + head = le32_to_cpu(*tx_fifo->head); + tail = le32_to_cpu(*tx_fifo->tail); + + if (tail <= head) + avail = tx_fifo->length - head + tail; + else + avail = tail - head; + + if (avail < FIFO_FULL_RESERVE) + avail = 0; + else + avail -= FIFO_FULL_RESERVE; + + if (WARN_ON_ONCE(avail > tx_fifo->length)) + avail = 0; + + return avail; +} + +static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo, + unsigned int head, + const void *data, size_t count) +{ + size_t len; + + if (WARN_ON_ONCE(head > tx_fifo->length)) + return head; + + len = min_t(size_t, count, tx_fifo->length - head); + if (len) + memcpy(tx_fifo->fifo + head, data, len); + + if (len != count) + memcpy(tx_fifo->fifo, data + len, count - len); + + head += count; + if (head >= tx_fifo->length) + head -= tx_fifo->length; + + return head; +} + +static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo, + const void *data, size_t dlen) +{ + unsigned int head; + + head = le32_to_cpu(*tx_fifo->head); + head = ipcmem_tx_write_one(tx_fifo, head, data, dlen); + + head = ALIGN(head, 8); + if (head >= tx_fifo->length) + head -= tx_fifo->length; + + /* Ensure ordering of fifo and head update */ + wmb(); + + *tx_fifo->head = cpu_to_le32(head); + pr_debug("head = %d\n", *tx_fifo->head); +} + +static size_t ipclite_rx_avail(struct 
ipclite_channel *channel) +{ + return channel->rx_fifo->avail(channel->rx_fifo); +} + +static void ipclite_rx_peak(struct ipclite_channel *channel, + void *data, size_t count) +{ + channel->rx_fifo->peak(channel->rx_fifo, data, count); +} + +static void ipclite_rx_advance(struct ipclite_channel *channel, + size_t count) +{ + channel->rx_fifo->advance(channel->rx_fifo, count); +} + +static size_t ipclite_tx_avail(struct ipclite_channel *channel) +{ + return channel->tx_fifo->avail(channel->tx_fifo); +} + +static void ipclite_tx_write(struct ipclite_channel *channel, + const void *data, size_t dlen) +{ + channel->tx_fifo->write(channel->tx_fifo, data, dlen); +} + +static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail) +{ + uint64_t data; + int ret = 0; + + if (avail < sizeof(data)) { + pr_err("Not enough data in fifo\n"); + return -EAGAIN; + } + + ipclite_rx_peak(channel, &data, sizeof(data)); + + if (synx_client.reg_complete == 1) { + if (synx_client.callback) + synx_client.callback(channel->remote_pid, data, + synx_client.priv_data); + } + ipclite_rx_advance(channel, ALIGN(sizeof(data), 8)); + return ret; +} + +static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail) +{ + uint64_t data; + int ret = 0; + + if (avail < sizeof(data)) { + pr_err("Not enough data in fifo\n"); + return -EAGAIN; + } + + ipclite_rx_peak(channel, &data, sizeof(data)); + + if (test_client.reg_complete == 1) { + if (test_client.callback) + test_client.callback(channel->remote_pid, data, + test_client.priv_data); + } + ipclite_rx_advance(channel, ALIGN(sizeof(data), 8)); + return ret; +} + +static irqreturn_t ipclite_intr(int irq, void *data) +{ + struct ipclite_channel *channel; + struct ipclite_irq_info *irq_info; + unsigned int avail = 0; + int ret = 0; + uint64_t msg; + + pr_debug("Interrupt received\n"); + irq_info = (struct ipclite_irq_info *)data; + channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]); + + if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) { + for (;;) { + avail = ipclite_rx_avail(channel); + if (avail < sizeof(msg)) + break; + + ret = ipclite_rx_data(channel, avail); + } + pr_debug("checking messages in rx_fifo done\n"); + } else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) { + /* check_version_compatibility();*/ + pr_debug("version matching sequence completed\n"); + } else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) { + for (;;) { + avail = ipclite_rx_avail(channel); + if (avail < sizeof(msg)) + break; + + ret = ipclite_rx_test_data(channel, avail); + } + pr_debug("checking messages in rx_fifo done\n"); + } else { + pr_err("wrong interrupt signal received, signal_id =%d\n", irq_info->signal_id); + } + return IRQ_HANDLED; +} + +static int ipclite_tx(struct ipclite_channel *channel, + uint64_t data, size_t dlen, uint32_t ipclite_signal) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&channel->tx_lock, flags); + if (ipclite_tx_avail(channel) < dlen) { + spin_unlock_irqrestore(&channel->tx_lock, flags); + ret = -EAGAIN; + return ret; + } + + ipclite_tx_write(channel, &data, dlen); + + mbox_send_message(channel->irq_info[ipclite_signal].mbox_chan, NULL); + mbox_client_txdone(channel->irq_info[ipclite_signal].mbox_chan, 0); + + spin_unlock_irqrestore(&channel->tx_lock, flags); + + return ret; +} + +int ipclite_msg_send(int32_t proc_id, uint64_t data) +{ + int ret = 0; + + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + pr_err("Invalid proc_id %d\n", proc_id); + return -EINVAL; + } + + if 
(ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) { + pr_err("Cannot send msg to remote client. Channel inactive\n"); + return -ENXIO; + } + + ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), + IPCLITE_MSG_SIGNAL); + pr_debug("Message send completed with ret=%d\n", ret); + return ret; +} +EXPORT_SYMBOL(ipclite_msg_send); + +int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv) +{ + if (!cb_func_ptr) { + pr_err("Invalid callback pointer\n"); + return -EINVAL; + } + synx_client.callback = cb_func_ptr; + synx_client.priv_data = priv; + synx_client.reg_complete = 1; + pr_debug("Client Registration completed\n"); + return 0; +} +EXPORT_SYMBOL(ipclite_register_client); + +int ipclite_test_msg_send(int32_t proc_id, uint64_t data) +{ + int ret = 0; + + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + pr_err("Invalid proc_id %d\n", proc_id); + return -EINVAL; + } + + /* Limit Message Sending without Client Registration */ + if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) { + pr_err("Cannot send msg to remote client. Channel inactive\n"); + return -ENXIO; + } + + ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), + IPCLITE_TEST_SIGNAL); + pr_debug("Message send completed with ret=%d\n", ret); + return ret; +} +EXPORT_SYMBOL(ipclite_test_msg_send); + +int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv) +{ + if (!cb_func_ptr) { + pr_err("Invalid callback pointer\n"); + return -EINVAL; + } + test_client.callback = cb_func_ptr; + test_client.priv_data = priv; + test_client.reg_complete = 1; + pr_debug("Test Client Registration Completed\n"); + return 0; +} +EXPORT_SYMBOL(ipclite_register_test_client); + +static int map_ipcmem(struct ipclite_info *ipclite, const char *name) +{ + struct device *dev; + struct device_node *np; + struct resource r; + int ret = 0; + + dev = ipclite->dev; + + np = of_parse_phandle(dev->of_node, name, 0); + if (!np) { + pr_err("No %s specified\n", name); + return -EINVAL; + } + + ret = of_address_to_resource(np, 0, &r); + of_node_put(np); + if (ret) + return ret; + + ipclite->ipcmem.mem.aux_base = (u64)r.start; + ipclite->ipcmem.mem.size = resource_size(&r); + ipclite->ipcmem.mem.virt_base = devm_ioremap_wc(dev, r.start, + resource_size(&r)); + if (!ipclite->ipcmem.mem.virt_base) + return -ENOMEM; + + pr_debug("aux_base = %lx, size=%d,virt_base=%p\n", + ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size, + ipclite->ipcmem.mem.virt_base); + + return ret; +} + +static void ipcmem_init(struct ipclite_mem *ipcmem) +{ + int host0, host1; + int i = 0; + + ipcmem->toc = ipcmem->mem.virt_base; + pr_debug("toc_base = %p\n", ipcmem->toc); + + ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE; + pr_debug("toc->hdr.size = %d\n", ipcmem->toc->hdr.size); + + /*Fill in global partition details*/ + ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry; + ipcmem->global_partition = (struct ipcmem_global_partition *) + ((char *)ipcmem->mem.virt_base + + ipcmem_toc_global_partition_entry.base_offset); + + pr_debug("base_offset =%x,ipcmem->global_partition = %p\n", + ipcmem_toc_global_partition_entry.base_offset, + ipcmem->global_partition); + + ipcmem->global_partition->hdr = global_partition_hdr; + + pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", + ipcmem->global_partition->hdr.partition_type, + ipcmem->global_partition->hdr.region_offset, + ipcmem->global_partition->hdr.region_size); + + /* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/ + for (i = 0; i < 
MAX_PARTITION_COUNT; i++) { + host0 = ipcmem_toc_partition_entries[i].host0; + host1 = ipcmem_toc_partition_entries[i].host1; + pr_debug("host0 = %d, host1=%d\n", host0, host1); + + ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i]; + ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i]; + + ipcmem->partition[i] = (struct ipcmem_partition *) + ((char *)ipcmem->mem.virt_base + + ipcmem_toc_partition_entries[i].base_offset); + + pr_debug("partition[%d] = %p,partition_base_offset[%d]=%lx\n", + i, ipcmem->partition[i], + i, ipcmem_toc_partition_entries[i].base_offset); + + if (host0 == host1) + ipcmem->partition[i]->hdr = loopback_partition_hdr; + else + ipcmem->partition[i]->hdr = default_partition_hdr; + + pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", + ipcmem->partition[i]->hdr.type, + ipcmem->partition[i]->hdr.desc_offset, + ipcmem->partition[i]->hdr.desc_size); + } + + /*Making sure all writes for ipcmem configurations are completed*/ + wmb(); + + ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED; + pr_debug("Ipcmem init completed\n"); +} + + +/*Add VMIDs corresponding to EVA, CDSP and VPU to set IPCMEM access control*/ +static int set_ipcmem_access_control(struct ipclite_info *ipclite) +{ + int ret = 0; + int srcVM[1] = {VMID_HLOS}; + int destVM[2] = {VMID_HLOS, VMID_CDSP}; + int destVMperm[2] = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE}; + + ret = hyp_assign_phys(ipclite->ipcmem.mem.aux_base, + ipclite->ipcmem.mem.size, srcVM, 1, + destVM, destVMperm, 2); + return ret; +} + +static int ipclite_channel_irq_init(struct device *parent, struct device_node *node, + struct ipclite_channel *channel) +{ + int ret = 0; + u32 index; + char strs[4][9] = {"msg", "mem-init", "version", "test"}; + struct ipclite_irq_info *irq_info; + struct device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->parent = parent; + dev->of_node = node; + dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node); + pr_debug("Registering %s device\n", dev_name(parent->parent)); + ret = device_register(dev); + if (ret) { + pr_err("failed to register ipclite child node\n"); + put_device(dev); + return ret; + } + + ret = of_property_read_u32(dev->of_node, "index", + &index); + if (ret) { + pr_err("failed to parse index\n"); + goto err_dev; + } + + irq_info = &channel->irq_info[index]; + pr_debug("irq_info[%d]=%p\n", index, irq_info); + + irq_info->mbox_client.dev = dev; + irq_info->mbox_client.knows_txdone = true; + irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0); + pr_debug("irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan); + if (IS_ERR(irq_info->mbox_chan)) { + if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER) + pr_err("failed to acquire IPC channel\n"); + goto err_dev; + } + + snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]); + irq_info->irq = of_irq_get(dev->of_node, 0); + pr_debug("irq[%d] = %d\n", index, irq_info->irq); + irq_info->signal_id = index; + ret = devm_request_irq(dev, irq_info->irq, + ipclite_intr, + IRQF_NO_SUSPEND | IRQF_SHARED, + irq_info->irqname, irq_info); + if (ret) { + pr_err("failed to request IRQ\n"); + goto err_dev; + } + pr_debug("Interrupt init completed, ret = %d\n", ret); + return 0; + +err_dev: + device_unregister(dev); + kfree(dev); + return ret; +} + +int32_t get_global_partition_info(struct global_region_info *global_ipcmem) +{ + struct ipcmem_global_partition *global_partition; + + if (!global_ipcmem) + return -EINVAL; + + 
global_partition = ipclite->ipcmem.global_partition; + global_ipcmem->virt_base = (void *)((char *)global_partition + + global_partition->hdr.region_offset); + global_ipcmem->size = (size_t)(global_partition->hdr.region_size); + + pr_debug("base = %p, size=%lx\n", global_ipcmem->virt_base, + global_ipcmem->size); + return 0; +} +EXPORT_SYMBOL(get_global_partition_info); + +static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid, + int remote_pid) +{ + return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base + + ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset); +} + +static void ipclite_channel_release(struct device *dev) +{ + pr_info("Releasing ipclite channel\n"); + kfree(dev); +} + +/* Sets up following fields of IPCLite channel structure: + * remote_pid,tx_fifo, rx_fifo + */ +static int ipclite_channel_init(struct device *parent, + struct device_node *node) +{ + struct ipclite_fifo *rx_fifo; + struct ipclite_fifo *tx_fifo; + + struct device *dev; + u32 local_pid, remote_pid, global_atomic; + u32 *descs; + int ret = 0; + + struct device_node *child; + + struct ipcmem_partition_header *partition_hdr; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->parent = parent; + dev->of_node = node; + dev->release = ipclite_channel_release; + dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node); + pr_debug("Registering %s device\n", dev_name(parent->parent)); + ret = device_register(dev); + if (ret) { + pr_err("failed to register ipclite device\n"); + put_device(dev); + kfree(dev); + return ret; + } + + local_pid = LOCAL_HOST; + + ret = of_property_read_u32(dev->of_node, "qcom,remote-pid", + &remote_pid); + if (ret) { + dev_err(dev, "failed to parse qcom,remote-pid\n"); + goto err_put_dev; + } + pr_debug("remote_pid = %d, local_pid=%d\n", remote_pid, local_pid); + + ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL); + if (!ipclite_hw_mutex) { + ret = -ENOMEM; + goto err_put_dev; + } + + ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic); + if (ret) { + dev_err(dev, "failed to parse global_atomic\n"); + goto err_put_dev; + } + if (global_atomic == 0) + global_atomic_support = GLOBAL_ATOMICS_DISABLED; + + rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL); + tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL); + if (!rx_fifo || !tx_fifo) { + ret = -ENOMEM; + goto err_put_dev; + } + pr_debug("rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo); + + partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, + local_pid, remote_pid); + pr_debug("partition_hdr = %p\n", partition_hdr); + descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset); + pr_debug("descs = %p\n", descs); + + if (local_pid < remote_pid) { + tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset; + tx_fifo->length = partition_hdr->fifo0_size; + rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset; + rx_fifo->length = partition_hdr->fifo1_size; + + tx_fifo->tail = &descs[0]; + tx_fifo->head = &descs[1]; + rx_fifo->tail = &descs[2]; + rx_fifo->head = &descs[3]; + + } else { + tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset; + tx_fifo->length = partition_hdr->fifo1_size; + rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset; + rx_fifo->length = partition_hdr->fifo0_size; + + rx_fifo->tail = &descs[0]; + rx_fifo->head = &descs[1]; + tx_fifo->tail = &descs[2]; + tx_fifo->head = &descs[3]; + } + + 
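+	/*
+	 * Layout note (inferred from the assignments above): descs[0]/descs[1]
+	 * always carry the tail/head indices of fifo0 and descs[2]/descs[3]
+	 * those of fifo1; the host with the lower processor id transmits on
+	 * fifo0, so both sides simply mirror the same four shared descriptors.
+	 */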
if (partition_hdr->type == LOOPBACK_PARTITION_TYPE) { + rx_fifo->tail = tx_fifo->tail; + rx_fifo->head = tx_fifo->head; + } + + /* rx_fifo->reset = ipcmem_rx_reset;*/ + rx_fifo->avail = ipcmem_rx_avail; + rx_fifo->peak = ipcmem_rx_peak; + rx_fifo->advance = ipcmem_rx_advance; + + /* tx_fifo->reset = ipcmem_tx_reset;*/ + tx_fifo->avail = ipcmem_tx_avail; + tx_fifo->write = ipcmem_tx_write; + + *rx_fifo->tail = 0; + *tx_fifo->head = 0; + + /*Store Channel Information*/ + ipclite->channel[remote_pid].remote_pid = remote_pid; + ipclite->channel[remote_pid].tx_fifo = tx_fifo; + ipclite->channel[remote_pid].rx_fifo = rx_fifo; + + spin_lock_init(&ipclite->channel[remote_pid].tx_lock); + + for_each_available_child_of_node(dev->of_node, child) { + ret = ipclite_channel_irq_init(dev, child, + &ipclite->channel[remote_pid]); + if (ret) { + pr_err("irq setup for ipclite channel failed\n"); + goto err_put_dev; + } + } + ipclite->channel[remote_pid].channel_status = ACTIVE_CHANNEL; + pr_debug("Channel init completed, ret = %d\n", ret); + return ret; + +err_put_dev: + ipclite->channel[remote_pid].channel_status = 0; + device_unregister(dev); + kfree(dev); + return ret; +} + +static void probe_subsystem(struct device *dev, struct device_node *np) +{ + int ret = 0; + + ret = ipclite_channel_init(dev, np); + if (ret) + pr_err("IPCLite Channel init failed\n"); +} + +static int ipclite_probe(struct platform_device *pdev) +{ + int ret = 0; + int hwlock_id; + struct ipcmem_region *mem; + struct device_node *cn; + struct device_node *pn = pdev->dev.of_node; + struct ipclite_channel broadcast; + + ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL); + if (!ipclite) { + ret = -ENOMEM; + goto error; + } + + ipclite->dev = &pdev->dev; + + hwlock_id = of_hwspin_lock_get_id(pn, 0); + if (hwlock_id < 0) { + if (hwlock_id != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to retrieve hwlock\n"); + ret = hwlock_id; + goto error; + } + pr_debug("Hwlock id retrieved, hwlock_id=%d\n", hwlock_id); + + ipclite->hwlock = hwspin_lock_request_specific(hwlock_id); + if (!ipclite->hwlock) { + pr_err("Failed to assign hwlock_id\n"); + ret = -ENXIO; + goto error; + } + pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock); + + ret = map_ipcmem(ipclite, "memory-region"); + if (ret) { + pr_err("failed to map ipcmem\n"); + goto release; + } + mem = &(ipclite->ipcmem.mem); + memset(mem->virt_base, 0, mem->size); + + ret = set_ipcmem_access_control(ipclite); + if (ret) { + pr_err("failed to set access control policy\n"); + goto release; + } + + ipcmem_init(&ipclite->ipcmem); + + /* Setup Channel for each Remote Subsystem */ + for_each_available_child_of_node(pn, cn) + probe_subsystem(&pdev->dev, cn); + /* Broadcast init_done signal to all subsystems once mbox channels + * are set up + */ + broadcast = ipclite->channel[IPCMEM_APPS]; + ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, + NULL); + if (ret < 0) + goto mem_release; + + mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0); + + if (global_atomic_support) { + ipclite->ipcmem.toc->ipclite_features.global_atomic_support = + GLOBAL_ATOMICS_ENABLED; + } else { + ipclite->ipcmem.toc->ipclite_features.global_atomic_support = + GLOBAL_ATOMICS_DISABLED; + } + + pr_debug("global_atomic_support : %d\n", + ipclite->ipcmem.toc->ipclite_features.global_atomic_support); + + /* hw mutex callbacks */ + ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire; + ipclite_hw_mutex->release = ipclite_hw_mutex_release; + + /* store to 
ipclite structure */ + ipclite->ipclite_hw_mutex = ipclite_hw_mutex; + + /* initialize hwlock owner to invalid host */ + ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + + pr_info("IPCLite probe completed successfully\n"); + return ret; + +mem_release: + /* If the remote subsystem has already completed the init and actively + * using IPCMEM, re-assigning IPCMEM memory back to HLOS can lead to crash + * Solution: Either we don't take back the memory or make sure APPS completes + * init before any other subsystem initializes IPCLite (we won't have to send + * braodcast) + */ +release: + kfree(ipclite); +error: + pr_err("IPCLite probe failed\n"); + return ret; +} + +static const struct of_device_id ipclite_of_match[] = { + { .compatible = "qcom,ipclite"}, + {} +}; +MODULE_DEVICE_TABLE(of, ipclite_of_match); + +static struct platform_driver ipclite_driver = { + .probe = ipclite_probe, + .driver = { + .name = "ipclite", + .of_match_table = ipclite_of_match, + }, +}; + +module_platform_driver(ipclite_driver); + +MODULE_DESCRIPTION("IPCLite Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_SOFTDEP("pre: qcom_hwspinlock"); diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h new file mode 100644 index 0000000000..d622bad099 --- /dev/null +++ b/msm/synx/ipclite.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.. + */ +#include +#include +#include +#include +#include +#include +#include "ipclite_client.h" + +#define IPCMEM_INIT_COMPLETED 0x1 +#define ACTIVE_CHANNEL 0x1 + +#define IPCMEM_TOC_SIZE (4*1024) +#define MAX_CHANNEL_SIGNALS 4 + +#define MAX_PARTITION_COUNT 7 /*7 partitions other than global partition*/ + +#define IPCLITE_MSG_SIGNAL 0 +#define IPCLITE_MEM_INIT_SIGNAL 1 +#define IPCLITE_VERSION_SIGNAL 2 +#define IPCLITE_TEST_SIGNAL 3 + +/** Flag definitions for the entries */ +#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01) +#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION (0x02) +#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \ + (IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \ + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION) + +#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION (0x00000004) + +/*Hardcoded macro to identify local host on each core*/ +#define LOCAL_HOST IPCMEM_APPS + +/* Timeout (ms) for the trylock of remote spinlocks */ +#define HWSPINLOCK_TIMEOUT 1000 + +/*IPCMEM Structure Definitions*/ + +struct ipclite_features { + uint32_t global_atomic_support; + uint32_t version_finalised; +}; + +struct ipcmem_partition_header { + uint32_t type; /*partition type*/ + uint32_t desc_offset; /*descriptor offset*/ + uint32_t desc_size; /*descriptor size*/ + uint32_t fifo0_offset; /*fifo 0 offset*/ + uint32_t fifo0_size; /*fifo 0 size*/ + uint32_t fifo1_offset; /*fifo 1 offset*/ + uint32_t fifo1_size; /*fifo 1 size*/ +}; + +struct ipcmem_toc_entry { + uint32_t base_offset; /*partition offset from IPCMEM base*/ + uint32_t size; /*partition size*/ + uint32_t flags; /*partition flags if required*/ + uint32_t host0; /*subsystem 0 who can access this partition*/ + uint32_t host1; /*subsystem 1 who can access this partition*/ + uint32_t status; /*partition active status*/ +}; + +struct ipcmem_toc_header { + uint32_t size; + uint32_t init_done; +}; + +struct ipcmem_toc { + struct ipcmem_toc_header hdr; + struct ipcmem_toc_entry toc_entry_global; + struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS]; + /* Need to have a better 
implementation here */ + /* as ipcmem is 4k and if host number increases */ + /* it would create problems*/ + struct ipclite_features ipclite_features; + uint32_t global_atomic_hwlock_owner; +}; + +struct ipcmem_region { + u64 aux_base; + void __iomem *virt_base; + uint32_t size; +}; + +struct ipcmem_partition { + struct ipcmem_partition_header hdr; +}; + +struct global_partition_header { + uint32_t partition_type; + uint32_t region_offset; + uint32_t region_size; +}; + +struct ipcmem_global_partition { + struct global_partition_header hdr; +}; + +struct ipclite_mem { + struct ipcmem_toc *toc; + struct ipcmem_region mem; + struct ipcmem_global_partition *global_partition; + struct ipcmem_partition *partition[MAX_PARTITION_COUNT]; +}; + +struct ipclite_fifo { + uint32_t length; + + __le32 *tail; + __le32 *head; + + void *fifo; + + size_t (*avail)(struct ipclite_fifo *fifo); + + void (*peak)(struct ipclite_fifo *fifo, + void *data, size_t count); + + void (*advance)(struct ipclite_fifo *fifo, + size_t count); + + void (*write)(struct ipclite_fifo *fifo, + const void *data, size_t dlen); + + void (*reset)(struct ipclite_fifo *fifo); +}; + +struct ipclite_hw_mutex_ops { + unsigned long flags; + void (*acquire)(void); + void (*release)(void); +}; + +struct ipclite_irq_info { + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + int irq; + int signal_id; + char irqname[32]; +}; + +struct ipclite_client { + IPCLite_Client callback; + void *priv_data; + int reg_complete; +}; + +struct ipclite_channel { + uint32_t remote_pid; + + struct ipclite_fifo *tx_fifo; + struct ipclite_fifo *rx_fifo; + spinlock_t tx_lock; + + struct ipclite_irq_info irq_info[MAX_CHANNEL_SIGNALS]; + + struct ipclite_client client; + + uint32_t channel_version; + uint32_t version_finalised; + + uint32_t channel_status; +}; + +/*Single structure that defines everything about IPCLite*/ +struct ipclite_info { + struct device *dev; + struct ipclite_channel channel[IPCMEM_NUM_HOSTS]; + struct ipclite_mem ipcmem; + struct hwspinlock *hwlock; + struct ipclite_hw_mutex_ops *ipclite_hw_mutex; +}; + +const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = { + /* Global partition. */ + 4 * 1024, + 128 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_GLOBAL_HOST, + IPCMEM_GLOBAL_HOST, +}; + +const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { + /* Global partition. */ + /* { + * 4 * 1024, + * 128 * 1024, + * IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + * IPCMEM_GLOBAL_HOST, + * IPCMEM_GLOBAL_HOST, + * }, + */ + + /* Apps<->CDSP partition. */ + { + 132 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_APPS, + IPCMEM_CDSP, + 1, + }, + /* APPS<->CVP (EVA) partition. */ + { + 164 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_APPS, + IPCMEM_CVP, + 1, + }, + /* APPS<->VPU partition. */ + { + 196 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_APPS, + IPCMEM_VPU, + 1, + }, + /* CDSP<->CVP (EVA) partition. */ + { + 228 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CDSP, + IPCMEM_CVP, + 1, + }, + /* CDSP<->VPU partition. */ + { + 260 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CDSP, + IPCMEM_VPU, + 1, + }, + /* VPU<->CVP (EVA) partition. */ + { + 292 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_VPU, + IPCMEM_CVP, + 1, + }, + /* APPS<->APPS partition. 
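+	 * (host0 == host1 here, so ipcmem_init() assigns loopback_partition_hdr
+	 * to this entry and its tx/rx FIFOs share a single buffer)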
*/ + { + 326 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_APPS, + IPCMEM_APPS, + 1, + } + /* Last entry uses invalid hosts and no protections to signify the end. */ + /* { + * 0, + * 0, + * IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + * IPCMEM_INVALID_HOST, + * IPCMEM_INVALID_HOST, + * } + */ +}; + +/*D:wefault partition parameters*/ +#define DEFAULT_PARTITION_TYPE 0x0 +#define DEFAULT_PARTITION_HDR_SIZE 1024 + +#define DEFAULT_DESCRIPTOR_OFFSET 1024 +#define DEFAULT_DESCRIPTOR_SIZE (3*1024) +#define DEFAULT_FIFO0_OFFSET (4*1024) +#define DEFAULT_FIFO0_SIZE (8*1024) +#define DEFAULT_FIFO1_OFFSET (12*1024) +#define DEFAULT_FIFO1_SIZE (8*1024) + +/*Loopback partition parameters*/ +#define LOOPBACK_PARTITION_TYPE 0x1 + +/*Global partition parameters*/ +#define GLOBAL_PARTITION_TYPE 0xFF +#define GLOBAL_PARTITION_HDR_SIZE (4*1024) + +#define GLOBAL_REGION_OFFSET (4*1024) +#define GLOBAL_REGION_SIZE (124*1024) + + +const struct ipcmem_partition_header default_partition_hdr = { + DEFAULT_PARTITION_TYPE, + DEFAULT_DESCRIPTOR_OFFSET, + DEFAULT_DESCRIPTOR_SIZE, + DEFAULT_FIFO0_OFFSET, + DEFAULT_FIFO0_SIZE, + DEFAULT_FIFO1_OFFSET, + DEFAULT_FIFO1_SIZE, +}; + +/* TX and RX FIFO point to same location for such loopback partition type + * (FIFO0 offset = FIFO1 offset) + */ +const struct ipcmem_partition_header loopback_partition_hdr = { + LOOPBACK_PARTITION_TYPE, + DEFAULT_DESCRIPTOR_OFFSET, + DEFAULT_DESCRIPTOR_SIZE, + DEFAULT_FIFO0_OFFSET, + DEFAULT_FIFO0_SIZE, + DEFAULT_FIFO0_OFFSET, + DEFAULT_FIFO0_SIZE, +}; + +const struct global_partition_header global_partition_hdr = { + GLOBAL_PARTITION_TYPE, + GLOBAL_REGION_OFFSET, + GLOBAL_REGION_SIZE, +}; diff --git a/msm/synx/ipclite_client.h b/msm/synx/ipclite_client.h new file mode 100644 index 0000000000..3ffa3a5652 --- /dev/null +++ b/msm/synx/ipclite_client.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __IPCLITE_CLIENT_H__ +#define __IPCLITE_CLIENT_H__ + +typedef atomic_t ipclite_atomic_uint32_t; +typedef atomic_t ipclite_atomic_int32_t; + +/** + * A list of hosts supported in IPCMEM + */ +enum ipcmem_host_type { + IPCMEM_APPS = 0, /**< Apps Processor */ + IPCMEM_MODEM = 1, /**< Modem processor */ + IPCMEM_LPASS = 2, /**< Audio processor */ + IPCMEM_SLPI = 3, /**< Sensor processor */ + IPCMEM_GPU = 4, /**< Graphics processor */ + IPCMEM_CDSP = 5, /**< Compute DSP processor */ + IPCMEM_CVP = 6, /**< Computer Vision processor */ + IPCMEM_CAM = 7, /**< Camera processor */ + IPCMEM_VPU = 8, /**< Video processor */ + IPCMEM_NUM_HOSTS = 9, /**< Max number of host in target */ + + IPCMEM_GLOBAL_HOST = 0xFE, /**< Global Host */ + IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */ +}; + +struct global_region_info { + void *virt_base; + uint32_t size; +}; + +typedef int32_t (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv); + +/** + * ipclite_msg_send() - Sends message to remote client. + * + * @proc_id : Identifier for remote client or subsystem. + * @data : 64 bit message value. + * + * @return Zero on successful registration, negative on failure. + */ +int32_t ipclite_msg_send(int32_t proc_id, uint64_t data); + +/** + * ipclite_register_client() - Registers client callback with framework. + * + * @cb_func_ptr : Client callback function to be called on message receive. + * @priv : Private data required by client for handling callback. 
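+ *        Note: the driver keeps a single, static synx client registration,
+ *        so calling this again simply replaces the earlier callback and data.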
+ * + * @return Zero on successful registration, negative on failure. + */ +int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv); + +/** + * ipclite_test_msg_send() - Sends message to remote client. + * + * @proc_id : Identifier for remote client or subsystem. + * @data : 64 bit message value. + * + * @return Zero on successful registration, negative on failure. + */ +int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data); + +/** + * ipclite_register_test_client() - Registers client callback with framework. + * + * @cb_func_ptr : Client callback function to be called on message receive. + * @priv : Private data required by client for handling callback. + * + * @return Zero on successful registration, negative on failure. + */ +int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv); + +/** + * get_global_partition_info() - Gets info about IPCMEM's global partitions. + * + * @global_ipcmem : Pointer to global_region_info structure. + * + * @return Zero on successful registration, negative on failure. + */ +int32_t get_global_partition_info(struct global_region_info *global_ipcmem); + +/** + * ipclite_hwlock_reset() - Resets the lock if the lock is currently held by core_id + * + * core_id : takes the core id of which the lock needs to be resetted. + * + * @return None. + */ +void ipclite_hwlock_reset(enum ipcmem_host_type core_id); + +/** + * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value. + * + * @addr : Pointer to global memory + * @data : Value to store in global memory + * + * @return None. + */ +void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data); + +/** + * ipclite_atomic_init_i32() - Initializes the global memory with int32_t value. + * + * @addr : Pointer to global memory + * @data : Value to store in global memory + * + * @return None. + */ +void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data); + +/** + * ipclite_global_atomic_store_u32() - Writes uint32_t value to global memory. + * + * @addr : Pointer to global memory + * @data : Value to store in global memory + * + * @return None. + */ +void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data); + +/** + * ipclite_global_atomic_store_i32() - Writes int32_t value to global memory. + * + * @addr : Pointer to global memory + * @data : Value to store in global memory + * + * @return None. + */ +void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data); + +/** + * ipclite_global_atomic_load_u32() - Reads the value from global memory. + * + * @addr : Pointer to global memory + * + * @return uint32_t value. + */ +uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr); + +/** + * ipclite_global_atomic_load_i32() - Reads the value from global memory. + * + * @addr : Pointer to global memory + * + * @return int32_t value. + */ +int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr); + +/** + * ipclite_global_test_and_set_bit() - Sets a bit in global memory. + * + * @nr : Bit position to set. + * @addr : Pointer to global memory + * + * @return previous value. + */ +uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr); + +/** + * ipclite_global_test_and_clear_bit() - Clears a bit in global memory. + * + * @nr : Bit position to clear. + * @addr : Pointer to global memory + * + * @return previous value. 
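+ *         (in this implementation the whole previous word is returned, i.e.
+ *         the result of the atomic fetch-and, not just the cleared bit)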
+ */ +uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr); + +/** + * ipclite_global_atomic_inc() - Increments an atomic variable by one. + * + * @addr : Pointer to global memory + * + * @return previous value. + */ +int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr); + +/** + * ipclite_global_atomic_dec() - Decrements an atomic variable by one. + * + * @addr : Pointer to global variable + * + * @return previous value. + */ +int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr); + +#endif diff --git a/msm/synx/synx.c b/msm/synx/synx.c new file mode 100644 index 0000000000..8135a6edc7 --- /dev/null +++ b/msm/synx/synx.c @@ -0,0 +1,2636 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "synx_debugfs.h" +#include "synx_private.h" +#include "synx_util.h" + +struct synx_device *synx_dev; +static atomic64_t synx_counter = ATOMIC64_INIT(1); + +void synx_external_callback(s32 sync_obj, int status, void *data) +{ + struct synx_signal_cb *signal_cb = data; + + if (IS_ERR_OR_NULL(signal_cb)) { + dprintk(SYNX_ERR, + "invalid payload from external obj %d [%d]\n", + sync_obj, status); + return; + } + + signal_cb->status = status; + signal_cb->ext_sync_id = sync_obj; + signal_cb->flag = SYNX_SIGNAL_FROM_CALLBACK; + + dprintk(SYNX_DBG, + "external callback from %d on handle %u\n", + sync_obj, signal_cb->handle); + + /* + * invoke the handler directly as external callback + * is invoked from separate task. + * avoids creation of separate task again. 
+ */ + synx_signal_handler(&signal_cb->cb_dispatch); +} +EXPORT_SYMBOL(synx_external_callback); + +bool synx_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +const char *synx_fence_driver_name(struct dma_fence *fence) +{ + return "Global Synx driver"; +} + +void synx_fence_release(struct dma_fence *fence) +{ + /* release the memory allocated during create */ + kfree(fence->lock); + kfree(fence); + dprintk(SYNX_MEM, "released backing fence %pK\n", fence); +} +EXPORT_SYMBOL(synx_fence_release); + +static struct dma_fence_ops synx_fence_ops = { + .wait = dma_fence_default_wait, + .enable_signaling = synx_fence_enable_signaling, + .get_driver_name = synx_fence_driver_name, + .get_timeline_name = synx_fence_driver_name, + .release = synx_fence_release, +}; + +static int synx_create_sync_fd(struct dma_fence *fence) +{ + int fd; + struct sync_file *sync_file; + + if (IS_ERR_OR_NULL(fence)) + return -SYNX_INVALID; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return fd; + + sync_file = sync_file_create(fence); + if (IS_ERR_OR_NULL(sync_file)) { + dprintk(SYNX_ERR, "error creating sync file\n"); + goto err; + } + + fd_install(fd, sync_file->file); + return fd; + +err: + put_unused_fd(fd); + return -SYNX_INVALID; +} + +void *synx_get_fence(struct synx_session *session, + u32 h_synx) +{ + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + struct dma_fence *fence = NULL; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return NULL; + + synx_data = synx_util_acquire_handle(client, h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj) || + IS_ERR_OR_NULL(synx_obj->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, h_synx); + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + fence = synx_obj->fence; + /* obtain an additional reference to the fence */ + dma_fence_get(fence); + mutex_unlock(&synx_obj->obj_lock); + +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return fence; +} +EXPORT_SYMBOL(synx_get_fence); + +static int synx_native_check_bind(struct synx_client *client, + struct synx_create_params *params) +{ + int rc; + u32 h_synx; + struct synx_entry_64 *ext_entry; + struct synx_map_entry *entry; + + if (IS_ERR_OR_NULL(params->fence)) + return -SYNX_INVALID; + + ext_entry = synx_util_retrieve_data(params->fence, + synx_util_map_params_to_type(params->flags)); + if (IS_ERR_OR_NULL(ext_entry)) + return -SYNX_NOENT; + + h_synx = ext_entry->data[0]; + synx_util_remove_data(params->fence, + synx_util_map_params_to_type(params->flags)); + + entry = synx_util_get_map_entry(h_synx); + if (IS_ERR_OR_NULL(entry)) + /* possible cleanup, retry to alloc new handle */ + return -SYNX_NOENT; + + rc = synx_util_init_handle(client, entry->synx_obj, + &h_synx, entry); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "[sess :%llu] new handle init failed\n", + client->id); + goto fail; + } + + *params->h_synx = h_synx; + return SYNX_SUCCESS; + +fail: + synx_util_release_map_entry(entry); + return rc; +} + +static int synx_native_create_core(struct synx_client *client, + struct synx_create_params *params) +{ + int rc; + struct synx_coredata *synx_obj; + struct synx_map_entry *map_entry; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->h_synx)) + return -SYNX_INVALID; + + synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_obj)) + return -SYNX_NOMEM; + 
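+	/* set up the synx coredata, including its backing dma fence */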
+ rc = synx_util_init_coredata(synx_obj, params, + &synx_fence_ops, client->dma_context); + if (rc) { + dprintk(SYNX_ERR, + "[sess :%llu] handle allocation failed\n", + client->id); + kfree(synx_obj); + goto fail; + } + + map_entry = synx_util_insert_to_map(synx_obj, + *params->h_synx, 0); + if (IS_ERR_OR_NULL(map_entry)) { + rc = PTR_ERR(map_entry); + synx_util_put_object(synx_obj); + goto fail; + } + + rc = synx_util_add_callback(synx_obj, *params->h_synx); + if (rc != SYNX_SUCCESS) { + synx_util_release_map_entry(map_entry); + goto fail; + } + + rc = synx_util_init_handle(client, synx_obj, + params->h_synx, map_entry); + if (rc < 0) { + dprintk(SYNX_ERR, + "[sess :%llu] unable to init new handle\n", + client->id); + synx_util_release_map_entry(map_entry); + goto fail; + } + + dprintk(SYNX_MEM, + "[sess :%llu] allocated %u, core %pK, fence %pK\n", + client->id, *params->h_synx, synx_obj, synx_obj->fence); + return SYNX_SUCCESS; + +fail: + return rc; +} + +int synx_create(struct synx_session *session, + struct synx_create_params *params) +{ + int rc = -SYNX_NOENT; + struct synx_client *client; + struct synx_external_desc_v2 ext_desc = {0}; + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->h_synx) || + params->flags > SYNX_CREATE_MAX_FLAGS) { + dprintk(SYNX_ERR, "invalid create arguments\n"); + return -SYNX_INVALID; + } + + if (params->flags & SYNX_CREATE_DMA_FENCE) { + dprintk(SYNX_ERR, + "handle create with native fence not supported\n"); + return -SYNX_NOSUPPORT; + } + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + *params->h_synx = 0; + + do { + /* create with external fence */ + if (!IS_ERR_OR_NULL(params->fence)) + rc = synx_native_check_bind(client, params); + + if (rc == -SYNX_NOENT) { + rc = synx_native_create_core(client, params); + if (rc == SYNX_SUCCESS && + !IS_ERR_OR_NULL(params->fence)) { + /* save external fence details */ + rc = synx_util_save_data(params->fence, + synx_util_map_params_to_type(params->flags), + *params->h_synx); + if (rc == -SYNX_ALREADY) { + /* + * raced with create on same fence from + * another client. clear the allocated + * handle and retry. 
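+					 * the retry below then picks up the
+					 * winning client's handle through
+					 * synx_native_check_bind().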
+ */ + synx_native_release_core(client, *params->h_synx); + *params->h_synx = 0; + rc = -SYNX_NOENT; + continue; + } else if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "allocating handle failed=%d", rc); + synx_native_release_core(client, *params->h_synx); + break; + } + + /* bind with external fence */ + ext_desc.id = *((u32 *)params->fence); + ext_desc.type = synx_util_map_params_to_type(params->flags); + rc = synx_bind(session, *params->h_synx, ext_desc); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "[sess :%llu] bind external fence failed\n", + client->id); + synx_native_release_core(client, *params->h_synx); + goto fail; + } + } + } + + if (rc == SYNX_SUCCESS) + dprintk(SYNX_VERB, + "[sess :%llu] handle allocated %u\n", + client->id, *params->h_synx); + + break; + } while (true); + +fail: + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_create); + +int synx_native_signal_core(struct synx_coredata *synx_obj, + u32 status, + bool cb_signal, + u64 ext_sync_id) +{ + int rc = 0; + int ret; + u32 i = 0; + u32 idx = 0; + s32 sync_id; + u32 type; + void *data = NULL; + struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS]; + struct bind_operations *bind_ops = NULL; + + if (IS_ERR_OR_NULL(synx_obj)) + return -SYNX_INVALID; + + synx_util_callback_dispatch(synx_obj, status); + + /* + * signal the external bound sync obj/s even if fence signal fails, + * w/ error signal state (set above) to prevent deadlock + */ + if (synx_obj->num_bound_synxs > 0) { + memset(bind_descs, 0, + sizeof(struct synx_bind_desc) * SYNX_MAX_NUM_BINDINGS); + for (i = 0; i < synx_obj->num_bound_synxs; i++) { + /* signal invoked by external sync obj */ + if (cb_signal && + (ext_sync_id == + synx_obj->bound_synxs[i].external_desc.id)) { + dprintk(SYNX_VERB, + "skipping signaling inbound sync: %llu\n", + ext_sync_id); + type = synx_obj->bound_synxs[i].external_desc.type; + memset(&synx_obj->bound_synxs[i], 0, + sizeof(struct synx_bind_desc)); + /* clear the hash table entry */ + synx_util_remove_data(&ext_sync_id, type); + continue; + } + memcpy(&bind_descs[idx++], + &synx_obj->bound_synxs[i], + sizeof(struct synx_bind_desc)); + /* clear the memory, its been backed up above */ + memset(&synx_obj->bound_synxs[i], 0, + sizeof(struct synx_bind_desc)); + } + synx_obj->num_bound_synxs = 0; + } + + for (i = 0; i < idx; i++) { + sync_id = bind_descs[i].external_desc.id; + data = bind_descs[i].external_data; + type = bind_descs[i].external_desc.type; + bind_ops = synx_util_get_bind_ops(type); + if (IS_ERR_OR_NULL(bind_ops)) { + dprintk(SYNX_ERR, + "invalid bind ops for type: %u\n", type); + kfree(data); + continue; + } + + /* clear the hash table entry */ + synx_util_remove_data(&sync_id, type); + + /* + * we are already signaled, so don't want to + * recursively be signaled + */ + ret = bind_ops->deregister_callback( + synx_external_callback, data, sync_id); + if (ret < 0) { + dprintk(SYNX_ERR, + "deregistration fail on %d, type: %u, err=%d\n", + sync_id, type, ret); + continue; + } + dprintk(SYNX_VERB, + "signal external sync: %d, type: %u, status: %u\n", + sync_id, type, status); + /* optional function to enable external signaling */ + if (bind_ops->enable_signaling) { + ret = bind_ops->enable_signaling(sync_id); + if (ret < 0) + dprintk(SYNX_ERR, + "enabling fail on %d, type: %u, err=%d\n", + sync_id, type, ret); + } + ret = bind_ops->signal(sync_id, status); + if (ret < 0) + dprintk(SYNX_ERR, + "signaling fail on %d, type: %u, err=%d\n", + sync_id, type, ret); + /* + * release the memory allocated for 
external data. + * It is safe to release this memory as external cb + * has been already deregistered before this. + */ + kfree(data); + } + + return rc; +} + +int synx_native_signal_fence(struct synx_coredata *synx_obj, + u32 status) +{ + int rc = 0; + unsigned long flags; + + if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence)) + return -SYNX_INVALID; + + if (status <= SYNX_STATE_ACTIVE) { + dprintk(SYNX_ERR, "signaling with wrong status: %u\n", + status); + return -SYNX_INVALID; + } + + if (synx_util_is_merged_object(synx_obj)) { + dprintk(SYNX_ERR, "signaling a composite handle\n"); + return -SYNX_INVALID; + } + + if (synx_util_get_object_status(synx_obj) != + SYNX_STATE_ACTIVE) + return -SYNX_ALREADY; + + if (IS_ERR_OR_NULL(synx_obj->signal_cb)) { + dprintk(SYNX_ERR, "signal cb in bad state\n"); + return -SYNX_INVALID; + } + + /* + * remove registered callback for the fence + * so it does not invoke the signal through callback again + */ + if (!dma_fence_remove_callback(synx_obj->fence, + &synx_obj->signal_cb->fence_cb)) { + dprintk(SYNX_ERR, "callback could not be removed\n"); + return -SYNX_INVALID; + } + + dprintk(SYNX_MEM, "signal cb destroyed %pK\n", + synx_obj->signal_cb); + kfree(synx_obj->signal_cb); + synx_obj->signal_cb = NULL; + + /* releasing reference held by signal cb */ + synx_util_put_object(synx_obj); + + spin_lock_irqsave(synx_obj->fence->lock, flags); + /* check the status again acquiring lock to avoid errors */ + if (synx_util_get_object_status_locked(synx_obj) != + SYNX_STATE_ACTIVE) { + spin_unlock_irqrestore(synx_obj->fence->lock, flags); + return -SYNX_ALREADY; + } + + /* set fence error to model {signal w/ error} */ + if (status != SYNX_STATE_SIGNALED_SUCCESS) + dma_fence_set_error(synx_obj->fence, -status); + + rc = dma_fence_signal_locked(synx_obj->fence); + if (rc) + dprintk(SYNX_ERR, + "signaling fence %pK failed=%d\n", + synx_obj->fence, rc); + spin_unlock_irqrestore(synx_obj->fence->lock, flags); + + return rc; +} + +void synx_signal_handler(struct work_struct *cb_dispatch) +{ + int rc = SYNX_SUCCESS; + u32 idx; + struct synx_signal_cb *signal_cb = + container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch); + struct synx_coredata *synx_obj = signal_cb->synx_obj; + + u32 h_synx = signal_cb->handle; + u32 status = signal_cb->status; + + if ((signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) && + (synx_util_is_global_handle(h_synx) || + synx_util_is_global_object(synx_obj))) { + idx = (IS_ERR_OR_NULL(synx_obj)) ? + synx_util_global_idx(h_synx) : + synx_obj->global_idx; + rc = synx_global_update_status(idx, status); + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "global status update of %u failed=%d\n", + h_synx, rc); + synx_global_put_ref(idx); + } + + /* + * when invoked from external callback, possible for + * all local clients to have released the handle coredata. + */ + if (IS_ERR_OR_NULL(synx_obj)) { + dprintk(SYNX_WARN, + "handle %d has no local clients\n", + h_synx); + dprintk(SYNX_MEM, "signal cb destroyed %pK\n", + signal_cb); + kfree(signal_cb); + return; + } + + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "global status update for %u failed=%d\n", + h_synx, rc); + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + + if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) + rc = synx_native_signal_fence(synx_obj, status); + + if (rc == SYNX_SUCCESS) + rc = synx_native_signal_core(synx_obj, status, + (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? 
+ true : false, signal_cb->ext_sync_id); + + mutex_unlock(&synx_obj->obj_lock); + + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "internal signaling %u failed=%d", + h_synx, rc); + +fail: + /* release reference held by signal cb */ + synx_util_put_object(synx_obj); + dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb); + kfree(signal_cb); + dprintk(SYNX_VERB, "signal handle %u dispatch complete=%d", + h_synx, rc); +} + +/* function would be called from atomic context */ +void synx_fence_callback(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct synx_signal_cb *signal_cb = + container_of(cb, struct synx_signal_cb, fence_cb); + + dprintk(SYNX_DBG, + "callback from external fence %pK for handle %u\n", + fence, signal_cb->handle); + + /* other signal_cb members would be set during cb registration */ + signal_cb->status = dma_fence_get_status_locked(fence); + + INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler); + queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch); +} +EXPORT_SYMBOL(synx_fence_callback); + +static int synx_signal_offload_job( + struct synx_client *client, + struct synx_coredata *synx_obj, + u32 h_synx, u32 status) +{ + int rc = SYNX_SUCCESS; + struct synx_signal_cb *signal_cb; + + signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC); + if (IS_ERR_OR_NULL(signal_cb)) { + rc = -SYNX_NOMEM; + goto fail; + } + + /* + * since the signal will be queued to separate thread, + * to ensure the synx coredata pointer remain valid, get + * additional reference, thus avoiding any potential + * use-after-free. + */ + synx_util_get_object(synx_obj); + + signal_cb->handle = h_synx; + signal_cb->status = status; + signal_cb->synx_obj = synx_obj; + signal_cb->flag = SYNX_SIGNAL_FROM_CLIENT; + + dprintk(SYNX_VERB, + "[sess :%llu] signal work queued for %u\n", + client->id, h_synx); + + INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler); + queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch); + +fail: + return rc; +} + +int synx_signal(struct synx_session *session, u32 h_synx, u32 status) +{ + int rc = SYNX_SUCCESS; + struct synx_client *client; + struct synx_handle_coredata *synx_data = NULL; + struct synx_coredata *synx_obj; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + if (status <= SYNX_STATE_ACTIVE) { + dprintk(SYNX_ERR, + "[sess :%llu] signaling with wrong status: %u\n", + client->id, status); + rc = -SYNX_INVALID; + goto fail; + } + + synx_data = synx_util_acquire_handle(client, h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj) || + IS_ERR_OR_NULL(synx_obj->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, h_synx); + rc = -SYNX_INVALID; + goto fail; + } + + if (synx_util_is_global_handle(h_synx) || + synx_util_is_global_object(synx_obj)) + rc = synx_global_update_status( + synx_obj->global_idx, status); + + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "[sess :%llu] status update %d failed=%d\n", + client->id, h_synx, rc); + goto fail; + } + + /* + * offload callback dispatch and external fence + * notification to separate worker thread, if any. 
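+	 * this keeps registered callbacks and bound external fences
+	 * from being signalled in the caller's context.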
+ */ + if (synx_obj->num_bound_synxs || + !list_empty(&synx_obj->reg_cbs_list)) + rc = synx_signal_offload_job(client, synx_obj, + h_synx, status); + + mutex_lock(&synx_obj->obj_lock); + rc = synx_native_signal_fence(synx_obj, status); + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "[sess :%llu] signaling %u failed=%d\n", + client->id, h_synx, rc); + mutex_unlock(&synx_obj->obj_lock); + +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_signal); + +static int synx_match_payload(struct synx_kernel_payload *cb_payload, + struct synx_kernel_payload *payload) +{ + int rc = 0; + + if (IS_ERR_OR_NULL(cb_payload) || IS_ERR_OR_NULL(payload)) + return -SYNX_INVALID; + + if ((cb_payload->cb_func == payload->cb_func) && + (cb_payload->data == payload->data)) { + if (payload->cancel_cb_func) { + cb_payload->cb_func = + payload->cancel_cb_func; + rc = 1; + } else { + rc = 2; + dprintk(SYNX_VERB, + "kernel cb de-registration success\n"); + } + } + + return rc; +} + +int synx_async_wait(struct synx_session *session, + struct synx_callback_params *params) +{ + int rc = 0; + u32 idx; + u32 status; + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + struct synx_cb_data *synx_cb; + struct synx_kernel_payload payload; + + if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) + return -SYNX_INVALID; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + synx_data = synx_util_acquire_handle(client, params->h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, params->h_synx); + rc = -SYNX_INVALID; + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_merged_object(synx_obj)) { + dprintk(SYNX_ERR, + "[sess :%llu] cannot async wait on merged handle %u\n", + client->id, params->h_synx); + rc = -SYNX_INVALID; + goto release; + } + + synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC); + if (IS_ERR_OR_NULL(synx_cb)) { + rc = -SYNX_NOMEM; + goto release; + } + + payload.h_synx = params->h_synx; + payload.cb_func = params->cb_func; + payload.data = params->userdata; + + /* allocate a free index from client cb table */ + rc = synx_util_alloc_cb_entry(client, &payload, &idx); + if (rc) { + dprintk(SYNX_ERR, + "[sess :%llu] error allocating cb entry\n", + client->id); + kfree(synx_cb); + goto release; + } + + if (synx_util_is_global_handle(params->h_synx) || + synx_util_is_global_object(synx_obj)) + status = synx_global_test_status_set_wait( + synx_util_global_idx(params->h_synx), + SYNX_CORE_APSS); + else + status = synx_util_get_object_status(synx_obj); + + synx_cb->session = session; + synx_cb->idx = idx; + INIT_WORK(&synx_cb->cb_dispatch, synx_util_cb_dispatch); + + /* add callback if object still ACTIVE, dispatch if SIGNALED */ + if (status == SYNX_STATE_ACTIVE) { + dprintk(SYNX_VERB, + "[sess :%llu] callback added for handle %u\n", + client->id, params->h_synx); + list_add(&synx_cb->node, &synx_obj->reg_cbs_list); + } else { + synx_cb->status = status; + dprintk(SYNX_VERB, + "[sess :%llu] callback queued for handle %u\n", + client->id, params->h_synx); + queue_work(synx_dev->wq_cb, + &synx_cb->cb_dispatch); + } + +release: + mutex_unlock(&synx_obj->obj_lock); +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_async_wait); + +int synx_cancel_async_wait( + struct synx_session 
*session, + struct synx_callback_params *params) +{ + int rc = 0, ret = 0; + u32 status; + bool match_found = false; + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + struct synx_kernel_payload payload; + struct synx_cb_data *synx_cb, *synx_cb_temp; + struct synx_client_cb *cb_payload; + + if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) + return -SYNX_INVALID; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + synx_data = synx_util_acquire_handle(client, params->h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, params->h_synx); + rc = -SYNX_INVALID; + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_merged_object(synx_obj) || + synx_util_is_external_object(synx_obj)) { + dprintk(SYNX_ERR, + "cannot cancel wait on composite handle\n"); + goto release; + } + + payload.h_synx = params->h_synx; + payload.cb_func = params->cb_func; + payload.data = params->userdata; + payload.cancel_cb_func = params->cancel_cb_func; + + status = synx_util_get_object_status(synx_obj); + if (status != SYNX_STATE_ACTIVE) { + dprintk(SYNX_ERR, + "handle %u already signaled cannot cancel\n", + params->h_synx); + rc = -SYNX_INVALID; + goto release; + } + + status = SYNX_CALLBACK_RESULT_CANCELED; + /* remove all cb payloads mayching the deregister call */ + list_for_each_entry_safe(synx_cb, synx_cb_temp, + &synx_obj->reg_cbs_list, node) { + if (synx_cb->session != session) { + continue; + } else if (synx_cb->idx == 0 || + synx_cb->idx >= SYNX_MAX_OBJS) { + /* + * this should not happen. Even if it does, + * the allocated memory will be cleaned up + * when object is destroyed, preventing any + * memory leaks. 
+ */ + dprintk(SYNX_ERR, + "[sess :%llu] invalid callback data\n", + client->id); + continue; + } + + cb_payload = &client->cb_table[synx_cb->idx]; + ret = synx_match_payload(&cb_payload->kernel_cb, &payload); + switch (ret) { + case 1: + /* queue the cancel cb work */ + list_del_init(&synx_cb->node); + synx_cb->status = status; + queue_work(synx_dev->wq_cb, + &synx_cb->cb_dispatch); + match_found = true; + break; + case 2: + /* no cancellation cb */ + if (synx_util_clear_cb_entry(client, cb_payload)) + dprintk(SYNX_ERR, + "[sess :%llu] error clearing cb %u\n", + client->id, params->h_synx); + list_del_init(&synx_cb->node); + kfree(synx_cb); + match_found = true; + break; + default: + break; + } + } + + if (!match_found) + rc = -SYNX_INVALID; + +release: + mutex_unlock(&synx_obj->obj_lock); +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_cancel_async_wait); + +int synx_merge(struct synx_session *session, + struct synx_merge_params *params) +{ + int rc, i, j = 0; + u32 h_child; + u32 count = 0; + u32 *h_child_list; + struct synx_client *client; + struct dma_fence **fences = NULL; + struct synx_coredata *synx_obj; + struct synx_map_entry *map_entry; + + if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) + return -SYNX_INVALID; + + if (IS_ERR_OR_NULL(params->h_synxs) || + IS_ERR_OR_NULL(params->h_merged_obj)) { + dprintk(SYNX_ERR, "invalid arguments\n"); + return -SYNX_INVALID; + } + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + rc = synx_util_validate_merge(client, params->h_synxs, + params->num_objs, &fences, &count); + if (rc < 0) { + dprintk(SYNX_ERR, + "[sess :%llu] merge validation failed\n", + client->id); + rc = -SYNX_INVALID; + goto fail; + } + + synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_obj)) { + rc = -SYNX_NOMEM; + goto fail; + } + + rc = synx_util_init_group_coredata(synx_obj, fences, + params, count, client->dma_context); + if (rc) { + dprintk(SYNX_ERR, + "[sess :%llu] error initializing merge handle\n", + client->id); + goto clean_up; + } + + map_entry = synx_util_insert_to_map(synx_obj, + *params->h_merged_obj, 0); + if (IS_ERR_OR_NULL(map_entry)) { + rc = PTR_ERR(map_entry); + goto clean_up; + } + + rc = synx_util_init_handle(client, synx_obj, + params->h_merged_obj, map_entry); + if (rc) { + dprintk(SYNX_ERR, + "[sess :%llu] unable to init merge handle %u\n", + client->id, *params->h_merged_obj); + dma_fence_put(synx_obj->fence); + goto clear; + } + + if (params->flags & SYNX_MERGE_GLOBAL_FENCE) { + h_child_list = kzalloc(count*4, GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_obj)) { + rc = -SYNX_NOMEM; + goto clear; + } + + for (i = 0; i < count; i++) { + h_child = synx_util_get_fence_entry((u64)fences[i], 1); + if (!synx_util_is_global_handle(h_child)) + continue; + + h_child_list[j++] = synx_util_global_idx(h_child); + } + + rc = synx_global_merge(h_child_list, j, + synx_util_global_idx(*params->h_merged_obj)); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, "global merge failed\n"); + goto clear; + } + } + + dprintk(SYNX_MEM, + "[sess :%llu] merge allocated %u, core %pK, fence %pK\n", + client->id, *params->h_merged_obj, synx_obj, + synx_obj->fence); + synx_put_client(client); + return SYNX_SUCCESS; + +clear: + synx_util_release_map_entry(map_entry); +clean_up: + kfree(synx_obj); +fail: + synx_util_merge_error(client, params->h_synxs, count); + if (params->num_objs && params->num_objs <= count) + kfree(fences); + 
synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_merge); + +int synx_native_release_core(struct synx_client *client, + u32 h_synx) +{ + int rc = -SYNX_INVALID; + struct synx_handle_coredata *curr, *synx_handle = NULL; + + spin_lock_bh(&client->handle_map_lock); + hash_for_each_possible(client->handle_map, + curr, node, h_synx) { + if (curr->key == h_synx && + curr->rel_count != 0) { + curr->rel_count--; + synx_handle = curr; + rc = SYNX_SUCCESS; + break; + } + } + spin_unlock_bh(&client->handle_map_lock); + + /* release the reference obtained at synx creation */ + synx_util_release_handle(synx_handle); + + return rc; +} + +int synx_release(struct synx_session *session, u32 h_synx) +{ + int rc = 0; + struct synx_client *client; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + rc = synx_native_release_core(client, h_synx); + + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_release); + +int synx_wait(struct synx_session *session, + u32 h_synx, u64 timeout_ms) +{ + int rc = 0; + unsigned long timeleft; + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + synx_data = synx_util_acquire_handle(client, h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, h_synx); + rc = -SYNX_INVALID; + goto fail; + } + + if (synx_util_is_global_handle(h_synx)) { + rc = synx_global_test_status_set_wait( + synx_util_global_idx(h_synx), SYNX_CORE_APSS); + if (rc != SYNX_STATE_ACTIVE) + goto fail; + } + + timeleft = dma_fence_wait_timeout(synx_obj->fence, (bool) 0, + msecs_to_jiffies(timeout_ms)); + if (timeleft <= 0) { + dprintk(SYNX_ERR, + "[sess :%llu] wait timeout for handle %u\n", + client->id, h_synx); + rc = -ETIMEDOUT; + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + rc = synx_util_get_object_status(synx_obj); + mutex_unlock(&synx_obj->obj_lock); + +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_wait); + +int synx_bind(struct synx_session *session, + u32 h_synx, + struct synx_external_desc_v2 external_sync) +{ + int rc = 0; + u32 i; + u32 bound_idx; + struct synx_client *client; + struct synx_handle_coredata *synx_data = NULL; + struct synx_coredata *synx_obj; + struct synx_signal_cb *data = NULL; + struct bind_operations *bind_ops = NULL; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + synx_data = synx_util_acquire_handle(client, h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj)) { + if (rc || synx_data) + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, h_synx); + goto fail; + } + + bind_ops = synx_util_get_bind_ops(external_sync.type); + if (IS_ERR_OR_NULL(bind_ops)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid bind ops for %u\n", + client->id, external_sync.type); + rc = -SYNX_INVALID; + goto fail; + } + + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_merged_object(synx_obj)) { + dprintk(SYNX_ERR, + "[sess :%llu] cannot bind to composite handle %u\n", + client->id, h_synx); + rc = -SYNX_INVALID; + goto release; + } + + if (synx_obj->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) { + dprintk(SYNX_ERR, + "[sess :%llu] max bindings reached for handle 
%u\n", + client->id, h_synx); + rc = -SYNX_NOMEM; + goto release; + } + + /* don't bind external sync obj if already done */ + for (i = 0; i < synx_obj->num_bound_synxs; i++) { + if ((external_sync.id == + synx_obj->bound_synxs[i].external_desc.id) && + (external_sync.type == + synx_obj->bound_synxs[i].external_desc.type)){ + dprintk(SYNX_ERR, + "[sess :%llu] duplicate bind for sync %llu\n", + client->id, external_sync.id); + rc = -SYNX_ALREADY; + goto release; + } + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (IS_ERR_OR_NULL(data)) { + rc = -SYNX_NOMEM; + goto release; + } + + /* get additional reference since passing pointer to cb */ + synx_util_get_object(synx_obj); + + /* data passed to external callback */ + data->handle = h_synx; + data->synx_obj = synx_obj; + + bound_idx = synx_obj->num_bound_synxs; + memcpy(&synx_obj->bound_synxs[bound_idx], + &external_sync, sizeof(struct synx_external_desc_v2)); + synx_obj->bound_synxs[bound_idx].external_data = data; + synx_obj->num_bound_synxs++; + mutex_unlock(&synx_obj->obj_lock); + + rc = bind_ops->register_callback(synx_external_callback, + data, external_sync.id); + if (rc) { + dprintk(SYNX_ERR, + "[sess :%llu] callback reg failed for %llu\n", + client->id, external_sync.id); + mutex_lock(&synx_obj->obj_lock); + memset(&synx_obj->bound_synxs[bound_idx], 0, + sizeof(struct synx_external_desc_v2)); + synx_obj->num_bound_synxs--; + mutex_unlock(&synx_obj->obj_lock); + synx_util_put_object(synx_obj); + kfree(data); + goto fail; + } + + synx_util_release_handle(synx_data); + dprintk(SYNX_DBG, + "[sess :%llu] ext sync %llu bound to handle %u\n", + client->id, external_sync.id, h_synx); + synx_put_client(client); + return SYNX_SUCCESS; + +release: + mutex_unlock(&synx_obj->obj_lock); +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_bind); + +int synx_get_status(struct synx_session *session, + u32 h_synx) +{ + int rc = 0; + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + synx_data = synx_util_acquire_handle(client, h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj) || + IS_ERR_OR_NULL(synx_obj->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle access %u\n", + client->id, h_synx); + rc = -SYNX_INVALID; + goto fail; + } + + if (synx_util_is_global_handle(h_synx)) { + rc = synx_global_get_status( + synx_util_global_idx(h_synx)); + if (rc != SYNX_STATE_ACTIVE) { + dprintk(SYNX_VERB, + "[sess :%llu] handle %u in status %d\n", + client->id, h_synx, rc); + goto fail; + } + } + + mutex_lock(&synx_obj->obj_lock); + rc = synx_util_get_object_status(synx_obj); + mutex_unlock(&synx_obj->obj_lock); + dprintk(SYNX_VERB, + "[sess :%llu] handle %u status %d\n", + client->id, h_synx, rc); + +fail: + synx_util_release_handle(synx_data); + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_get_status); + +static struct synx_map_entry *synx_handle_conversion( + struct synx_client *client, + u32 *h_synx, struct synx_map_entry *old_entry) +{ + int rc; + struct synx_map_entry *map_entry = NULL; + struct synx_coredata *synx_obj; + + if (IS_ERR_OR_NULL(old_entry)) { + old_entry = synx_util_get_map_entry(*h_synx); + if (IS_ERR_OR_NULL(old_entry)) { + rc = PTR_ERR(old_entry); + dprintk(SYNX_ERR, + "invalid import handle %u err=%d", + *h_synx, rc); + return old_entry; + } + } + + synx_obj = 
old_entry->synx_obj; + BUG_ON(synx_obj == NULL); + + mutex_lock(&synx_obj->obj_lock); + synx_util_get_object(synx_obj); + if (synx_obj->global_idx != 0) { + *h_synx = synx_encode_handle( + synx_obj->global_idx, SYNX_CORE_APSS, true); + + map_entry = synx_util_get_map_entry(*h_synx); + if (IS_ERR_OR_NULL(map_entry)) { + /* raced with release from last global client */ + map_entry = synx_util_insert_to_map(synx_obj, + *h_synx, 0); + if (IS_ERR_OR_NULL(map_entry)) { + rc = PTR_ERR(map_entry); + dprintk(SYNX_ERR, + "addition of %u to map failed=%d", + *h_synx, rc); + } + } + } else { + rc = synx_alloc_global_handle(h_synx); + if (rc == SYNX_SUCCESS) { + synx_obj->global_idx = + synx_util_global_idx(*h_synx); + synx_obj->type |= SYNX_CREATE_GLOBAL_FENCE; + + map_entry = synx_util_insert_to_map(synx_obj, + *h_synx, 0); + if (IS_ERR_OR_NULL(map_entry)) { + rc = PTR_ERR(map_entry); + synx_global_put_ref( + synx_util_global_idx(*h_synx)); + dprintk(SYNX_ERR, + "insertion of %u to map failed=%d", + *h_synx, rc); + } + } + } + mutex_unlock(&synx_obj->obj_lock); + + if (IS_ERR_OR_NULL(map_entry)) + synx_util_put_object(synx_obj); + + synx_util_release_map_entry(old_entry); + return map_entry; +} + +static int synx_native_import_handle(struct synx_client *client, + struct synx_import_indv_params *params) +{ + int rc = SYNX_SUCCESS; + u32 h_synx, core_id; + struct synx_map_entry *map_entry, *old_entry; + struct synx_coredata *synx_obj; + struct synx_handle_coredata *synx_data = NULL, *curr; + char name[SYNX_OBJ_NAME_LEN] = {0}; + struct synx_create_params c_params = {0}; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->fence) || + IS_ERR_OR_NULL(params->new_h_synx)) + return -SYNX_INVALID; + + h_synx = *((u32 *)params->fence); + + /* check if already mapped to client */ + spin_lock_bh(&client->handle_map_lock); + hash_for_each_possible(client->handle_map, + curr, node, h_synx) { + if (curr->key == h_synx && + curr->rel_count != 0 && + (synx_util_is_global_handle(h_synx) || + params->flags & SYNX_IMPORT_LOCAL_FENCE)) { + curr->rel_count++; + kref_get(&curr->refcount); + synx_data = curr; + break; + } + } + spin_unlock_bh(&client->handle_map_lock); + + if (synx_data) { + *params->new_h_synx = h_synx; + return SYNX_SUCCESS; + } + + map_entry = synx_util_get_map_entry(h_synx); + if (IS_ERR_OR_NULL(map_entry)) { + core_id = (h_synx & SYNX_OBJ_CORE_ID_MASK) + >> SYNX_HANDLE_INDEX_BITS; + if (core_id == SYNX_CORE_APSS) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid import handle %u\n", + client->id, h_synx); + return -SYNX_INVALID; + } else if (synx_util_is_global_handle(h_synx)) { + /* import global handle created in another core */ + synx_util_map_import_params_to_create(params, &c_params); + scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d", + current->pid); + c_params.name = name; + c_params.h_synx = &h_synx; + + rc = synx_native_create_core(client, &c_params); + if (rc != SYNX_SUCCESS) + return rc; + + *params->new_h_synx = h_synx; + return SYNX_SUCCESS; + } + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle %u\n", + client->id, h_synx); + return -SYNX_INVALID; + } + + synx_obj = map_entry->synx_obj; + BUG_ON(synx_obj == NULL); + + if ((params->flags & SYNX_IMPORT_GLOBAL_FENCE) && + !synx_util_is_global_handle(h_synx)) { + old_entry = map_entry; + map_entry = synx_handle_conversion(client, &h_synx, + old_entry); + } + + if (rc != SYNX_SUCCESS) + return rc; + + *params->new_h_synx = h_synx; + + rc = synx_util_init_handle(client, map_entry->synx_obj, + 
params->new_h_synx, map_entry); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "[sess :%llu] init of imported handle %u failed=%d\n", + client->id, h_synx, rc); + synx_util_release_map_entry(map_entry); + } + + return rc; +} + +static int synx_native_import_fence(struct synx_client *client, + struct synx_import_indv_params *params) +{ + int rc = SYNX_SUCCESS; + u32 curr_h_synx; + u32 global; + struct synx_create_params c_params = {0}; + char name[SYNX_OBJ_NAME_LEN] = {0}; + struct synx_fence_entry *entry; + struct synx_map_entry *map_entry = NULL; + struct synx_handle_coredata *synx_data = NULL, *curr; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->fence) || + IS_ERR_OR_NULL(params->new_h_synx)) + return -SYNX_INVALID; + + global = SYNX_IMPORT_GLOBAL_FENCE & params->flags; + +retry: + *params->new_h_synx = + synx_util_get_fence_entry((u64)params->fence, global); + if (*params->new_h_synx == 0) { + /* create a new synx obj and add to fence map */ + synx_util_map_import_params_to_create(params, &c_params); + scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d", + current->pid); + c_params.name = name; + c_params.h_synx = params->new_h_synx; + c_params.fence = params->fence; + + rc = synx_native_create_core(client, &c_params); + if (rc != SYNX_SUCCESS) + return rc; + + curr_h_synx = *params->new_h_synx; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (IS_ERR_OR_NULL(entry)) { + rc = -SYNX_NOMEM; + curr_h_synx = *c_params.h_synx; + goto fail; + } + + do { + entry->key = (u64)params->fence; + if (global) + entry->g_handle = *params->new_h_synx; + else + entry->l_handle = *params->new_h_synx; + + rc = synx_util_insert_fence_entry(entry, + params->new_h_synx, global); + if (rc == SYNX_SUCCESS) { + dprintk(SYNX_DBG, + "mapped fence %pK to new handle %u\n", + params->fence, *params->new_h_synx); + break; + } else if (rc == -SYNX_ALREADY) { + /* + * release the new handle allocated + * and use the available handle + * already mapped instead. 
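+				 * the duplicate handle allocated above is
+				 * dropped through the fail path below.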
+ */ + map_entry = synx_util_get_map_entry( + *params->new_h_synx); + if (IS_ERR_OR_NULL(map_entry)) { + /* race with fence release, need to retry */ + dprintk(SYNX_DBG, + "re-attempting handle import\n"); + *params->new_h_synx = curr_h_synx; + continue; + } + + rc = synx_util_init_handle(client, + map_entry->synx_obj, + params->new_h_synx, map_entry); + + dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n", + params->fence, *params->new_h_synx); + goto release; + } else { + dprintk(SYNX_ERR, + "importing fence %pK failed, err=%d\n", + params->fence, rc); + goto release; + } + } while (true); + } else { + /* check if already mapped to client */ + spin_lock_bh(&client->handle_map_lock); + hash_for_each_possible(client->handle_map, + curr, node, *params->new_h_synx) { + if (curr->key == *params->new_h_synx && + curr->rel_count != 0) { + curr->rel_count++; + kref_get(&curr->refcount); + synx_data = curr; + break; + } + } + spin_unlock_bh(&client->handle_map_lock); + + if (synx_data) { + dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n", + params->fence, *params->new_h_synx); + return SYNX_SUCCESS; + } + + if (global && !synx_util_is_global_handle( + *params->new_h_synx)) + map_entry = synx_handle_conversion(client, + params->new_h_synx, NULL); + else + map_entry = synx_util_get_map_entry( + *params->new_h_synx); + + if (IS_ERR_OR_NULL(map_entry)) { + /* race with fence release, need to retry */ + dprintk(SYNX_DBG, "re-attempting handle import\n"); + goto retry; + } + + rc = synx_util_init_handle(client, map_entry->synx_obj, + params->new_h_synx, map_entry); + + dprintk(SYNX_DBG, "mapped fence %pK to existing handle %u\n", + params->fence, *params->new_h_synx); + } + + return rc; + +release: + kfree(entry); +fail: + synx_native_release_core(client, curr_h_synx); + return rc; +} + +static int synx_native_import_indv(struct synx_client *client, + struct synx_import_indv_params *params) +{ + int rc = -SYNX_INVALID; + + if (IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->new_h_synx) || + IS_ERR_OR_NULL(params->fence)) { + dprintk(SYNX_ERR, "invalid import arguments\n"); + return -SYNX_INVALID; + } + + if (likely(params->flags & SYNX_IMPORT_DMA_FENCE)) + rc = synx_native_import_fence(client, params); + else if (params->flags & SYNX_IMPORT_SYNX_FENCE) + rc = synx_native_import_handle(client, params); + + dprintk(SYNX_DBG, + "[sess :%llu] import of fence %pK %s, handle %u\n", + client->id, params->fence, + rc ? "failed" : "successful", + rc ? 
0 : *params->new_h_synx); + + return rc; +} + +static int synx_native_import_arr(struct synx_client *client, + struct synx_import_arr_params *params) +{ + u32 i; + int rc = SYNX_SUCCESS; + + if (IS_ERR_OR_NULL(params) || params->num_fences == 0) { + dprintk(SYNX_ERR, "invalid import arr arguments\n"); + return -SYNX_INVALID; + } + + for (i = 0; i < params->num_fences; i++) { + rc = synx_native_import_indv(client, ¶ms->list[i]); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "importing fence[%u] %pK failed=%d\n", + i, params->list[i].fence, rc); + break; + } + } + + if (rc != SYNX_SUCCESS) + while (i--) { + /* release the imported handles and cleanup */ + if (synx_native_release_core(client, + *params->list[i].new_h_synx) != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "error cleaning up imported handle[%u] %u\n", + i, *params->list[i].new_h_synx); + } + + return rc; +} + +int synx_import(struct synx_session *session, + struct synx_import_params *params) +{ + int rc = 0; + struct synx_client *client; + + if (IS_ERR_OR_NULL(params)) { + dprintk(SYNX_ERR, "invalid import arguments\n"); + return -SYNX_INVALID; + } + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + /* import fence based on its type */ + if (params->type == SYNX_IMPORT_ARR_PARAMS) + rc = synx_native_import_arr(client, ¶ms->arr); + else + rc = synx_native_import_indv(client, ¶ms->indv); + + synx_put_client(client); + return rc; +} +EXPORT_SYMBOL(synx_import); + +static int synx_handle_create(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + int result; + int csl_fence; + struct synx_create_v2 create_info; + struct synx_create_params params = {0}; + + if (k_ioctl->size != sizeof(create_info)) + return -SYNX_INVALID; + + if (copy_from_user(&create_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + params.h_synx = &create_info.synx_obj; + params.name = create_info.name; + params.flags = create_info.flags; + if (create_info.flags & SYNX_CREATE_CSL_FENCE) { + csl_fence = create_info.desc.id[0]; + params.fence = &csl_fence; + } + result = synx_create(session, ¶ms); + + if (!result) + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), + &create_info, + k_ioctl->size)) + return -EFAULT; + + return result; +} + +static int synx_handle_getstatus(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_signal_v2 signal_info; + + if (k_ioctl->size != sizeof(signal_info)) + return -SYNX_INVALID; + + if (copy_from_user(&signal_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + signal_info.synx_state = + synx_get_status(session, signal_info.synx_obj); + + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), + &signal_info, + k_ioctl->size)) + return -EFAULT; + + return SYNX_SUCCESS; +} + +static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_import_info import_info; + struct synx_import_params params = {0}; + + if (k_ioctl->size != sizeof(import_info)) + return -SYNX_INVALID; + + if (copy_from_user(&import_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + if (import_info.flags & SYNX_IMPORT_SYNX_FENCE) + params.indv.fence = &import_info.synx_obj; + else if (import_info.flags & SYNX_IMPORT_DMA_FENCE) + params.indv.fence = + sync_file_get_fence(import_info.desc.id[0]); + + params.type = SYNX_IMPORT_INDV_PARAMS; + params.indv.flags = import_info.flags; + params.indv.new_h_synx = 
&import_info.new_synx_obj; + + if (synx_import(session, ¶ms)) + return -SYNX_INVALID; + + if (import_info.flags & SYNX_IMPORT_DMA_FENCE) + dma_fence_put(params.indv.fence); + + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), + &import_info, + k_ioctl->size)) + return -EFAULT; + + return SYNX_SUCCESS; +} + +static int synx_handle_import_arr( + struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + int rc = -SYNX_INVALID; + u32 idx = 0; + struct synx_client *client; + struct synx_import_arr_info arr_info; + struct synx_import_info *arr; + struct synx_import_indv_params params = {0}; + + if (k_ioctl->size != sizeof(arr_info)) + return -SYNX_INVALID; + + if (copy_from_user(&arr_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + arr = kcalloc(arr_info.num_objs, + sizeof(*arr), GFP_KERNEL); + if (IS_ERR_OR_NULL(arr)) + return -ENOMEM; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) { + rc = PTR_ERR(client); + goto clean; + } + + if (copy_from_user(arr, + u64_to_user_ptr(arr_info.list), + sizeof(*arr) * arr_info.num_objs)) { + rc = -EFAULT; + goto fail; + } + + while (idx < arr_info.num_objs) { + params.new_h_synx = &arr[idx].new_synx_obj; + params.flags = arr[idx].flags; + if (arr[idx].flags & SYNX_IMPORT_SYNX_FENCE) + params.fence = &arr[idx].synx_obj; + if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE) + params.fence = + sync_file_get_fence(arr[idx].desc.id[0]); + rc = synx_native_import_indv(client, ¶ms); + if (rc != SYNX_SUCCESS) + break; + idx++; + } + + /* release allocated handles in case of failure */ + if (rc != SYNX_SUCCESS) { + while (idx > 0) + synx_native_release_core(client, + arr[--idx].new_synx_obj); + } else { + if (copy_to_user(u64_to_user_ptr(arr_info.list), + arr, + sizeof(*arr) * arr_info.num_objs)) { + rc = -EFAULT; + goto fail; + } + } + +fail: + synx_put_client(client); +clean: + kfree(arr); + return rc; +} + +static int synx_handle_export(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + return -SYNX_INVALID; +} + +static int synx_handle_signal(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_signal_v2 signal_info; + + if (k_ioctl->size != sizeof(signal_info)) + return -SYNX_INVALID; + + if (copy_from_user(&signal_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + return synx_signal(session, signal_info.synx_obj, + signal_info.synx_state); +} + +static int synx_handle_merge(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + u32 *h_synxs; + int result; + struct synx_merge_v2 merge_info; + struct synx_merge_params params = {0}; + + if (k_ioctl->size != sizeof(merge_info)) + return -SYNX_INVALID; + + if (copy_from_user(&merge_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + if (merge_info.num_objs >= SYNX_MAX_OBJS) + return -SYNX_INVALID; + + h_synxs = kcalloc(merge_info.num_objs, + sizeof(*h_synxs), GFP_KERNEL); + if (IS_ERR_OR_NULL(h_synxs)) + return -ENOMEM; + + if (copy_from_user(h_synxs, + u64_to_user_ptr(merge_info.synx_objs), + sizeof(u32) * merge_info.num_objs)) { + kfree(h_synxs); + return -EFAULT; + } + + params.num_objs = merge_info.num_objs; + params.h_synxs = h_synxs; + params.flags = merge_info.flags; + params.h_merged_obj = &merge_info.merged; + + result = synx_merge(session, ¶ms); + if (!result) + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), + &merge_info, + k_ioctl->size)) { + kfree(h_synxs); + return -EFAULT; + 
} + + kfree(h_synxs); + return result; +} + +static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_wait_v2 wait_info; + + if (k_ioctl->size != sizeof(wait_info)) + return -SYNX_INVALID; + + if (copy_from_user(&wait_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + k_ioctl->result = synx_wait(session, + wait_info.synx_obj, wait_info.timeout_ms); + + return SYNX_SUCCESS; +} + +static int synx_handle_async_wait( + struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + int rc = 0; + struct synx_userpayload_info_v2 user_data; + struct synx_callback_params params = {0}; + + if (k_ioctl->size != sizeof(user_data)) + return -SYNX_INVALID; + + if (copy_from_user(&user_data, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + params.h_synx = user_data.synx_obj; + params.cb_func = synx_util_default_user_callback; + params.userdata = (void *)user_data.payload[0]; + + rc = synx_async_wait(session, ¶ms); + if (rc) + dprintk(SYNX_ERR, + "user cb registration failed for handle %d\n", + user_data.synx_obj); + + return rc; +} + +static int synx_handle_cancel_async_wait( + struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + int rc = 0; + struct synx_userpayload_info_v2 user_data; + struct synx_callback_params params = {0}; + + if (k_ioctl->size != sizeof(user_data)) + return -SYNX_INVALID; + + if (copy_from_user(&user_data, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + params.h_synx = user_data.synx_obj; + params.cb_func = synx_util_default_user_callback; + params.userdata = (void *)user_data.payload[0]; + + rc = synx_cancel_async_wait(session, ¶ms); + if (rc) + dprintk(SYNX_ERR, + "user cb deregistration failed for handle %d\n", + user_data.synx_obj); + + return rc; +} + +static int synx_handle_bind(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_bind_v2 synx_bind_info; + + if (k_ioctl->size != sizeof(synx_bind_info)) + return -SYNX_INVALID; + + if (copy_from_user(&synx_bind_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + k_ioctl->result = synx_bind(session, + synx_bind_info.synx_obj, + synx_bind_info.ext_sync_desc); + + return k_ioctl->result; +} + +static int synx_handle_release(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_info release_info; + + if (k_ioctl->size != sizeof(release_info)) + return -SYNX_INVALID; + + if (copy_from_user(&release_info, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + return synx_release(session, release_info.synx_obj); +} + +static int synx_handle_get_fence(struct synx_private_ioctl_arg *k_ioctl, + struct synx_session *session) +{ + struct synx_fence_fd fence_fd; + struct dma_fence *fence; + + if (k_ioctl->size != sizeof(fence_fd)) + return -SYNX_INVALID; + + if (copy_from_user(&fence_fd, + u64_to_user_ptr(k_ioctl->ioctl_ptr), + k_ioctl->size)) + return -EFAULT; + + fence = synx_get_fence(session, fence_fd.synx_obj); + fence_fd.fd = synx_create_sync_fd(fence); + /* + * release additional reference taken in synx_get_fence. + * additional reference ensures the fence is valid and + * does not race with handle/fence release. 
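+	 * the sync file created above holds its own reference to the fence.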
+ */ + dma_fence_put(fence); + + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), + &fence_fd, k_ioctl->size)) + return -EFAULT; + + return SYNX_SUCCESS; +} + +static long synx_ioctl(struct file *filep, + unsigned int cmd, + unsigned long arg) +{ + s32 rc = 0; + struct synx_private_ioctl_arg k_ioctl; + struct synx_session *session = filep->private_data; + + if (cmd != SYNX_PRIVATE_IOCTL_CMD) { + dprintk(SYNX_ERR, "invalid ioctl cmd\n"); + return -ENOIOCTLCMD; + } + + if (copy_from_user(&k_ioctl, + (struct synx_private_ioctl_arg *)arg, + sizeof(k_ioctl))) { + dprintk(SYNX_ERR, "invalid ioctl args\n"); + return -EFAULT; + } + + if (!k_ioctl.ioctl_ptr) + return -SYNX_INVALID; + + dprintk(SYNX_VERB, "[sess :%llu] Enter cmd %u from pid %d\n", + ((struct synx_client *)session)->id, + k_ioctl.id, current->pid); + + switch (k_ioctl.id) { + case SYNX_CREATE: + rc = synx_handle_create(&k_ioctl, session); + break; + case SYNX_RELEASE: + rc = synx_handle_release(&k_ioctl, session); + break; + case SYNX_REGISTER_PAYLOAD: + rc = synx_handle_async_wait(&k_ioctl, + session); + break; + case SYNX_DEREGISTER_PAYLOAD: + rc = synx_handle_cancel_async_wait(&k_ioctl, + session); + break; + case SYNX_SIGNAL: + rc = synx_handle_signal(&k_ioctl, session); + break; + case SYNX_MERGE: + rc = synx_handle_merge(&k_ioctl, session); + break; + case SYNX_WAIT: + rc = synx_handle_wait(&k_ioctl, session); + if (copy_to_user((void *)arg, + &k_ioctl, + sizeof(k_ioctl))) { + dprintk(SYNX_ERR, "invalid ioctl args\n"); + rc = -EFAULT; + } + break; + case SYNX_BIND: + rc = synx_handle_bind(&k_ioctl, session); + break; + case SYNX_GETSTATUS: + rc = synx_handle_getstatus(&k_ioctl, session); + break; + case SYNX_IMPORT: + rc = synx_handle_import(&k_ioctl, session); + break; + case SYNX_IMPORT_ARR: + rc = synx_handle_import_arr(&k_ioctl, session); + break; + case SYNX_EXPORT: + rc = synx_handle_export(&k_ioctl, session); + break; + case SYNX_GETFENCE_FD: + rc = synx_handle_get_fence(&k_ioctl, session); + break; + default: + rc = -SYNX_INVALID; + } + + dprintk(SYNX_VERB, "[sess :%llu] exit with status %d\n", + ((struct synx_client *)session)->id, rc); + + return rc; +} + +static ssize_t synx_read(struct file *filep, + char __user *buf, size_t size, loff_t *f_pos) +{ + ssize_t rc = 0; + struct synx_client *client = NULL; + struct synx_client_cb *cb; + struct synx_session *session = filep->private_data; + struct synx_userpayload_info_v2 data; + + if (size != sizeof(struct synx_userpayload_info_v2)) { + dprintk(SYNX_ERR, "invalid read size\n"); + return -SYNX_INVALID; + } + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) + return -SYNX_INVALID; + + mutex_lock(&client->event_q_lock); + cb = list_first_entry_or_null(&client->event_q, + struct synx_client_cb, node); + if (IS_ERR_OR_NULL(cb)) { + mutex_unlock(&client->event_q_lock); + rc = 0; + goto fail; + } + + if (cb->idx == 0 || cb->idx >= SYNX_MAX_OBJS) { + dprintk(SYNX_ERR, "invalid index\n"); + mutex_unlock(&client->event_q_lock); + rc = -SYNX_INVALID; + goto fail; + } + + list_del_init(&cb->node); + mutex_unlock(&client->event_q_lock); + + rc = size; + data.synx_obj = cb->kernel_cb.h_synx; + data.reserved = cb->kernel_cb.status; + data.payload[0] = (u64)cb->kernel_cb.data; + if (copy_to_user(buf, + &data, + sizeof(struct synx_userpayload_info_v2))) { + dprintk(SYNX_ERR, "couldn't copy user callback data\n"); + rc = -EFAULT; + } + + if (synx_util_clear_cb_entry(client, cb)) + dprintk(SYNX_ERR, + "[sess :%llu] error clearing cb for handle %u\n", + client->id, 
data.synx_obj); +fail: + synx_put_client(client); + return rc; +} + +static unsigned int synx_poll(struct file *filep, + struct poll_table_struct *poll_table) +{ + int rc = 0; + struct synx_client *client; + struct synx_session *session = filep->private_data; + + client = synx_get_client(session); + if (IS_ERR_OR_NULL(client)) { + dprintk(SYNX_ERR, "invalid session in poll\n"); + return SYNX_SUCCESS; + } + + poll_wait(filep, &client->event_wq, poll_table); + mutex_lock(&client->event_q_lock); + if (!list_empty(&client->event_q)) + rc = POLLPRI; + mutex_unlock(&client->event_q_lock); + + synx_put_client(client); + return rc; +} + +struct synx_session *synx_initialize( + struct synx_initialization_params *params) +{ + struct synx_client *client; + + if (IS_ERR_OR_NULL(params)) + return ERR_PTR(-SYNX_INVALID); + + client = vzalloc(sizeof(*client)); + if (IS_ERR_OR_NULL(client)) + return ERR_PTR(-SYNX_NOMEM); + + if (params->name) + strlcpy(client->name, params->name, sizeof(client->name)); + + client->active = true; + client->dma_context = dma_fence_context_alloc(1); + client->id = atomic64_inc_return(&synx_counter); + kref_init(&client->refcount); + spin_lock_init(&client->handle_map_lock); + mutex_init(&client->event_q_lock); + INIT_LIST_HEAD(&client->event_q); + init_waitqueue_head(&client->event_wq); + /* zero idx not allowed */ + set_bit(0, client->cb_bitmap); + + spin_lock_bh(&synx_dev->native->metadata_map_lock); + hash_add(synx_dev->native->client_metadata_map, + &client->node, (u64)client); + spin_unlock_bh(&synx_dev->native->metadata_map_lock); + + dprintk(SYNX_INFO, "[sess :%llu] session created %s\n", + client->id, params->name); + + return (struct synx_session *)client; +} +EXPORT_SYMBOL(synx_initialize); + +int synx_uninitialize(struct synx_session *session) +{ + struct synx_client *client = NULL, *curr; + + spin_lock_bh(&synx_dev->native->metadata_map_lock); + hash_for_each_possible(synx_dev->native->client_metadata_map, + curr, node, (u64)session) { + if (curr == (struct synx_client *)session) { + if (curr->active) { + curr->active = false; + client = curr; + } + break; + } + } + spin_unlock_bh(&synx_dev->native->metadata_map_lock); + + /* release the reference obtained at synx init */ + synx_put_client(client); + return SYNX_SUCCESS; +} +EXPORT_SYMBOL(synx_uninitialize); + +static int synx_open(struct inode *inode, struct file *filep) +{ + int rc = 0; + char name[SYNX_OBJ_NAME_LEN]; + struct synx_initialization_params params = {0}; + + dprintk(SYNX_VERB, "Enter pid: %d\n", current->pid); + + scnprintf(name, SYNX_OBJ_NAME_LEN, "umd-client-%d", current->pid); + params.name = name; + params.id = SYNX_CLIENT_NATIVE; + + filep->private_data = synx_initialize(¶ms); + if (IS_ERR_OR_NULL(filep->private_data)) { + dprintk(SYNX_ERR, "session allocation failed for pid: %d\n", + current->pid); + rc = PTR_ERR(filep->private_data); + } else { + dprintk(SYNX_VERB, "allocated new session for pid: %d\n", + current->pid); + } + + return rc; +} + +static int synx_close(struct inode *inode, struct file *filep) +{ + struct synx_session *session = filep->private_data; + + return synx_uninitialize(session); +} + +static const struct file_operations synx_fops = { + .owner = THIS_MODULE, + .open = synx_open, + .read = synx_read, + .release = synx_close, + .poll = synx_poll, + .unlocked_ioctl = synx_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = synx_ioctl, +#endif +}; + +int synx_register_ops( + const struct synx_register_params *params) +{ + s32 rc = 0; + struct synx_registered_ops *client_ops; + + 
if (!synx_dev || !params || !params->name || + !synx_util_is_valid_bind_type(params->type) || + !params->ops.register_callback || + !params->ops.deregister_callback || + !params->ops.signal) { + dprintk(SYNX_ERR, "invalid register params\n"); + return -SYNX_INVALID; + } + + mutex_lock(&synx_dev->vtbl_lock); + client_ops = &synx_dev->bind_vtbl[params->type]; + if (!client_ops->valid) { + client_ops->valid = true; + memcpy(&client_ops->ops, ¶ms->ops, + sizeof(client_ops->ops)); + strlcpy(client_ops->name, params->name, + sizeof(client_ops->name)); + client_ops->type = params->type; + dprintk(SYNX_INFO, + "registered bind ops type %u for %s\n", + params->type, params->name); + } else { + dprintk(SYNX_WARN, + "client already registered for type %u by %s\n", + client_ops->type, client_ops->name); + rc = -SYNX_ALREADY; + } + mutex_unlock(&synx_dev->vtbl_lock); + + return rc; +} +EXPORT_SYMBOL(synx_register_ops); + +int synx_deregister_ops( + const struct synx_register_params *params) +{ + struct synx_registered_ops *client_ops; + + if (IS_ERR_OR_NULL(params) || params->name || + !synx_util_is_valid_bind_type(params->type)) { + dprintk(SYNX_ERR, "invalid params\n"); + return -SYNX_INVALID; + } + + mutex_lock(&synx_dev->vtbl_lock); + client_ops = &synx_dev->bind_vtbl[params->type]; + memset(client_ops, 0, sizeof(*client_ops)); + dprintk(SYNX_INFO, "deregistered bind ops for %s\n", + params->name); + mutex_unlock(&synx_dev->vtbl_lock); + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL(synx_deregister_ops); + +void synx_ipc_handler(struct work_struct *cb_dispatch) +{ + struct synx_signal_cb *signal_cb = + container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch); + struct synx_map_entry *map_entry; + + map_entry = synx_util_get_map_entry(signal_cb->handle); + if (IS_ERR_OR_NULL(map_entry)) { + dprintk(SYNX_WARN, + "no clients to notify for %u\n", + signal_cb->handle); + dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb); + kfree(signal_cb); + return; + } + + /* get reference on synx coredata for signal cb */ + synx_util_get_object(map_entry->synx_obj); + signal_cb->synx_obj = map_entry->synx_obj; + synx_util_release_map_entry(map_entry); + synx_signal_handler(&signal_cb->cb_dispatch); +} + +int synx_ipc_callback(u32 client_id, + s64 data, void *priv) +{ + struct synx_signal_cb *signal_cb; + u32 status = (u32)data; + u32 handle = (u32)(data >> 32); + + signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC); + if (IS_ERR_OR_NULL(signal_cb)) + return -SYNX_NOMEM; + + dprintk(SYNX_INFO, + "signal notification for %u received with status %u\n", + handle, status); + + signal_cb->status = status; + signal_cb->handle = handle; + signal_cb->flag = SYNX_SIGNAL_FROM_IPC; + + INIT_WORK(&signal_cb->cb_dispatch, synx_ipc_handler); + queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch); + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL(synx_ipc_callback); + +int synx_recover(enum synx_client_id id) +{ + u32 core_id; + + core_id = synx_util_map_client_id_to_core(id); + if (core_id >= SYNX_CORE_MAX) { + dprintk(SYNX_ERR, "invalid client id %u\n", id); + return -SYNX_INVALID; + } + + switch (core_id) { + case SYNX_CORE_EVA: + case SYNX_CORE_IRIS: + break; + default: + dprintk(SYNX_ERR, "recovery not supported on %u\n", id); + return -SYNX_NOSUPPORT; + } + + return synx_global_recover(core_id); +} +EXPORT_SYMBOL(synx_recover); + +static int synx_local_mem_init(void) +{ + if (!synx_dev->native) + return -SYNX_INVALID; + + hash_init(synx_dev->native->client_metadata_map); + hash_init(synx_dev->native->fence_map); + 
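+	/*
+	 * client_metadata_map keys active sessions by their client pointer
+	 * (see synx_initialize() above), and fence_map keys entries by the
+	 * backing dma_fence pointer (see synx_util_insert_fence_entry() in
+	 * synx_util.c). The maps below appear to track global handles,
+	 * local handles and CSL fences respectively.
+	 */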
hash_init(synx_dev->native->global_map); + hash_init(synx_dev->native->local_map); + hash_init(synx_dev->native->csl_fence_map); + + spin_lock_init(&synx_dev->native->metadata_map_lock); + spin_lock_init(&synx_dev->native->fence_map_lock); + spin_lock_init(&synx_dev->native->global_map_lock); + spin_lock_init(&synx_dev->native->local_map_lock); + spin_lock_init(&synx_dev->native->csl_map_lock); + + /* zero idx not allowed */ + set_bit(0, synx_dev->native->bitmap); + return 0; +} + +static int synx_cdsp_restart_notifier(struct notifier_block *nb, + unsigned long code, void *data) +{ + struct synx_cdsp_ssr *cdsp_ssr = &synx_dev->cdsp_ssr; + + if (&cdsp_ssr->nb != nb) { + dprintk(SYNX_ERR, "Invalid SSR Notifier block\n"); + return NOTIFY_BAD; + } + + switch (code) { + case QCOM_SSR_BEFORE_SHUTDOWN: + break; + case QCOM_SSR_AFTER_SHUTDOWN: + if (cdsp_ssr->ssrcnt != 0) { + dprintk(SYNX_INFO, "Cleaning up global memory\n"); + synx_global_recover(SYNX_CORE_NSP); + } + break; + case QCOM_SSR_BEFORE_POWERUP: + break; + case QCOM_SSR_AFTER_POWERUP: + dprintk(SYNX_DBG, "CDSP is up"); + if (cdsp_ssr->ssrcnt == 0) + cdsp_ssr->ssrcnt++; + break; + default: + dprintk(SYNX_ERR, "Unknown status code for CDSP SSR\n"); + break; + } + + return NOTIFY_DONE; +} + +static int __init synx_init(void) +{ + int rc; + + dprintk(SYNX_INFO, "device initialization start\n"); + + synx_dev = kzalloc(sizeof(*synx_dev), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_dev)) + return -SYNX_NOMEM; + + rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME); + if (rc < 0) { + dprintk(SYNX_ERR, "region allocation failed\n"); + goto alloc_fail; + } + + cdev_init(&synx_dev->cdev, &synx_fops); + synx_dev->cdev.owner = THIS_MODULE; + rc = cdev_add(&synx_dev->cdev, synx_dev->dev, 1); + if (rc < 0) { + dprintk(SYNX_ERR, "device registation failed\n"); + goto reg_fail; + } + + synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME); + device_create(synx_dev->class, NULL, synx_dev->dev, + NULL, SYNX_DEVICE_NAME); + + synx_dev->wq_cb = alloc_workqueue(SYNX_WQ_CB_NAME, + WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CB_THREADS); + synx_dev->wq_cleanup = alloc_workqueue(SYNX_WQ_CLEANUP_NAME, + WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CLEANUP_THREADS); + if (!synx_dev->wq_cb || !synx_dev->wq_cleanup) { + dprintk(SYNX_ERR, + "high priority work queue creation failed\n"); + rc = -SYNX_INVALID; + goto fail; + } + + synx_dev->native = vzalloc(sizeof(*synx_dev->native)); + if (IS_ERR_OR_NULL(synx_dev->native)) + goto fail; + + mutex_init(&synx_dev->vtbl_lock); + mutex_init(&synx_dev->error_lock); + INIT_LIST_HEAD(&synx_dev->error_list); + synx_dev->debugfs_root = synx_init_debugfs_dir(synx_dev); + + rc = synx_global_mem_init(); + if (rc) { + dprintk(SYNX_ERR, "shared mem init failed, err=%d\n", rc); + goto err; + } + + synx_dev->cdsp_ssr.ssrcnt = 0; + synx_dev->cdsp_ssr.nb.notifier_call = synx_cdsp_restart_notifier; + synx_dev->cdsp_ssr.handle = + qcom_register_ssr_notifier("cdsp", &synx_dev->cdsp_ssr.nb); + if (synx_dev->cdsp_ssr.handle == NULL) { + dprintk(SYNX_ERR, "SSR registration failed\n"); + goto err; + } + + ipclite_register_client(synx_ipc_callback, NULL); + synx_local_mem_init(); + + dprintk(SYNX_INFO, "device initialization success\n"); + + return 0; + +err: + vfree(synx_dev->native); +fail: + device_destroy(synx_dev->class, synx_dev->dev); + class_destroy(synx_dev->class); +reg_fail: + unregister_chrdev_region(synx_dev->dev, 1); +alloc_fail: + kfree(synx_dev); + synx_dev = NULL; + return rc; +} + +static void __exit synx_exit(void) +{ + struct 
error_node *err_node, *err_node_tmp; + + flush_workqueue(synx_dev->wq_cb); + flush_workqueue(synx_dev->wq_cleanup); + device_destroy(synx_dev->class, synx_dev->dev); + class_destroy(synx_dev->class); + cdev_del(&synx_dev->cdev); + unregister_chrdev_region(synx_dev->dev, 1); + synx_remove_debugfs_dir(synx_dev); + /* release uncleared error nodes */ + list_for_each_entry_safe( + err_node, err_node_tmp, + &synx_dev->error_list, + node) { + list_del(&err_node->node); + kfree(err_node); + } + mutex_destroy(&synx_dev->vtbl_lock); + mutex_destroy(&synx_dev->error_lock); + vfree(synx_dev->native); + kfree(synx_dev); +} + +module_init(synx_init); +module_exit(synx_exit); + +MODULE_DESCRIPTION("Global Synx Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/msm/synx/synx_api.h b/msm/synx/synx_api.h new file mode 100644 index 0000000000..523e646666 --- /dev/null +++ b/msm/synx/synx_api.h @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_API_H__ +#define __SYNX_API_H__ + +#include +#include + +#include "synx_err.h" + +/** + * enum synx_create_flags - Flags passed during synx_create call + * + * SYNX_CREATE_LOCAL_FENCE : Instructs the framework to create local synx object + * SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object + * SYNX_CREATE_DMA_FENCE : Create a synx object by wrapping the provided dma fence. + * Need to pass the dma_fence ptr through fence variable + * if this flag is set. + * SYNX_CREATE_CSL_FENCE : Create a synx object with provided csl fence. + * Establishes interop with the csl fence through + * bind operations. + */ +enum synx_create_flags { + SYNX_CREATE_LOCAL_FENCE = 0x01, + SYNX_CREATE_GLOBAL_FENCE = 0x02, + SYNX_CREATE_DMA_FENCE = 0x04, + SYNX_CREATE_CSL_FENCE = 0x08, + SYNX_CREATE_MAX_FLAGS = 0x10, +}; + +/** + * enum synx_init_flags - Session initialization flag + */ +enum synx_init_flags { + SYNX_INIT_MAX = 0x01, +}; + +/** + * enum synx_import_flags - Import flags + * + * SYNX_IMPORT_LOCAL_FENCE : Instructs the framework to create local synx object + * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object + * SYNX_IMPORT_SYNX_FENCE : Import native Synx handle for synchronization + * Need to pass the Synx handle ptr through fence variable + * if this flag is set. + * SYNX_IMPORT_DMA_FENCE : Import dma fence.and crate Synx handle for interop + * Need to pass the dma_fence ptr through fence variable + * if this flag is set. + * SYNX_IMPORT_EX_RELEASE : Flag to inform relaxed invocation where release call + * need not be called by client on this handle after import. + */ +enum synx_import_flags { + SYNX_IMPORT_LOCAL_FENCE = 0x01, + SYNX_IMPORT_GLOBAL_FENCE = 0x02, + SYNX_IMPORT_SYNX_FENCE = 0x04, + SYNX_IMPORT_DMA_FENCE = 0x08, + SYNX_IMPORT_EX_RELEASE = 0x10, +}; + +/** + * enum synx_signal_status - Signal status + * + * SYNX_STATE_SIGNALED_SUCCESS : Signal success + * SYNX_STATE_SIGNALED_CANCEL : Signal cancellation + * SYNX_STATE_SIGNALED_MAX : Clients can send custom notification + * beyond the max value (only positive) + */ +enum synx_signal_status { + SYNX_STATE_SIGNALED_SUCCESS = 2, + SYNX_STATE_SIGNALED_CANCEL = 4, + SYNX_STATE_SIGNALED_MAX = 64, +}; + +/** + * synx_callback - Callback invoked by external fence + * + * External fence dispatch the registered callback to notify + * signal to synx framework. 
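+ *
+ * Illustrative producer-side sketch (hypothetical external driver
+ * code, not defined by this API): the callback and userdata handed
+ * over via bind_operations.register_callback() are simply invoked
+ * when the external sync object fires, e.g.
+ *
+ *	static void ext_fence_fire(struct ext_fence *f)
+ *	{
+ *		f->synx_cb(f->id, SYNX_STATE_SIGNALED_SUCCESS, f->synx_data);
+ *	}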
+ */ +typedef void (*synx_callback)(s32 sync_obj, int status, void *data); + +/** + * synx_user_callback - Callback function registered by clients + * + * User callback registered for non-blocking wait. Dispatched when + * synx object is signaled. + */ +typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data); + +/** + * struct bind_operations - Function pointers that need to be defined + * to achieve bind functionality for external fence with synx obj + * + * @register_callback : Function to register with external sync object + * @deregister_callback : Function to deregister with external sync object + * @enable_signaling : Function to enable the signaling on the external + * sync object (optional) + * @signal : Function to signal the external sync object + */ +struct bind_operations { + int (*register_callback)(synx_callback cb_func, + void *userdata, s32 sync_obj); + int (*deregister_callback)(synx_callback cb_func, + void *userdata, s32 sync_obj); + int (*enable_signaling)(s32 sync_obj); + int (*signal)(s32 sync_obj, u32 status); +}; + +/** + * synx_bind_client_type : External fence supported for bind + * + * SYNX_TYPE_CSL : Camera CSL fence + */ +enum synx_bind_client_type { + SYNX_TYPE_CSL = 0, + SYNX_MAX_BIND_TYPES, +}; + +/** + * struct synx_register_params - External registration parameters + * + * @ops : Bind operations struct + * @name : External client name + * Only first 64 bytes are accepted, rest will be ignored + * @type : Synx bind client type + */ +struct synx_register_params { + struct bind_operations ops; + char *name; + enum synx_bind_client_type type; +}; + +/** + * struct synx_queue_desc - Memory descriptor of the queue allocated by + * the fence driver for each client during + * register. + * + * @vaddr : CPU virtual address of the queue. + * @dev_addr : Physical address of the memory object. + * @size : Size of the memory. + * @mem_data : Internal pointer with the attributes of the allocation. 
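+ *
+ * Minimal caller sketch (assumes the framework fills this descriptor
+ * when one is supplied through synx_initialization_params.ptr; the
+ * field values are illustrative only):
+ *
+ *	struct synx_queue_desc qdesc = {0};
+ *	struct synx_initialization_params p = {
+ *		.name = "my-client",
+ *		.ptr  = &qdesc,
+ *		.id   = SYNX_CLIENT_NATIVE,
+ *	};
+ *
+ *	session = synx_initialize(&p);
+ *	// on success qdesc.vaddr/qdesc.size describe the client queue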
+ */ +struct synx_queue_desc { + void *vaddr; + u64 dev_addr; + u64 size; + void *mem_data; +}; + +/** + * enum synx_client_id : Unique identifier of the supported clients + * + * @SYNX_CLIENT_NATIVE : Native Client + * @SYNX_CLIENT_GFX_CTX0 : GFX Client 0 + * @SYNX_CLIENT_DPU_CTL0 : DPU Client 0 + * @SYNX_CLIENT_DPU_CTL1 : DPU Client 1 + * @SYNX_CLIENT_DPU_CTL2 : DPU Client 2 + * @SYNX_CLIENT_DPU_CTL3 : DPU Client 3 + * @SYNX_CLIENT_DPU_CTL4 : DPU Client 4 + * @SYNX_CLIENT_DPU_CTL5 : DPU Client 5 + * @SYNX_CLIENT_EVA_CTX0 : EVA Client 0 + * @SYNX_CLIENT_VID_CTX0 : Video Client 0 + * @SYNX_CLIENT_NSP_CTX0 : NSP Client 0 + * @SYNX_CLIENT_IFE_CTX0 : IFE Client 0 + */ +enum synx_client_id { + SYNX_CLIENT_NATIVE = 0, + SYNX_CLIENT_GFX_CTX0, + SYNX_CLIENT_DPU_CTL0, + SYNX_CLIENT_DPU_CTL1, + SYNX_CLIENT_DPU_CTL2, + SYNX_CLIENT_DPU_CTL3, + SYNX_CLIENT_DPU_CTL4, + SYNX_CLIENT_DPU_CTL5, + SYNX_CLIENT_EVA_CTX0, + SYNX_CLIENT_VID_CTX0, + SYNX_CLIENT_NSP_CTX0, + SYNX_CLIENT_IFE_CTX0, + SYNX_CLIENT_MAX, +}; + +/** + * struct synx_session - Client session identifier + * + * @type : Session type + * @client : Pointer to client session + */ +struct synx_session { + u32 type; + void *client; +}; + +/** + * struct synx_initialization_params - Session params + * + * @name : Client session name + * Only first 64 bytes are accepted, rest will be ignored + * @ptr : Pointer to queue descriptor (filled by function) + * @id : Client identifier + * @flags : Synx initialization flags + */ +struct synx_initialization_params { + const char *name; + struct synx_queue_desc *ptr; + enum synx_client_id id; + enum synx_init_flags flags; +}; + +/** + * struct synx_create_params - Synx creation parameters + * + * @name : Optional parameter associating a name with the synx + * object for debug purposes + * Only first 64 bytes are accepted, + * rest will be ignored + * @h_synx : Pointer to synx object handle (filled by function) + * @fence : Pointer to external fence + * @flags : Synx flags for customization (mentioned below) + * + * SYNX_CREATE_GLOBAL_FENCE - Hints the framework to create global synx object + * If flag not set, hints framework to create a local synx object. + * SYNX_CREATE_DMA_FENCE - Wrap synx object with dma fence. + * Need to pass the dma_fence ptr through 'fence' variable if this flag is set. + * SYNX_CREATE_BIND_FENCE - Create a synx object with provided external fence. + * Establishes interop with supported external fence through bind operations. + * Need to fill synx_external_desc structure if this flag is set. + */ + +struct synx_create_params { + const char *name; + u32 *h_synx; + void *fence; + enum synx_create_flags flags; +}; + +/** + * enum synx_merge_flags - Handle merge flags + * + * SYNX_MERGE_LOCAL_FENCE : Create local composite object. + * SYNX_MERGE_GLOBAL_FENCE : Create global composite object. 
+ * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects + * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object + */ +enum synx_merge_flags { + SYNX_MERGE_LOCAL_FENCE = 0x01, + SYNX_MERGE_GLOBAL_FENCE = 0x02, + SYNX_MERGE_NOTIFY_ON_ALL = 0x04, + SYNX_MERGE_NOTIFY_ON_ANY = 0x08, +}; + +/* + * struct synx_merge_params - Synx merge parameters + * + * @h_synxs : Pointer to a array of synx handles to be merged + * @flags : Merge flags + * @num_objs : Number of synx objs in the block + * @h_merged_obj : Merged synx object handle (filled by function) + */ +struct synx_merge_params { + u32 *h_synxs; + enum synx_merge_flags flags; + u32 num_objs; + u32 *h_merged_obj; +}; + +/** + * enum synx_import_type - Import type + * + * SYNX_IMPORT_INDV_PARAMS : Import filled with synx_import_indv_params struct + * SYNX_IMPORT_ARR_PARAMS : Import filled with synx_import_arr_params struct + */ +enum synx_import_type { + SYNX_IMPORT_INDV_PARAMS = 0x01, + SYNX_IMPORT_ARR_PARAMS = 0x02, +}; + +/** + * struct synx_import_indv_params - Synx import indv parameters + * + * @new_h_synxs : Pointer to new synx object + * (filled by the function) + * The new handle/s should be used by importing + * process for all synx api operations and + * for sharing with FW cores. + * @flags : Synx flags + * @fence : Pointer to external fence + */ +struct synx_import_indv_params { + u32 *new_h_synx; + enum synx_import_flags flags; + void *fence; +}; + +/** + * struct synx_import_arr_params - Synx import arr parameters + * + * @list : Array of synx_import_indv_params pointers + * @num_fences : No of fences passed to framework + */ +struct synx_import_arr_params { + struct synx_import_indv_params *list; + u32 num_fences; +}; + +/** + * struct synx_import_params - Synx import parameters + * + * @type : Import params type filled by client + * @indv : Params to import an individual handle/fence + * @arr : Params to import an array of handles/fences + */ +struct synx_import_params { + enum synx_import_type type; + union { + struct synx_import_indv_params indv; + struct synx_import_arr_params arr; + }; +}; + +/** + * struct synx_callback_params - Synx callback parameters + * + * @h_synx : Synx object handle + * @cb_func : Pointer to callback func to be invoked + * @userdata : Opaque pointer passed back with callback + * @cancel_cb_func : Pointer to callback to ack cancellation (optional) + */ +struct synx_callback_params { + u32 h_synx; + synx_user_callback_t cb_func; + void *userdata; + synx_user_callback_t cancel_cb_func; +}; + +/* Kernel APIs */ + +/* synx_register_ops - Register operations for external synchronization + * + * Register with synx for enabling external synchronization through bind + * + * @param params : Pointer to register params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if params are invalid. + * -SYNX_NOMEM will be returned if bind ops cannot be registered due to + * insufficient memory. + * -SYNX_ALREADY will be returned if type already in use. + */ +int synx_register_ops(const struct synx_register_params *params); + +/** + * synx_deregister_ops - De-register external synchronization operations + * + * @param params : Pointer to register params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if record not found. 
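+ *
+ * Illustrative sketch (hypothetical caller): the same params block
+ * that was handed to synx_register_ops() is passed back when the
+ * external fence driver is removed:
+ *
+ *	synx_deregister_ops(&params);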
+ */ +int synx_deregister_ops(const struct synx_register_params *params); + +/** + * synx_initialize - Initializes a new client session + * + * @param params : Pointer to session init params + * + * @return Client session pointer on success. NULL or error in case of failure. + */ +struct synx_session *synx_initialize(struct synx_initialization_params *params); + +/** + * synx_uninitialize - Destroys the client session + * + * @param session : Session ptr (returned from synx_initialize) + * + * @return Status of operation. SYNX_SUCCESS in case of success. + */ +int synx_uninitialize(struct synx_session *session); + +/** + * synx_create - Creates a synx object + * + * Creates a new synx obj and returns the handle to client. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to create params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if params were invalid. + * -SYNX_NOMEM will be returned if the kernel can't allocate space for + * synx object. + */ +int synx_create(struct synx_session *session, struct synx_create_params *params); + +/** + * synx_async_wait - Registers a callback with a synx object + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Callback params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if userdata is invalid. + * -SYNX_NOMEM will be returned if cb_func is invalid. + */ +int synx_async_wait(struct synx_session *session, struct synx_callback_params *params); + +/** + * synx_cancel_async_wait - De-registers a callback with a synx object + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Callback params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_ALREADY if object has already been signaled, and cannot be cancelled. + * -SYNX_INVALID will be returned if userdata is invalid. + * -SYNX_NOMEM will be returned if cb_func is invalid. + */ +int synx_cancel_async_wait(struct synx_session *session, + struct synx_callback_params *params); + +/** + * synx_signal - Signals a synx object with the status argument. + * + * This function will signal the synx object referenced by h_synx + * and invoke any external binding synx objs. + * The status parameter will indicate whether the entity + * performing the signaling wants to convey an error case or a success case. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle + * @param status : Status of signaling. + * Clients can send custom signaling status + * beyond SYNX_STATE_SIGNALED_MAX. + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_signal(struct synx_session *session, u32 h_synx, + enum synx_signal_status status); + +/** + * synx_merge - Merges multiple synx objects + * + * This function will merge multiple synx objects into a synx group. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Merge params + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_merge(struct synx_session *session, struct synx_merge_params *params); + +/** + * synx_wait - Waits for a synx object synchronously + * + * Does a wait on the synx object identified by h_synx for a maximum + * of timeout_ms milliseconds. Must not be called from interrupt context as + * this API can sleep. 
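+ *
+ * Illustrative sketch (the 1000 ms timeout is an arbitrary example):
+ *
+ *	rc = synx_wait(session, h_synx, 1000);
+ *	if (rc == SYNX_STATE_SIGNALED_SUCCESS)
+ *		;	// producer signalled success
+ *	else if (rc == -SYNX_TIMEOUT)
+ *		;	// no signal within the timeout
+ *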
+ * Will return status if handle was signaled. Status can be from pre-defined + * states (enum synx_signal_status) or custom status sent by producer. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle to be waited upon + * @param timeout_ms : Timeout in ms + * + * @return Signal status. -SYNX_INVAL if synx object is in bad state or arguments + * are invalid, -SYNX_TIMEOUT if wait times out. + */ +int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms); + +/** + * synx_get_status - Returns the status of the synx object + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle + * + * @return Status of the synx object. + */ +int synx_get_status(struct synx_session *session, u32 h_synx); + +/** + * synx_import - Imports (looks up) synx object from given handle/fence + * + * Import subscribes the client session for notification on signal + * of handles/fences. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to import params + * + * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state + */ +int synx_import(struct synx_session *session, struct synx_import_params *params); + +/** + * synx_get_fence - Get the native fence backing the synx object + * + * Function returns the native fence. Clients need to + * acquire & release additional reference explicitly. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle + * + * @return Fence pointer upon success, NULL or error in case of failure. + */ +void *synx_get_fence(struct synx_session *session, u32 h_synx); + +/** + * synx_release - Release the synx object + * + * Decrements refcount of a synx object by 1, and destroys it + * if becomes 0. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle to be destroyed + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_release(struct synx_session *session, u32 h_synx); + +/** + * synx_recover - Recover any possible handle leaks + * + * Function should be called on HW hang/reset to + * recover the Synx handles shared. This cleans up + * Synx handles held by the rest HW, and avoids + * potential resource leaks. + * + * Function does not destroy the session, but only + * recover synx handles belonging to the session. + * Synx session would still be active and clients + * need to destroy the session explicitly through + * synx_uninitialize API. + * + * @param id : Client ID of core to recover + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_recover(enum synx_client_id id); + +#endif /* __SYNX_API_H__ */ diff --git a/msm/synx/synx_debugfs.c b/msm/synx/synx_debugfs.c new file mode 100644 index 0000000000..8d11ae9ff6 --- /dev/null +++ b/msm/synx/synx_debugfs.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "synx_api.h" +#include "synx_debugfs.h" +#include "synx_util.h" + +#define MAX_DBG_BUF_SIZE (36 * SYNX_MAX_OBJS) + +struct dentry *my_direc; + +int synx_columns = NAME_COLUMN | ID_COLUMN | + STATE_COLUMN | GLOBAL_COLUMN; +EXPORT_SYMBOL(synx_columns); + +int synx_debug = SYNX_ERR | SYNX_WARN | + SYNX_INFO | SYNX_DBG; +EXPORT_SYMBOL(synx_debug); + +void populate_bound_rows( + struct synx_coredata *row, char *cur, char *end) +{ + int j; + + for (j = 0; j < row->num_bound_synxs; j++) + cur += scnprintf(cur, end - cur, + "\n\tID: %d", + row->bound_synxs[j].external_desc.id); +} +static ssize_t synx_table_read(struct file *file, + char *buf, + size_t count, + loff_t *ppos) +{ + struct synx_device *dev = file->private_data; + struct error_node *err_node, *err_node_tmp; + char *dbuf, *cur, *end; + int rc = SYNX_SUCCESS; + ssize_t len = 0; + + dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL); + if (!dbuf) + return -ENOMEM; + + /* dump client details */ + cur = dbuf; + end = cur + MAX_DBG_BUF_SIZE; + if (synx_columns & NAME_COLUMN) + cur += scnprintf(cur, end - cur, "| Name |"); + if (synx_columns & ID_COLUMN) + cur += scnprintf(cur, end - cur, "| ID |"); + if (synx_columns & STATE_COLUMN) + cur += scnprintf(cur, end - cur, "| Status |"); + if (synx_columns & FENCE_COLUMN) + cur += scnprintf(cur, end - cur, "| Fence |"); + if (synx_columns & COREDATA_COLUMN) + cur += scnprintf(cur, end - cur, "| Coredata |"); + if (synx_columns & GLOBAL_COLUMN) + cur += scnprintf(cur, end - cur, "| Coredata |"); + if (synx_columns & BOUND_COLUMN) + cur += scnprintf(cur, end - cur, "| Bound |"); + cur += scnprintf(cur, end - cur, "\n"); + + rc = synx_global_dump_shared_memory(); + if (rc) { + cur += scnprintf(cur, end - cur, + "Err %d: Failed to dump global shared mem\n", rc); + } + + if (synx_columns & ERROR_CODES && !list_empty( + &dev->error_list)) { + cur += scnprintf( + cur, end - cur, "\nError(s): "); + + mutex_lock(&dev->error_lock); + list_for_each_entry_safe( + err_node, err_node_tmp, + &dev->error_list, node) { + cur += scnprintf(cur, end - cur, + "\n\tTime: %s - ID: %d - Code: %d", + err_node->timestamp, + err_node->h_synx, + err_node->error_code); + list_del(&err_node->node); + kfree(err_node); + } + mutex_unlock(&dev->error_lock); + } + + len = simple_read_from_buffer(buf, count, ppos, + dbuf, cur - dbuf); + kfree(dbuf); + return len; +} + +static ssize_t synx_table_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + return 0; +} + +static const struct file_operations synx_table_fops = { + .owner = THIS_MODULE, + .read = synx_table_read, + .write = synx_table_write, + .open = simple_open, +}; + +struct dentry *synx_init_debugfs_dir(struct synx_device *dev) +{ + struct dentry *dir = NULL; + + dir = debugfs_create_dir("synx_debug", NULL); + if (!dir) { + dprintk(SYNX_ERR, "Failed to create debugfs for synx\n"); + return NULL; + } + + debugfs_create_u32("debug_level", 0644, dir, &synx_debug); + debugfs_create_u32("column_level", 0644, dir, &synx_columns); + + if (!debugfs_create_file("synx_table", + 0644, dir, dev, &synx_table_fops)) { + dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n"); + return NULL; + } + + return dir; +} + +void synx_remove_debugfs_dir(struct synx_device *dev) +{ + debugfs_remove_recursive(dev->debugfs_root); +} diff --git a/msm/synx/synx_debugfs.h b/msm/synx/synx_debugfs.h new file mode 100644 index 0000000000..0692a89281 --- /dev/null +++ b/msm/synx/synx_debugfs.h @@ -0,0 
+1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_DEBUGFS_H__ +#define __SYNX_DEBUGFS_H__ + +#include +#include + +#include "synx_private.h" + +enum synx_debug_level { + SYNX_ERR = 0x0001, + SYNX_WARN = 0x0002, + SYNX_INFO = 0x0004, + SYNX_DBG = 0x0008, + SYNX_VERB = 0x0010, + SYNX_IPCL = 0x0020, + SYNX_GSM = 0x0040, + SYNX_MEM = 0x0080, + SYNX_ALL = SYNX_ERR | SYNX_WARN | SYNX_INFO | + SYNX_DBG | SYNX_IPCL | SYNX_GSM | SYNX_MEM, +}; + +enum synx_columns_level { + NAME_COLUMN = 0x0001, + ID_COLUMN = 0x0002, + BOUND_COLUMN = 0x0004, + STATE_COLUMN = 0x0008, + FENCE_COLUMN = 0x0010, + COREDATA_COLUMN = 0x0020, + GLOBAL_COLUMN = 0x0040, + ERROR_CODES = 0x8000, +}; + +#ifndef SYNX_DBG_LABEL +#define SYNX_DBG_LABEL "synx" +#endif + +#define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: " + +extern int synx_debug; + +static inline char *synx_debug_str(int level) +{ + switch (level) { + case SYNX_ERR: + return "err"; + case SYNX_WARN: + return "warn"; + case SYNX_INFO: + return "info"; + case SYNX_DBG: + return "dbg"; + case SYNX_VERB: + return "verb"; + case SYNX_IPCL: + return "ipcl"; + case SYNX_GSM: + return "gmem"; + case SYNX_MEM: + return "mem"; + default: + return "???"; + } +} + +#define dprintk(__level, __fmt, arg...) \ + do { \ + if (synx_debug & __level) { \ + pr_info(SYNX_DBG_TAG "%s: %d: " __fmt, \ + synx_debug_str(__level), __func__, \ + __LINE__, ## arg); \ + } \ + } while (0) + +/** + * synx_init_debugfs_dir - Initializes debugfs + * + * @param dev : Pointer to synx device structure + */ +struct dentry *synx_init_debugfs_dir(struct synx_device *dev); + +/** + * synx_remove_debugfs_dir - Removes debugfs + * + * @param dev : Pointer to synx device structure + */ +void synx_remove_debugfs_dir(struct synx_device *dev); + +#endif /* __SYNX_DEBUGFS_H__ */ diff --git a/msm/synx/synx_err.h b/msm/synx/synx_err.h new file mode 100644 index 0000000000..58bfb561f7 --- /dev/null +++ b/msm/synx/synx_err.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_ERR_H__ +#define __SYNX_ERR_H__ + +#include + +/** + * Error codes returned from framework + * + * Return codes are mapped to platform specific + * return values. + */ +#define SYNX_SUCCESS 0 +#define SYNX_NOMEM ENOMEM +#define SYNX_NOSUPPORT EOPNOTSUPP +#define SYNX_NOPERM EPERM +#define SYNX_TIMEOUT ETIMEDOUT +#define SYNX_ALREADY EALREADY +#define SYNX_NOENT ENOENT +#define SYNX_INVALID EINVAL +#define SYNX_BUSY EBUSY + +#endif /* __SYNX_ERR_H__ */ diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c new file mode 100644 index 0000000000..4ebf1b9cf4 --- /dev/null +++ b/msm/synx/synx_global.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include + +#include "synx_debugfs.h" +#include "synx_global.h" + +static struct synx_shared_mem synx_gmem; +static struct hwspinlock *synx_hwlock; + +static u32 synx_gmem_lock_owner(u32 idx) +{ + /* + * subscribers field of global table index 0 is used to + * maintain synx gmem lock owner data. + * core updates the field after acquiring the lock and + * before releasing the lock appropriately. 
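+ *
+ * This is what lets SSR recovery spot a lock still held by a crashed
+ * core and force-release it; synx_global_recover() below does, in
+ * essence:
+ *
+ *	if (synx_gmem_lock_owner(0) == core_id) {
+ *		synx_gmem_lock_owner_clear(0);
+ *		hwspin_unlock_raw(synx_hwlock);
+ *	}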
+ */ + return synx_gmem.table[0].subscribers; +} + +static void synx_gmem_lock_owner_set(u32 idx) +{ + synx_gmem.table[0].subscribers = SYNX_CORE_APSS; +} + +static void synx_gmem_lock_owner_clear(u32 idx) +{ + if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS) + dprintk(SYNX_WARN, "reset lock owned by core %u\n", + synx_gmem.table[0].subscribers); + + synx_gmem.table[0].subscribers = SYNX_CORE_MAX; +} + +static int synx_gmem_lock(u32 idx, unsigned long *flags) +{ + int rc; + + if (!synx_hwlock) + return -SYNX_INVALID; + + rc = hwspin_lock_timeout_irqsave( + synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags); + if (!rc) + synx_gmem_lock_owner_set(idx); + + return rc; +} + +static void synx_gmem_unlock(u32 idx, unsigned long *flags) +{ + synx_gmem_lock_owner_clear(idx); + hwspin_unlock_irqrestore(synx_hwlock, flags); +} + +static void synx_global_print_data( + struct synx_global_coredata *synx_g_obj, + const char *func) +{ + int i = 0; + + dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u", + func, synx_g_obj->status, + synx_g_obj->handle, synx_g_obj->refcount); + + dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u", + func, synx_g_obj->subscribers, synx_g_obj->waiters, + synx_g_obj->num_child); + + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) + if (synx_g_obj->parents[i]) + dprintk(SYNX_VERB, "%s: parents %u:%u", + func, i, synx_g_obj->parents[i]); +} + +int synx_global_dump_shared_memory(void) +{ + int rc = SYNX_SUCCESS, idx; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_INVALID; + + /* Print bitmap memory*/ + for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) { + rc = synx_gmem_lock(idx, &flags); + + if (rc) + return rc; + + dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d", + __func__, idx, synx_gmem.bitmap[idx]); + + synx_gmem_unlock(idx, &flags); + } + + /* Print table memory*/ + for (idx = 0; + idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT; + idx++) { + rc = synx_gmem_lock(idx, &flags); + + if (rc) + return rc; + + dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx); + + synx_g_obj = &synx_gmem.table[idx]; + synx_global_print_data(synx_g_obj, __func__); + + synx_gmem_unlock(idx, &flags); + } + return rc; +} + +static int synx_gmem_init(void) +{ + if (!synx_gmem.table) + return -SYNX_NOMEM; + + synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID); + if (!synx_hwlock) { + dprintk(SYNX_ERR, "hwspinlock request failed\n"); + return -SYNX_NOMEM; + } + + /* zero idx not allocated for clients */ + ipclite_global_test_and_set_bit(0, + (ipclite_atomic_uint32_t *)synx_gmem.bitmap); + memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata)); + + return SYNX_SUCCESS; +} + +u32 synx_global_map_core_id(enum synx_core_id id) +{ + u32 host_id; + + switch (id) { + case SYNX_CORE_APSS: + host_id = IPCMEM_APPS; break; + case SYNX_CORE_NSP: + host_id = IPCMEM_CDSP; break; + case SYNX_CORE_IRIS: + host_id = IPCMEM_VPU; break; + case SYNX_CORE_EVA: + host_id = IPCMEM_CVP; break; + default: + host_id = IPCMEM_NUM_HOSTS; + dprintk(SYNX_ERR, "invalid core id\n"); + } + + return host_id; +} + +int synx_global_alloc_index(u32 *idx) +{ + int rc = SYNX_SUCCESS; + u32 prev, index; + const u32 size = SYNX_GLOBAL_MAX_OBJS; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (IS_ERR_OR_NULL(idx)) + return -SYNX_INVALID; + + do { + index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size); + if (index >= size) { + rc = -SYNX_NOMEM; + break; + } + prev = ipclite_global_test_and_set_bit(index 
% 32, + (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32)); + if ((prev & (1UL << (index % 32))) == 0) { + *idx = index; + dprintk(SYNX_MEM, "allocated global idx %u\n", *idx); + break; + } + } while (true); + + return rc; +} + +int synx_global_init_coredata(u32 h_synx) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (!synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + memset(synx_g_obj, 0, sizeof(*synx_g_obj)); + /* set status to active */ + synx_g_obj->status = SYNX_STATE_ACTIVE; + synx_g_obj->refcount = 1; + synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS); + synx_g_obj->handle = h_synx; + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + +static int synx_global_get_waiting_cores_locked( + struct synx_global_coredata *synx_g_obj, + bool *cores) +{ + int i; + + synx_global_print_data(synx_g_obj, __func__); + for (i = 0; i < SYNX_CORE_MAX; i++) { + if (synx_g_obj->waiters & (1UL << i)) { + cores[i] = true; + dprintk(SYNX_VERB, + "waiting for handle %u/n", + synx_g_obj->handle); + } + } + + /* clear waiter list so signals are not repeated */ + synx_g_obj->waiters = 0; + + return SYNX_SUCCESS; +} + +int synx_global_get_waiting_cores(u32 idx, bool *cores) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_global_get_waiting_cores_locked(synx_g_obj, cores); + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + +int synx_global_set_waiting_core(u32 idx, enum synx_core_id id) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_g_obj->waiters |= (1UL << id); + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + +int synx_global_get_subscribed_cores(u32 idx, bool *cores) +{ + int i; + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + for (i = 0; i < SYNX_CORE_MAX; i++) + if (synx_g_obj->subscribers & (1UL << i)) + cores[i] = true; + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + +int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_g_obj->subscribers |= (1UL << id); + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + +u32 synx_global_get_parents_num(u32 idx) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + u32 i, count = 0; + + if (!synx_gmem.table) + return 0; + + if (!synx_is_valid_idx(idx)) 
+ return 0; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (synx_g_obj->parents[i] != 0) + count++; + } + synx_gmem_unlock(idx, &flags); + + return count; +} + +static int synx_global_get_parents_locked( + struct synx_global_coredata *synx_g_obj, u32 *parents) +{ + u32 i; + + if (!synx_g_obj || !parents) + return -SYNX_NOMEM; + + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) + parents[i] = synx_g_obj->parents[i]; + + return SYNX_SUCCESS; +} + +int synx_global_get_parents(u32 idx, u32 *parents) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table || !parents) + return -SYNX_NOMEM; + + if (!synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + rc = synx_global_get_parents_locked(synx_g_obj, parents); + synx_gmem_unlock(idx, &flags); + + return rc; +} + +u32 synx_global_get_status(u32 idx) +{ + int rc; + unsigned long flags; + u32 status; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return 0; + + if (!synx_is_valid_idx(idx)) + return 0; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + status = synx_g_obj->status; + synx_gmem_unlock(idx, &flags); + + return status; +} + +u32 synx_global_test_status_set_wait(u32 idx, + enum synx_core_id id) +{ + int rc; + unsigned long flags; + u32 status; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return 0; + + if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx)) + return 0; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return 0; + synx_g_obj = &synx_gmem.table[idx]; + synx_global_print_data(synx_g_obj, __func__); + status = synx_g_obj->status; + /* if handle is still ACTIVE */ + if (status == SYNX_STATE_ACTIVE) + synx_g_obj->waiters |= (1UL << id); + else + dprintk(SYNX_DBG, "handle %u already signaled %u", + synx_g_obj->handle, synx_g_obj->status); + synx_gmem_unlock(idx, &flags); + + return status; +} + +static int synx_global_update_status_core(u32 idx, + u32 status) +{ + u32 i, p_idx; + int rc; + bool clear = false; + unsigned long flags; + uint64_t data; + struct synx_global_coredata *synx_g_obj; + u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0}; + bool wait_cores[SYNX_CORE_MAX] = {false}; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_global_print_data(synx_g_obj, __func__); + /* prepare for cross core signaling */ + data = synx_g_obj->handle; + data <<= 32; + if (synx_g_obj->num_child != 0) { + /* composite handle */ + synx_g_obj->num_child--; + if (synx_g_obj->num_child == 0) { + if (synx_g_obj->status == SYNX_STATE_ACTIVE) { + synx_g_obj->status = + (status == SYNX_STATE_SIGNALED_SUCCESS) ? 
+ SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR; + data |= synx_g_obj->status; + synx_global_get_waiting_cores_locked(synx_g_obj, + wait_cores); + synx_global_get_parents_locked(synx_g_obj, h_parents); + } else { + data = 0; + dprintk(SYNX_WARN, + "merged handle %u already in state %u\n", + synx_g_obj->handle, synx_g_obj->status); + } + /* release ref held by constituting handles */ + synx_g_obj->refcount--; + if (synx_g_obj->refcount == 0) { + memset(synx_g_obj, 0, + sizeof(*synx_g_obj)); + clear = true; + } + } else if (status != SYNX_STATE_SIGNALED_SUCCESS) { + synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR; + data |= synx_g_obj->status; + synx_global_get_waiting_cores_locked(synx_g_obj, + wait_cores); + synx_global_get_parents_locked(synx_g_obj, h_parents); + dprintk(SYNX_WARN, + "merged handle %u signaled with error state\n", + synx_g_obj->handle); + } else { + /* pending notification from handles */ + data = 0; + dprintk(SYNX_DBG, + "Child notified parent handle %u, pending %u\n", + synx_g_obj->handle, synx_g_obj->num_child); + } + } else { + synx_g_obj->status = status; + data |= synx_g_obj->status; + synx_global_get_waiting_cores_locked(synx_g_obj, + wait_cores); + synx_global_get_parents_locked(synx_g_obj, h_parents); + } + synx_gmem_unlock(idx, &flags); + + if (clear) { + ipclite_global_test_and_clear_bit(idx%32, + (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32)); + dprintk(SYNX_MEM, + "cleared global idx %u\n", idx); + } + + /* notify waiting clients on signal */ + if (data) { + /* notify wait client */ + for (i = 1; i < SYNX_CORE_MAX; i++) { + if (!wait_cores[i]) + continue; + dprintk(SYNX_DBG, + "invoking ipc signal handle %u, status %u\n", + synx_g_obj->handle, synx_g_obj->status); + if (ipclite_msg_send( + synx_global_map_core_id(i), + data)) + dprintk(SYNX_ERR, + "ipc signaling %llu to core %u failed\n", + data, i); + } + } + + /* handle parent notifications */ + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + p_idx = h_parents[i]; + if (p_idx == 0) + continue; + synx_global_update_status_core(p_idx, status); + } + + return SYNX_SUCCESS; +} + +int synx_global_update_status(u32 idx, u32 status) +{ + int rc = -SYNX_INVALID; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + if (synx_g_obj->num_child != 0) { + /* composite handle cannot be signaled */ + goto fail; + } else if (synx_g_obj->status != SYNX_STATE_ACTIVE) { + rc = -SYNX_ALREADY; + goto fail; + } + synx_gmem_unlock(idx, &flags); + + return synx_global_update_status_core(idx, status); + +fail: + synx_gmem_unlock(idx, &flags); + return rc; +} + +int synx_global_get_ref(u32 idx) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (!synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_global_print_data(synx_g_obj, __func__); + if (synx_g_obj->handle && synx_g_obj->refcount) + synx_g_obj->refcount++; + else + rc = -SYNX_NOENT; + synx_gmem_unlock(idx, &flags); + + return rc; +} + +void synx_global_put_ref(u32 idx) +{ + int rc; + bool clear = false; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return; + + if 
(!synx_is_valid_idx(idx)) + return; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return; + synx_g_obj = &synx_gmem.table[idx]; + synx_g_obj->refcount--; + if (synx_g_obj->refcount == 0) { + memset(synx_g_obj, 0, sizeof(*synx_g_obj)); + clear = true; + } + synx_gmem_unlock(idx, &flags); + + if (clear) { + ipclite_global_test_and_clear_bit(idx%32, + (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32)); + dprintk(SYNX_MEM, "cleared global idx %u\n", idx); + } +} + +int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx) +{ + int rc = -SYNX_INVALID; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + u32 i, j = 0; + u32 idx; + bool sig_error = false; + u32 num_child = 0; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (!synx_is_valid_idx(p_idx)) + return -SYNX_INVALID; + + while (j < num_list) { + idx = idx_list[j]; + + if (!synx_is_valid_idx(idx)) + goto fail; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + goto fail; + + synx_g_obj = &synx_gmem.table[idx]; + if (synx_g_obj->status == SYNX_STATE_ACTIVE) { + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (synx_g_obj->parents[i] == 0) { + synx_g_obj->parents[i] = p_idx; + break; + } + } + num_child++; + } else if (synx_g_obj->status > + SYNX_STATE_SIGNALED_SUCCESS) { + sig_error = true; + } + synx_gmem_unlock(idx, &flags); + + if (i >= SYNX_GLOBAL_MAX_PARENTS) { + rc = -SYNX_NOMEM; + goto fail; + } + + j++; + } + + rc = synx_gmem_lock(p_idx, &flags); + if (rc) + goto fail; + synx_g_obj = &synx_gmem.table[p_idx]; + synx_g_obj->num_child += num_child; + if (sig_error) + synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR; + else if (synx_g_obj->num_child != 0) + synx_g_obj->refcount++; + else if (synx_g_obj->num_child == 0 && + synx_g_obj->status == SYNX_STATE_ACTIVE) + synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS; + synx_global_print_data(synx_g_obj, __func__); + synx_gmem_unlock(p_idx, &flags); + + return SYNX_SUCCESS; + +fail: + while (num_child--) { + idx = idx_list[num_child]; + + if (synx_gmem_lock(idx, &flags)) + continue; + synx_g_obj = &synx_gmem.table[idx]; + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (synx_g_obj->parents[i] == p_idx) { + synx_g_obj->parents[i] = 0; + break; + } + } + synx_gmem_unlock(idx, &flags); + } + + return rc; +} + +int synx_global_recover(enum synx_core_id core_id) +{ + int rc = SYNX_SUCCESS; + u32 idx = 0; + const u32 size = SYNX_GLOBAL_MAX_OBJS; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + bool update; + int *clear_idx = NULL; + if (!synx_gmem.table) + return -SYNX_NOMEM; + + clear_idx = kzalloc(sizeof(int)*SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL); + + if (!clear_idx) + return -SYNX_NOMEM; + + ipclite_hwlock_reset(synx_global_map_core_id(core_id)); + + /* recover synx gmem lock if it was owned by core in ssr */ + if (synx_gmem_lock_owner(0) == core_id) { + synx_gmem_lock_owner_clear(0); + hwspin_unlock_raw(synx_hwlock); + } + + idx = find_next_bit((unsigned long *)synx_gmem.bitmap, + size, idx + 1); + while (idx < size) { + update = false; + rc = synx_gmem_lock(idx, &flags); + if (rc) + goto free; + synx_g_obj = &synx_gmem.table[idx]; + if (synx_g_obj->refcount && + synx_g_obj->subscribers & (1UL << core_id)) { + synx_g_obj->subscribers &= ~(1UL << core_id); + synx_g_obj->refcount--; + if (synx_g_obj->refcount == 0) { + memset(synx_g_obj, 0, sizeof(*synx_g_obj)); + clear_idx[idx] = 1; + } else if (synx_g_obj->status == SYNX_STATE_ACTIVE) { + update = true; + } + } + synx_gmem_unlock(idx, &flags); + if (update) + 
synx_global_update_status(idx, + SYNX_STATE_SIGNALED_SSR); + idx = find_next_bit((unsigned long *)synx_gmem.bitmap, + size, idx + 1); + } + + for (idx = 1; idx < size; idx++) { + if (clear_idx[idx]) { + ipclite_global_test_and_clear_bit(idx % 32, + (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32)); + dprintk(SYNX_MEM, "released global idx %u\n", idx); + } + } +free: + kfree(clear_idx); + + return rc; +} + +int synx_global_mem_init(void) +{ + int rc; + int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32; + struct global_region_info mem_info; + + rc = get_global_partition_info(&mem_info); + if (rc) { + dprintk(SYNX_ERR, "error setting up global shared memory\n"); + return rc; + } + + memset(mem_info.virt_base, 0, mem_info.size); + dprintk(SYNX_DBG, "global shared memory %pK size %u\n", + mem_info.virt_base, mem_info.size); + + synx_gmem.bitmap = (u32 *)mem_info.virt_base; + synx_gmem.locks = synx_gmem.bitmap + bitmap_size; + synx_gmem.table = + (struct synx_global_coredata *)(synx_gmem.locks + 2); + dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n", + synx_gmem.bitmap, synx_gmem.table); + + return synx_gmem_init(); +} diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h new file mode 100644 index 0000000000..4e7646cd4d --- /dev/null +++ b/msm/synx/synx_global.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_SHARED_MEM_H__ +#define __SYNX_SHARED_MEM_H__ + +#include "synx_err.h" +#include "ipclite_client.h" + +#include + +/** + * enum synx_core_id - Synx core IDs + * + * SYNX_CORE_APSS : APSS core + * SYNX_CORE_NSP : NSP core + * SYNX_CORE_EVA : EVA core + * SYNX_CORE_IRIS : IRIS core + */ +enum synx_core_id { + SYNX_CORE_APSS = 0, + SYNX_CORE_NSP, + SYNX_CORE_EVA, + SYNX_CORE_IRIS, + SYNX_CORE_MAX, +}; + +/* synx handle encoding */ +#define SYNX_HANDLE_INDEX_BITS 16 +#define SYNX_HANDLE_CORE_BITS 4 +#define SYNX_HANDLE_GLOBAL_FLAG_BIT 1 + +#define SYNX_GLOBAL_SHARED_LOCKS 1 +#define SYNX_GLOBAL_MAX_OBJS 4096 +#define SYNX_GLOBAL_MAX_PARENTS 4 + +#define SYNX_HANDLE_INDEX_MASK ((1UL< +#include +#include +#include +#include +#include +#include + +#include "synx_api.h" +#include "synx_global.h" + +#define SYNX_MAX_OBJS SYNX_GLOBAL_MAX_OBJS + +#define SYNX_NAME "synx" +#define SYNX_DEVICE_NAME "synx_device" +#define SYNX_WQ_CB_NAME "hiprio_synx_cb_queue" +#define SYNX_WQ_CB_THREADS 4 +#define SYNX_WQ_CLEANUP_NAME "hiprio_synx_cleanup_queue" +#define SYNX_WQ_CLEANUP_THREADS 2 +#define SYNX_MAX_NUM_BINDINGS 8 + +#define SYNX_OBJ_HANDLE_SHIFT SYNX_HANDLE_INDEX_BITS +#define SYNX_OBJ_CORE_ID_SHIFT (SYNX_OBJ_HANDLE_SHIFT+SYNX_HANDLE_CORE_BITS) +#define SYNX_OBJ_GLOBAL_FLAG_SHIFT (SYNX_OBJ_CORE_ID_SHIFT+SYNX_HANDLE_GLOBAL_FLAG_BIT) + +#define SYNX_OBJ_HANDLE_MASK GENMASK_ULL(SYNX_OBJ_HANDLE_SHIFT-1, 0) +#define SYNX_OBJ_CORE_ID_MASK GENMASK_ULL(SYNX_OBJ_CORE_ID_SHIFT-1, SYNX_OBJ_HANDLE_SHIFT) +#define SYNX_OBJ_GLOBAL_FLAG_MASK \ + GENMASK_ULL(SYNX_OBJ_GLOBAL_FLAG_SHIFT-1, SYNX_OBJ_CORE_ID_SHIFT) + +#define MAX_TIMESTAMP_SIZE 32 +#define SYNX_OBJ_NAME_LEN 64 + +#define SYNX_PAYLOAD_WORDS 4 + +#define SYNX_CREATE_IM_EX_RELEASE SYNX_CREATE_MAX_FLAGS +#define SYNX_CREATE_MERGED_FENCE (SYNX_CREATE_MAX_FLAGS << 1) + +#define SYNX_MAX_REF_COUNTS 100 + +struct synx_bind_desc { + struct synx_external_desc_v2 external_desc; + void *external_data; +}; + +struct error_node { + char timestamp[32]; + u64 session; + u32 client_id; + u32 h_synx; + s32 error_code; + struct list_head node; 
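+	/*
+	 * Descriptive note: error nodes collect on synx_device.error_list
+	 * (guarded by error_lock) and are drained either by the debugfs
+	 * table read (synx_table_read()) or freed at module exit
+	 * (synx_exit()).
+	 */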
+}; + +struct synx_entry_32 { + u32 key; + void *data; + struct hlist_node node; +}; + +struct synx_entry_64 { + u64 key; + u32 data[2]; + struct kref refcount; + struct hlist_node node; +}; + +struct synx_map_entry { + struct synx_coredata *synx_obj; + struct kref refcount; + u32 flags; + u32 key; + struct work_struct dispatch; + struct hlist_node node; +}; + +struct synx_fence_entry { + u32 g_handle; + u32 l_handle; + u64 key; + struct hlist_node node; +}; + +struct synx_kernel_payload { + u32 h_synx; + u32 status; + void *data; + synx_user_callback_t cb_func; + synx_user_callback_t cancel_cb_func; +}; + +struct synx_cb_data { + struct synx_session *session; + u32 idx; + u32 status; + struct work_struct cb_dispatch; + struct list_head node; +}; + +struct synx_client_cb { + bool is_valid; + u32 idx; + struct synx_client *client; + struct synx_kernel_payload kernel_cb; + struct list_head node; +}; + +struct synx_registered_ops { + char name[SYNX_OBJ_NAME_LEN]; + struct bind_operations ops; + enum synx_bind_client_type type; + bool valid; +}; + +struct synx_cleanup_cb { + void *data; + struct work_struct cb_dispatch; +}; + +enum synx_signal_handler { + SYNX_SIGNAL_FROM_CLIENT = 0x1, + SYNX_SIGNAL_FROM_FENCE = 0x2, + SYNX_SIGNAL_FROM_IPC = 0x4, + SYNX_SIGNAL_FROM_CALLBACK = 0x8, +}; + +struct synx_signal_cb { + u32 handle; + u32 status; + u64 ext_sync_id; + struct synx_coredata *synx_obj; + enum synx_signal_handler flag; + struct dma_fence_cb fence_cb; + struct work_struct cb_dispatch; +}; + +struct synx_coredata { + char name[SYNX_OBJ_NAME_LEN]; + struct dma_fence *fence; + struct mutex obj_lock; + struct kref refcount; + u32 type; + u32 num_bound_synxs; + struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS]; + struct list_head reg_cbs_list; + u32 global_idx; + u32 map_count; + struct synx_signal_cb *signal_cb; +}; + +struct synx_client; +struct synx_device; + +struct synx_handle_coredata { + struct synx_client *client; + struct synx_coredata *synx_obj; + void *map_entry; + struct kref refcount; + u32 key; + u32 rel_count; + struct work_struct dispatch; + struct hlist_node node; +}; + +struct synx_client { + u32 type; + bool active; + struct synx_device *device; + char name[SYNX_OBJ_NAME_LEN]; + u64 id; + u64 dma_context; + struct kref refcount; + struct mutex event_q_lock; + struct list_head event_q; + wait_queue_head_t event_wq; + DECLARE_BITMAP(cb_bitmap, SYNX_MAX_OBJS); + struct synx_client_cb cb_table[SYNX_MAX_OBJS]; + DECLARE_HASHTABLE(handle_map, 8); + spinlock_t handle_map_lock; + struct work_struct dispatch; + struct hlist_node node; +}; + +struct synx_native { + spinlock_t metadata_map_lock; + DECLARE_HASHTABLE(client_metadata_map, 8); + spinlock_t fence_map_lock; + DECLARE_HASHTABLE(fence_map, 10); + spinlock_t global_map_lock; + DECLARE_HASHTABLE(global_map, 10); + spinlock_t local_map_lock; + DECLARE_HASHTABLE(local_map, 8); + spinlock_t csl_map_lock; + DECLARE_HASHTABLE(csl_fence_map, 8); + DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS); +}; + +struct synx_cdsp_ssr { + u64 ssrcnt; + void *handle; + struct notifier_block nb; +}; + +struct synx_device { + struct cdev cdev; + dev_t dev; + struct class *class; + struct synx_native *native; + struct workqueue_struct *wq_cb; + struct workqueue_struct *wq_cleanup; + struct mutex vtbl_lock; + struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES]; + struct dentry *debugfs_root; + struct list_head error_list; + struct mutex error_lock; + struct synx_cdsp_ssr cdsp_ssr; +}; + +int synx_signal_core(struct synx_coredata *synx_obj, + u32 
status, + bool cb_signal, + s32 ext_sync_id); + +int synx_ipc_callback(uint32_t client_id, + int64_t data, void *priv); + +void synx_signal_handler(struct work_struct *cb_dispatch); + +int synx_native_release_core(struct synx_client *session, + u32 h_synx); + +int synx_bind(struct synx_session *session, + u32 h_synx, + struct synx_external_desc_v2 external_sync); + +#endif /* __SYNX_PRIVATE_H__ */ diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c new file mode 100644 index 0000000000..689a571c42 --- /dev/null +++ b/msm/synx/synx_util.c @@ -0,0 +1,1525 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "synx_debugfs.h" +#include "synx_util.h" + +extern void synx_external_callback(s32 sync_obj, int status, void *data); + +int synx_util_init_coredata(struct synx_coredata *synx_obj, + struct synx_create_params *params, + struct dma_fence_ops *ops, + u64 dma_context) +{ + int rc = -SYNX_INVALID; + spinlock_t *fence_lock; + struct dma_fence *fence; + struct synx_fence_entry *entry; + + if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx)) + return -SYNX_INVALID; + + if (params->flags & SYNX_CREATE_GLOBAL_FENCE && + *params->h_synx != 0) { + rc = synx_global_get_ref( + synx_util_global_idx(*params->h_synx)); + synx_obj->global_idx = synx_util_global_idx(*params->h_synx); + } else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) { + rc = synx_alloc_global_handle(params->h_synx); + synx_obj->global_idx = synx_util_global_idx(*params->h_synx); + } else { + rc = synx_alloc_local_handle(params->h_synx); + } + + if (rc != SYNX_SUCCESS) + return rc; + + synx_obj->map_count = 1; + synx_obj->num_bound_synxs = 0; + synx_obj->type |= params->flags; + kref_init(&synx_obj->refcount); + mutex_init(&synx_obj->obj_lock); + INIT_LIST_HEAD(&synx_obj->reg_cbs_list); + if (params->name) + strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name)); + + if (params->flags & SYNX_CREATE_DMA_FENCE) { + fence = params->fence; + if (IS_ERR_OR_NULL(fence)) { + dprintk(SYNX_ERR, "invalid external fence\n"); + goto free; + } + + dma_fence_get(fence); + synx_obj->fence = fence; + } else { + /* + * lock and fence memory will be released in fence + * release function + */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (IS_ERR_OR_NULL(fence_lock)) { + rc = -SYNX_NOMEM; + goto free; + } + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (IS_ERR_OR_NULL(fence)) { + kfree(fence_lock); + rc = -SYNX_NOMEM; + goto free; + } + + spin_lock_init(fence_lock); + dma_fence_init(fence, ops, fence_lock, dma_context, 1); + + synx_obj->fence = fence; + synx_util_activate(synx_obj); + dprintk(SYNX_MEM, + "allocated backing fence %pK\n", fence); + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (IS_ERR_OR_NULL(entry)) { + rc = -SYNX_NOMEM; + goto clean; + } + + entry->key = (u64)fence; + if (params->flags & SYNX_CREATE_GLOBAL_FENCE) + entry->g_handle = *params->h_synx; + else + entry->l_handle = *params->h_synx; + + rc = synx_util_insert_fence_entry(entry, + params->h_synx, + params->flags & SYNX_CREATE_GLOBAL_FENCE); + BUG_ON(rc != SYNX_SUCCESS); + } + + if (rc != SYNX_SUCCESS) + goto clean; + + return SYNX_SUCCESS; + +clean: + dma_fence_put(fence); +free: + if (params->flags & SYNX_CREATE_GLOBAL_FENCE) + synx_global_put_ref( + 
synx_util_global_idx(*params->h_synx)); + else + clear_bit(synx_util_global_idx(*params->h_synx), + synx_dev->native->bitmap); + + return rc; +} + +int synx_util_add_callback(struct synx_coredata *synx_obj, + u32 h_synx) +{ + int rc; + struct synx_signal_cb *signal_cb; + + if (IS_ERR_OR_NULL(synx_obj)) + return -SYNX_INVALID; + + signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL); + if (IS_ERR_OR_NULL(signal_cb)) + return -SYNX_NOMEM; + + signal_cb->handle = h_synx; + signal_cb->flag = SYNX_SIGNAL_FROM_FENCE; + signal_cb->synx_obj = synx_obj; + + /* get reference on synx coredata for signal cb */ + synx_util_get_object(synx_obj); + + /* + * adding callback enables synx framework to + * get notified on signal from clients using + * native dma fence operations. + */ + rc = dma_fence_add_callback(synx_obj->fence, + &signal_cb->fence_cb, synx_fence_callback); + if (rc != 0) { + if (rc == -ENOENT) { + if (synx_util_is_global_object(synx_obj)) { + /* signal (if) global handle */ + rc = synx_global_update_status( + synx_obj->global_idx, + synx_util_get_object_status(synx_obj)); + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "status update of %u with fence %pK\n", + synx_obj->global_idx, synx_obj->fence); + } else { + rc = SYNX_SUCCESS; + } + } else { + dprintk(SYNX_ERR, + "error adding callback for %pK err %d\n", + synx_obj->fence, rc); + } + synx_util_put_object(synx_obj); + kfree(signal_cb); + return rc; + } + + synx_obj->signal_cb = signal_cb; + dprintk(SYNX_VERB, "added callback %pK to fence %pK\n", + signal_cb, synx_obj->fence); + + return SYNX_SUCCESS; +} + +int synx_util_init_group_coredata(struct synx_coredata *synx_obj, + struct dma_fence **fences, + struct synx_merge_params *params, + u32 num_objs, + u64 dma_context) +{ + int rc; + struct dma_fence_array *array; + + if (IS_ERR_OR_NULL(synx_obj)) + return -SYNX_INVALID; + + if (params->flags & SYNX_MERGE_GLOBAL_FENCE) { + rc = synx_alloc_global_handle(params->h_merged_obj); + synx_obj->global_idx = + synx_util_global_idx(*params->h_merged_obj); + } else { + rc = synx_alloc_local_handle(params->h_merged_obj); + } + + if (rc != SYNX_SUCCESS) + return rc; + + array = dma_fence_array_create(num_objs, fences, + dma_context, 1, false); + if (IS_ERR_OR_NULL(array)) + return -SYNX_INVALID; + + synx_obj->fence = &array->base; + synx_obj->map_count = 1; + synx_obj->type = params->flags; + synx_obj->type |= SYNX_CREATE_MERGED_FENCE; + synx_obj->num_bound_synxs = 0; + kref_init(&synx_obj->refcount); + mutex_init(&synx_obj->obj_lock); + INIT_LIST_HEAD(&synx_obj->reg_cbs_list); + + synx_util_activate(synx_obj); + return rc; +} + +static void synx_util_destroy_coredata(struct kref *kref) +{ + struct synx_coredata *synx_obj = + container_of(kref, struct synx_coredata, refcount); + + if (synx_util_is_global_object(synx_obj)) + synx_global_put_ref(synx_obj->global_idx); + synx_util_object_destroy(synx_obj); +} + +void synx_util_get_object(struct synx_coredata *synx_obj) +{ + kref_get(&synx_obj->refcount); +} + +void synx_util_put_object(struct synx_coredata *synx_obj) +{ + kref_put(&synx_obj->refcount, synx_util_destroy_coredata); +} + +void synx_util_object_destroy(struct synx_coredata *synx_obj) +{ + int rc; + u32 i; + s32 sync_id; + u32 type; + struct synx_cb_data *synx_cb, *synx_cb_temp; + struct synx_bind_desc *bind_desc; + struct bind_operations *bind_ops; + struct synx_external_data *data; + + /* clear all the undispatched callbacks */ + list_for_each_entry_safe(synx_cb, + synx_cb_temp, &synx_obj->reg_cbs_list, node) { + dprintk(SYNX_ERR, + 
"cleaning up callback of session %pK\n", + synx_cb->session); + list_del_init(&synx_cb->node); + kfree(synx_cb); + } + + for (i = 0; i < synx_obj->num_bound_synxs; i++) { + bind_desc = &synx_obj->bound_synxs[i]; + sync_id = bind_desc->external_desc.id; + type = bind_desc->external_desc.type; + data = bind_desc->external_data; + bind_ops = synx_util_get_bind_ops(type); + if (IS_ERR_OR_NULL(bind_ops)) { + dprintk(SYNX_ERR, + "bind ops fail id: %d, type: %u, err: %d\n", + sync_id, type, rc); + continue; + } + + /* clear the hash table entry */ + synx_util_remove_data(&sync_id, type); + + rc = bind_ops->deregister_callback( + synx_external_callback, data, sync_id); + if (rc < 0) { + dprintk(SYNX_ERR, + "de-registration fail id: %d, type: %u, err: %d\n", + sync_id, type, rc); + continue; + } + + /* + * release the memory allocated for external data. + * It is safe to release this memory + * only if deregistration is successful. + */ + kfree(data); + } + + mutex_destroy(&synx_obj->obj_lock); + synx_util_release_fence_entry((u64)synx_obj->fence); + dma_fence_put(synx_obj->fence); + kfree(synx_obj); + dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj); +} + +long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size) +{ + bool bit; + long idx; + + do { + idx = find_first_zero_bit(bitmap, size); + if (idx >= size) + break; + bit = test_and_set_bit(idx, bitmap); + } while (bit); + + return idx; +} + +u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx) +{ + u32 handle = 0; + + if (idx >= SYNX_MAX_OBJS) + return 0; + + if (global_idx) { + handle = 1; + handle <<= SYNX_HANDLE_CORE_BITS; + } + + handle |= core_id; + handle <<= SYNX_HANDLE_INDEX_BITS; + handle |= idx; + + return handle; +} + +int synx_alloc_global_handle(u32 *new_synx) +{ + int rc; + u32 idx; + + rc = synx_global_alloc_index(&idx); + if (rc != SYNX_SUCCESS) + return rc; + + *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true); + dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n", + *new_synx, *new_synx); + + rc = synx_global_init_coredata(*new_synx); + return rc; +} + +int synx_alloc_local_handle(u32 *new_synx) +{ + u32 idx; + + idx = synx_util_get_free_handle(synx_dev->native->bitmap, + SYNX_MAX_OBJS); + if (idx >= SYNX_MAX_OBJS) + return -SYNX_NOMEM; + + *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false); + dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n", + *new_synx, *new_synx); + + return SYNX_SUCCESS; +} + +int synx_util_init_handle(struct synx_client *client, + struct synx_coredata *synx_obj, u32 *new_h_synx, + void *map_entry) +{ + int rc = SYNX_SUCCESS; + bool found = false; + struct synx_handle_coredata *synx_data, *curr; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) || + IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry)) + return -SYNX_INVALID; + + synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC); + if (IS_ERR_OR_NULL(synx_data)) + return -SYNX_NOMEM; + + synx_data->client = client; + synx_data->synx_obj = synx_obj; + synx_data->key = *new_h_synx; + synx_data->map_entry = map_entry; + kref_init(&synx_data->refcount); + synx_data->rel_count = 1; + + spin_lock_bh(&client->handle_map_lock); + hash_for_each_possible(client->handle_map, + curr, node, *new_h_synx) { + if (curr->key == *new_h_synx) { + if (curr->synx_obj != synx_obj) { + rc = -SYNX_INVALID; + dprintk(SYNX_ERR, + "inconsistent data in handle map\n"); + } else { + kref_get(&curr->refcount); + curr->rel_count++; + } + found = true; + break; + } + } + if (unlikely(found)) + kfree(synx_data); + 
else + hash_add(client->handle_map, + &synx_data->node, *new_h_synx); + spin_unlock_bh(&client->handle_map_lock); + + return rc; +} + +int synx_util_activate(struct synx_coredata *synx_obj) +{ + if (IS_ERR_OR_NULL(synx_obj)) + return -SYNX_INVALID; + + /* move synx to ACTIVE state and register cb for merged object */ + dma_fence_enable_sw_signaling(synx_obj->fence); + return 0; +} + +static u32 synx_util_get_references(struct synx_coredata *synx_obj) +{ + u32 count = 0; + u32 i = 0; + struct dma_fence_array *array = NULL; + + /* obtain dma fence reference */ + if (dma_fence_is_array(synx_obj->fence)) { + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + return 0; + + for (i = 0; i < array->num_fences; i++) + dma_fence_get(array->fences[i]); + count = array->num_fences; + } else { + dma_fence_get(synx_obj->fence); + count = 1; + } + + return count; +} + +static void synx_util_put_references(struct synx_coredata *synx_obj) +{ + u32 i = 0; + struct dma_fence_array *array = NULL; + + if (dma_fence_is_array(synx_obj->fence)) { + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + return; + + for (i = 0; i < array->num_fences; i++) + dma_fence_put(array->fences[i]); + } else { + dma_fence_put(synx_obj->fence); + } +} + +static u32 synx_util_add_fence(struct synx_coredata *synx_obj, + struct dma_fence **fences, + u32 idx) +{ + struct dma_fence_array *array = NULL; + u32 i = 0; + + if (dma_fence_is_array(synx_obj->fence)) { + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + return 0; + + for (i = 0; i < array->num_fences; i++) + fences[idx+i] = array->fences[i]; + + return array->num_fences; + } + + fences[idx] = synx_obj->fence; + return 1; +} + +static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num) +{ + int i, j; + u32 wr_idx = 1; + + if (IS_ERR_OR_NULL(arr)) { + dprintk(SYNX_ERR, "invalid input array\n"); + return 0; + } + + for (i = 1; i < num; i++) { + for (j = 0; j < wr_idx ; j++) { + if (arr[i] == arr[j]) { + /* release reference obtained for duplicate */ + dprintk(SYNX_DBG, + "releasing duplicate reference\n"); + dma_fence_put(arr[i]); + break; + } + } + if (j == wr_idx) + arr[wr_idx++] = arr[i]; + } + + return wr_idx; +} + +s32 synx_util_merge_error(struct synx_client *client, + u32 *h_synxs, + u32 num_objs) +{ + u32 i = 0; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs)) + return -SYNX_INVALID; + + for (i = 0; i < num_objs; i++) { + synx_data = synx_util_acquire_handle(client, h_synxs[i]); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj) || + IS_ERR_OR_NULL(synx_obj->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle %d in cleanup\n", + client->id, h_synxs[i]); + continue; + } + /* release all references obtained during merge validatation */ + synx_util_put_references(synx_obj); + synx_util_release_handle(synx_data); + } + + return 0; +} + +int synx_util_validate_merge(struct synx_client *client, + u32 *h_synxs, + u32 num_objs, + struct dma_fence ***fence_list, + u32 *fence_cnt) +{ + u32 count = 0; + u32 i = 0; + struct synx_handle_coredata **synx_datas; + struct synx_coredata **synx_objs; + struct dma_fence **fences = NULL; + + if (num_objs <= 1) { + dprintk(SYNX_ERR, "single handle merge is not allowed\n"); + return -SYNX_INVALID; + } + + synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_datas)) + return -SYNX_NOMEM; + + 
synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_objs)) { + kfree(synx_datas); + return -SYNX_NOMEM; + } + + for (i = 0; i < num_objs; i++) { + synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]); + synx_objs[i] = synx_util_obtain_object(synx_datas[i]); + if (IS_ERR_OR_NULL(synx_objs[i]) || + IS_ERR_OR_NULL(synx_objs[i]->fence)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid handle %d in merge list\n", + client->id, h_synxs[i]); + *fence_cnt = i; + goto error; + } + count += synx_util_get_references(synx_objs[i]); + } + + fences = kcalloc(count, sizeof(*fences), GFP_KERNEL); + if (IS_ERR_OR_NULL(fences)) { + *fence_cnt = num_objs; + goto error; + } + + /* memory will be released later in the invoking function */ + *fence_list = fences; + count = 0; + + for (i = 0; i < num_objs; i++) { + count += synx_util_add_fence(synx_objs[i], fences, count); + /* release the reference obtained earlier in the function */ + synx_util_release_handle(synx_datas[i]); + } + + *fence_cnt = synx_util_remove_duplicates(fences, count); + kfree(synx_objs); + kfree(synx_datas); + return 0; + +error: + /* release the reference/s obtained earlier in the function */ + for (i = 0; i < *fence_cnt; i++) { + synx_util_put_references(synx_objs[i]); + synx_util_release_handle(synx_datas[i]); + } + *fence_cnt = 0; + kfree(synx_objs); + kfree(synx_datas); + return -SYNX_INVALID; +} + +static u32 __fence_state(struct dma_fence *fence, bool locked) +{ + s32 status; + u32 state = SYNX_STATE_INVALID; + + if (IS_ERR_OR_NULL(fence)) { + dprintk(SYNX_ERR, "invalid fence\n"); + return SYNX_STATE_INVALID; + } + + if (locked) + status = dma_fence_get_status_locked(fence); + else + status = dma_fence_get_status(fence); + + /* convert fence status to synx state */ + switch (status) { + case 0: + state = SYNX_STATE_ACTIVE; + break; + case 1: + state = SYNX_STATE_SIGNALED_SUCCESS; + break; + case -SYNX_STATE_SIGNALED_CANCEL: + state = SYNX_STATE_SIGNALED_CANCEL; + break; + case -SYNX_STATE_SIGNALED_EXTERNAL: + state = SYNX_STATE_SIGNALED_EXTERNAL; + break; + case -SYNX_STATE_SIGNALED_ERROR: + state = SYNX_STATE_SIGNALED_ERROR; + break; + default: + state = (u32)(-status); + } + + return state; +} + +static u32 __fence_group_state(struct dma_fence *fence, bool locked) +{ + u32 i = 0; + u32 state = SYNX_STATE_INVALID; + struct dma_fence_array *array = NULL; + u32 intr, actv_cnt, sig_cnt, err_cnt; + + if (IS_ERR_OR_NULL(fence)) { + dprintk(SYNX_ERR, "invalid fence\n"); + return SYNX_STATE_INVALID; + } + + actv_cnt = sig_cnt = err_cnt = 0; + array = to_dma_fence_array(fence); + if (IS_ERR_OR_NULL(array)) + return SYNX_STATE_INVALID; + + for (i = 0; i < array->num_fences; i++) { + intr = __fence_state(array->fences[i], locked); + switch (intr) { + case SYNX_STATE_ACTIVE: + actv_cnt++; + break; + case SYNX_STATE_SIGNALED_SUCCESS: + sig_cnt++; + break; + default: + err_cnt++; + } + } + + dprintk(SYNX_DBG, + "group cnt stats act:%u, sig: %u, err: %u\n", + actv_cnt, sig_cnt, err_cnt); + + if (err_cnt) + state = SYNX_STATE_SIGNALED_ERROR; + else if (actv_cnt) + state = SYNX_STATE_ACTIVE; + else if (sig_cnt == array->num_fences) + state = SYNX_STATE_SIGNALED_SUCCESS; + + return state; +} + +/* + * WARN: Should not hold the fence spinlock when invoking + * this function. 
Use synx_fence_state_locked instead + */ +u32 synx_util_get_object_status(struct synx_coredata *synx_obj) +{ + u32 state; + + if (IS_ERR_OR_NULL(synx_obj)) + return SYNX_STATE_INVALID; + + if (synx_util_is_merged_object(synx_obj)) + state = __fence_group_state(synx_obj->fence, false); + else + state = __fence_state(synx_obj->fence, false); + + return state; +} + +/* use this for status check when holding on to metadata spinlock */ +u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj) +{ + u32 state; + + if (IS_ERR_OR_NULL(synx_obj)) + return SYNX_STATE_INVALID; + + if (synx_util_is_merged_object(synx_obj)) + state = __fence_group_state(synx_obj->fence, true); + else + state = __fence_state(synx_obj->fence, true); + + return state; +} + +struct synx_handle_coredata *synx_util_acquire_handle( + struct synx_client *client, u32 h_synx) +{ + struct synx_handle_coredata *synx_data = NULL; + struct synx_handle_coredata *synx_handle = + ERR_PTR(-SYNX_NOENT); + + if (IS_ERR_OR_NULL(client)) + return ERR_PTR(-SYNX_INVALID); + + spin_lock_bh(&client->handle_map_lock); + hash_for_each_possible(client->handle_map, + synx_data, node, h_synx) { + if (synx_data->key == h_synx && + synx_data->rel_count != 0) { + kref_get(&synx_data->refcount); + synx_handle = synx_data; + break; + } + } + spin_unlock_bh(&client->handle_map_lock); + + return synx_handle; +} + +struct synx_map_entry *synx_util_insert_to_map( + struct synx_coredata *synx_obj, + u32 h_synx, u32 flags) +{ + struct synx_map_entry *map_entry; + + map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL); + if (IS_ERR_OR_NULL(map_entry)) + return ERR_PTR(-SYNX_NOMEM); + + kref_init(&map_entry->refcount); + map_entry->synx_obj = synx_obj; + map_entry->flags = flags; + map_entry->key = h_synx; + + if (synx_util_is_global_handle(h_synx)) { + spin_lock_bh(&synx_dev->native->global_map_lock); + hash_add(synx_dev->native->global_map, + &map_entry->node, h_synx); + spin_unlock_bh(&synx_dev->native->global_map_lock); + dprintk(SYNX_MEM, + "added handle %u to global map %pK\n", + h_synx, map_entry); + } else { + spin_lock_bh(&synx_dev->native->local_map_lock); + hash_add(synx_dev->native->local_map, + &map_entry->node, h_synx); + spin_unlock_bh(&synx_dev->native->local_map_lock); + dprintk(SYNX_MEM, + "added handle %u to local map %pK\n", + h_synx, map_entry); + } + + return map_entry; +} + +struct synx_map_entry *synx_util_get_map_entry(u32 h_synx) +{ + struct synx_map_entry *curr; + struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT); + + if (h_synx == 0) + return ERR_PTR(-SYNX_INVALID); + + if (synx_util_is_global_handle(h_synx)) { + spin_lock_bh(&synx_dev->native->global_map_lock); + hash_for_each_possible(synx_dev->native->global_map, + curr, node, h_synx) { + if (curr->key == h_synx) { + kref_get(&curr->refcount); + map_entry = curr; + break; + } + } + spin_unlock_bh(&synx_dev->native->global_map_lock); + } else { + spin_lock_bh(&synx_dev->native->local_map_lock); + hash_for_each_possible(synx_dev->native->local_map, + curr, node, h_synx) { + if (curr->key == h_synx) { + kref_get(&curr->refcount); + map_entry = curr; + break; + } + } + spin_unlock_bh(&synx_dev->native->local_map_lock); + } + + /* should we allocate if entry not found? 
*/ + return map_entry; +} + +static void synx_util_cleanup_fence( + struct synx_coredata *synx_obj) +{ + struct synx_signal_cb *signal_cb; + unsigned long flags; + u32 g_status; + u32 f_status; + + mutex_lock(&synx_obj->obj_lock); + synx_obj->map_count--; + signal_cb = synx_obj->signal_cb; + f_status = synx_util_get_object_status(synx_obj); + dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n", + f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx); + if (synx_obj->map_count == 0 && + (signal_cb != NULL) && + (synx_obj->global_idx != 0) && + (f_status == SYNX_STATE_ACTIVE)) { + /* + * no more clients interested for notification + * on handle on local core. + * remove reference held by callback on synx + * coredata structure and update cb (if still + * un-signaled) with global handle idx to + * notify any cross-core clients waiting on + * handle. + */ + g_status = synx_global_get_status(synx_obj->global_idx); + if (g_status > SYNX_STATE_ACTIVE) { + dprintk(SYNX_DBG, "signaling fence %pK with status %u\n", + synx_obj->fence, g_status); + synx_native_signal_fence(synx_obj, g_status); + } else { + spin_lock_irqsave(synx_obj->fence->lock, flags); + if (synx_util_get_object_status_locked(synx_obj) == + SYNX_STATE_ACTIVE) { + signal_cb->synx_obj = NULL; + signal_cb->handle = synx_obj->global_idx; + synx_obj->signal_cb = NULL; + /* + * release reference held by signal cb and + * get reference on global index instead. + */ + synx_util_put_object(synx_obj); + synx_global_get_ref(synx_obj->global_idx); + } + spin_unlock_irqrestore(synx_obj->fence->lock, flags); + } + } else if (synx_obj->map_count == 0 && signal_cb && + (f_status == SYNX_STATE_ACTIVE)) { + if (dma_fence_remove_callback(synx_obj->fence, + &signal_cb->fence_cb)) { + kfree(signal_cb); + synx_obj->signal_cb = NULL; + /* + * release reference held by signal cb and + * get reference on global index instead. 
+ */ + synx_util_put_object(synx_obj); + dprintk(SYNX_MEM, "signal cb destroyed %pK\n", + synx_obj->signal_cb); + } + } + mutex_unlock(&synx_obj->obj_lock); +} + +static void synx_util_destroy_map_entry_worker( + struct work_struct *dispatch) +{ + struct synx_map_entry *map_entry = + container_of(dispatch, struct synx_map_entry, dispatch); + struct synx_coredata *synx_obj; + + synx_obj = map_entry->synx_obj; + if (!IS_ERR_OR_NULL(synx_obj)) { + synx_util_cleanup_fence(synx_obj); + /* release reference held by map entry */ + synx_util_put_object(synx_obj); + } + + if (!synx_util_is_global_handle(map_entry->key)) + clear_bit(synx_util_global_idx(map_entry->key), + synx_dev->native->bitmap); + dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n", + map_entry->key, map_entry); + kfree(map_entry); +} + +static void synx_util_destroy_map_entry(struct kref *kref) +{ + struct synx_map_entry *map_entry = + container_of(kref, struct synx_map_entry, refcount); + + hash_del(&map_entry->node); + dprintk(SYNX_MEM, "map entry for %u removed %pK\n", + map_entry->key, map_entry); + INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker); + queue_work(synx_dev->wq_cleanup, &map_entry->dispatch); +} + +void synx_util_release_map_entry(struct synx_map_entry *map_entry) +{ + spinlock_t *lock; + + if (IS_ERR_OR_NULL(map_entry)) + return; + + if (synx_util_is_global_handle(map_entry->key)) + lock = &synx_dev->native->global_map_lock; + else + lock = &synx_dev->native->local_map_lock; + + spin_lock_bh(lock); + kref_put(&map_entry->refcount, + synx_util_destroy_map_entry); + spin_unlock_bh(lock); +} + +static void synx_util_destroy_handle_worker( + struct work_struct *dispatch) +{ + struct synx_handle_coredata *synx_data = + container_of(dispatch, struct synx_handle_coredata, + dispatch); + + synx_util_release_map_entry(synx_data->map_entry); + dprintk(SYNX_VERB, "handle %u destroyed %pK\n", + synx_data->key, synx_data); + kfree(synx_data); +} + +static void synx_util_destroy_handle(struct kref *kref) +{ + struct synx_handle_coredata *synx_data = + container_of(kref, struct synx_handle_coredata, + refcount); + + hash_del(&synx_data->node); + dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n", + synx_data->client->id, synx_data->key, synx_data); + INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker); + queue_work(synx_dev->wq_cleanup, &synx_data->dispatch); +} + +void synx_util_release_handle(struct synx_handle_coredata *synx_data) +{ + struct synx_client *client; + + if (IS_ERR_OR_NULL(synx_data)) + return; + + client = synx_data->client; + if (IS_ERR_OR_NULL(client)) + return; + + spin_lock_bh(&client->handle_map_lock); + kref_put(&synx_data->refcount, + synx_util_destroy_handle); + spin_unlock_bh(&client->handle_map_lock); +} + +struct bind_operations *synx_util_get_bind_ops(u32 type) +{ + struct synx_registered_ops *client_ops; + + if (!synx_util_is_valid_bind_type(type)) + return NULL; + + mutex_lock(&synx_dev->vtbl_lock); + client_ops = &synx_dev->bind_vtbl[type]; + if (!client_ops->valid) { + mutex_unlock(&synx_dev->vtbl_lock); + return NULL; + } + mutex_unlock(&synx_dev->vtbl_lock); + + return &client_ops->ops; +} + +int synx_util_alloc_cb_entry(struct synx_client *client, + struct synx_kernel_payload *data, + u32 *cb_idx) +{ + long idx; + struct synx_client_cb *cb; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) || + IS_ERR_OR_NULL(cb_idx)) + return -SYNX_INVALID; + + idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS); + if (idx >= SYNX_MAX_OBJS) { + 
dprintk(SYNX_ERR, + "[sess :%llu] free cb index not available\n", + client->id); + return -SYNX_NOMEM; + } + + cb = &client->cb_table[idx]; + memset(cb, 0, sizeof(*cb)); + cb->is_valid = true; + cb->client = client; + cb->idx = idx; + memcpy(&cb->kernel_cb, data, + sizeof(cb->kernel_cb)); + + *cb_idx = idx; + dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n", + client->id, *cb_idx); + return 0; +} + +int synx_util_clear_cb_entry(struct synx_client *client, + struct synx_client_cb *cb) +{ + int rc = 0; + u32 idx; + + if (IS_ERR_OR_NULL(cb)) + return -SYNX_INVALID; + + idx = cb->idx; + memset(cb, 0, sizeof(*cb)); + if (idx && idx < SYNX_MAX_OBJS) { + clear_bit(idx, client->cb_bitmap); + } else { + dprintk(SYNX_ERR, "invalid index\n"); + rc = -SYNX_INVALID; + } + + return rc; +} + +void synx_util_default_user_callback(u32 h_synx, + int status, void *data) +{ + struct synx_client_cb *cb = data; + struct synx_client *client = NULL; + + if (cb && cb->client) { + client = cb->client; + dprintk(SYNX_VERB, + "[sess :%llu] user cb queued for handle %d\n", + client->id, h_synx); + cb->kernel_cb.status = status; + mutex_lock(&client->event_q_lock); + list_add_tail(&cb->node, &client->event_q); + mutex_unlock(&client->event_q_lock); + wake_up_all(&client->event_wq); + } else { + dprintk(SYNX_ERR, "invalid params\n"); + } +} + +void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status) +{ + struct synx_cb_data *synx_cb, *synx_cb_temp; + + if (IS_ERR_OR_NULL(synx_obj)) { + dprintk(SYNX_ERR, "invalid arguments\n"); + return; + } + + list_for_each_entry_safe(synx_cb, + synx_cb_temp, &synx_obj->reg_cbs_list, node) { + synx_cb->status = status; + list_del_init(&synx_cb->node); + queue_work(synx_dev->wq_cb, + &synx_cb->cb_dispatch); + dprintk(SYNX_VERB, "dispatched callback\n"); + } +} + +void synx_util_cb_dispatch(struct work_struct *cb_dispatch) +{ + struct synx_cb_data *synx_cb = + container_of(cb_dispatch, struct synx_cb_data, cb_dispatch); + struct synx_client *client; + struct synx_client_cb *cb; + struct synx_kernel_payload payload; + u32 status; + + client = synx_get_client(synx_cb->session); + if (IS_ERR_OR_NULL(client)) { + dprintk(SYNX_ERR, + "invalid session data %pK in cb payload\n", + synx_cb->session); + goto free; + } + + if (synx_cb->idx == 0 || + synx_cb->idx >= SYNX_MAX_OBJS) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid cb index %u\n", + client->id, synx_cb->idx); + goto fail; + } + + status = synx_cb->status; + cb = &client->cb_table[synx_cb->idx]; + if (!cb->is_valid) { + dprintk(SYNX_ERR, "invalid cb payload\n"); + goto fail; + } + + memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb)); + payload.status = status; + + if (payload.cb_func == synx_util_default_user_callback) { + /* + * need to send client cb data for default + * user cb (userspace cb) + */ + payload.data = cb; + } else { + /* + * clear the cb entry. 
userspace cb entry + * will be cleared after data read by the + * polling thread or when client is destroyed + */ + if (synx_util_clear_cb_entry(client, cb)) + dprintk(SYNX_ERR, + "[sess :%llu] error clearing cb entry\n", + client->id); + } + + dprintk(SYNX_INFO, + "callback dispatched for handle %u, status %u, data %pK\n", + payload.h_synx, payload.status, payload.data); + + /* dispatch kernel callback */ + payload.cb_func(payload.h_synx, + payload.status, payload.data); + +fail: + synx_put_client(client); +free: + kfree(synx_cb); +} + +u32 synx_util_get_fence_entry(u64 key, u32 global) +{ + u32 h_synx = 0; + struct synx_fence_entry *curr; + + spin_lock_bh(&synx_dev->native->fence_map_lock); + hash_for_each_possible(synx_dev->native->fence_map, + curr, node, key) { + if (curr->key == key) { + if (global) + h_synx = curr->g_handle; + /* return local handle if global not available */ + if (h_synx == 0) + h_synx = curr->l_handle; + + break; + } + } + spin_unlock_bh(&synx_dev->native->fence_map_lock); + + return h_synx; +} + +void synx_util_release_fence_entry(u64 key) +{ + struct synx_fence_entry *entry = NULL, *curr; + + spin_lock_bh(&synx_dev->native->fence_map_lock); + hash_for_each_possible(synx_dev->native->fence_map, + curr, node, key) { + if (curr->key == key) { + entry = curr; + break; + } + } + + if (entry) { + hash_del(&entry->node); + dprintk(SYNX_MEM, + "released fence entry %pK for fence %pK\n", + entry, (void *)key); + kfree(entry); + } + + spin_unlock_bh(&synx_dev->native->fence_map_lock); +} + +int synx_util_insert_fence_entry(struct synx_fence_entry *entry, + u32 *h_synx, u32 global) +{ + int rc = SYNX_SUCCESS; + struct synx_fence_entry *curr; + + if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx)) + return -SYNX_INVALID; + + spin_lock_bh(&synx_dev->native->fence_map_lock); + hash_for_each_possible(synx_dev->native->fence_map, + curr, node, entry->key) { + /* raced with import from another process on same fence */ + if (curr->key == entry->key) { + if (global) + *h_synx = curr->g_handle; + + if (*h_synx == 0 || !global) + *h_synx = curr->l_handle; + + rc = -SYNX_ALREADY; + break; + } + } + /* add entry only if its not present in the map */ + if (rc == SYNX_SUCCESS) { + hash_add(synx_dev->native->fence_map, + &entry->node, entry->key); + dprintk(SYNX_MEM, + "added fence entry %pK for fence %pK\n", + entry, (void *)entry->key); + } + spin_unlock_bh(&synx_dev->native->fence_map_lock); + + return rc; +} + +struct synx_client *synx_get_client(struct synx_session *session) +{ + struct synx_client *client = NULL; + struct synx_client *curr; + + if (IS_ERR_OR_NULL(session)) + return ERR_PTR(-SYNX_INVALID); + + spin_lock_bh(&synx_dev->native->metadata_map_lock); + hash_for_each_possible(synx_dev->native->client_metadata_map, + curr, node, (u64)session) { + if (curr == (struct synx_client *)session) { + if (curr->active) { + kref_get(&curr->refcount); + client = curr; + } + break; + } + } + spin_unlock_bh(&synx_dev->native->metadata_map_lock); + + return client; +} + +static void synx_client_cleanup(struct work_struct *dispatch) +{ + int i, j; + struct synx_client *client = + container_of(dispatch, struct synx_client, dispatch); + struct synx_handle_coredata *curr; + struct hlist_node *tmp; + + /* + * go over all the remaining synx obj handles + * un-released from this session and remove them. 
+ */ + hash_for_each_safe(client->handle_map, i, tmp, curr, node) { + dprintk(SYNX_WARN, + "[sess :%llu] un-released handle %u\n", + client->id, curr->key); + j = kref_read(&curr->refcount); + /* release pending reference */ + while (j--) + kref_put(&curr->refcount, synx_util_destroy_handle); + } + + mutex_destroy(&client->event_q_lock); + + dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n", + client->id, client->name, client); + vfree(client); +} + +static void synx_client_destroy(struct kref *kref) +{ + struct synx_client *client = + container_of(kref, struct synx_client, refcount); + + hash_del(&client->node); + dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n", + client->id, client->name); + + INIT_WORK(&client->dispatch, synx_client_cleanup); + queue_work(synx_dev->wq_cleanup, &client->dispatch); +} + +void synx_put_client(struct synx_client *client) +{ + if (IS_ERR_OR_NULL(client)) + return; + + spin_lock_bh(&synx_dev->native->metadata_map_lock); + kref_put(&client->refcount, synx_client_destroy); + spin_unlock_bh(&synx_dev->native->metadata_map_lock); +} + +void synx_util_generate_timestamp(char *timestamp, size_t size) +{ + struct timespec64 tv; + struct tm tm; + + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, 0, &tm); + snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d", + tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, + tm.tm_min, tm.tm_sec); +} + +void synx_util_log_error(u32 client_id, u32 h_synx, s32 err) +{ + struct error_node *err_node; + + if (!synx_dev->debugfs_root) + return; + + err_node = kzalloc(sizeof(*err_node), GFP_KERNEL); + if (!err_node) + return; + + err_node->client_id = client_id; + err_node->error_code = err; + err_node->h_synx = h_synx; + synx_util_generate_timestamp(err_node->timestamp, + sizeof(err_node->timestamp)); + mutex_lock(&synx_dev->error_lock); + list_add(&err_node->node, + &synx_dev->error_list); + mutex_unlock(&synx_dev->error_lock); +} + +int synx_util_save_data(void *fence, u32 flags, + u32 h_synx) +{ + int rc = SYNX_SUCCESS; + struct synx_entry_64 *entry, *curr; + u64 key; + u32 tbl = synx_util_map_params_to_type(flags); + + switch (tbl) { + case SYNX_TYPE_CSL: + key = *(u32 *)fence; + spin_lock_bh(&synx_dev->native->csl_map_lock); + /* ensure fence is not already added to map */ + hash_for_each_possible(synx_dev->native->csl_fence_map, + curr, node, key) { + if (curr->key == key) { + rc = -SYNX_ALREADY; + break; + } + } + if (rc == SYNX_SUCCESS) { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (entry) { + entry->data[0] = h_synx; + entry->key = key; + kref_init(&entry->refcount); + hash_add(synx_dev->native->csl_fence_map, + &entry->node, entry->key); + dprintk(SYNX_MEM, "added csl fence %d to map %pK\n", + entry->key, entry); + } else { + rc = -SYNX_NOMEM; + } + } + spin_unlock_bh(&synx_dev->native->csl_map_lock); + break; + default: + dprintk(SYNX_ERR, "invalid hash table selection\n"); + kfree(entry); + rc = -SYNX_INVALID; + } + + return rc; +} + +struct synx_entry_64 *synx_util_retrieve_data(void *fence, + u32 type) +{ + u64 key; + struct synx_entry_64 *entry = NULL; + struct synx_entry_64 *curr; + + switch (type) { + case SYNX_TYPE_CSL: + key = *(u32 *)fence; + spin_lock_bh(&synx_dev->native->csl_map_lock); + hash_for_each_possible(synx_dev->native->csl_fence_map, + curr, node, key) { + if (curr->key == key) { + kref_get(&curr->refcount); + entry = curr; + break; + } + } + spin_unlock_bh(&synx_dev->native->csl_map_lock); + break; + default: + dprintk(SYNX_ERR, "invalid hash table selection %u\n", + type); + } + + return 
entry; +} + +static void synx_util_destroy_data(struct kref *kref) +{ + struct synx_entry_64 *entry = + container_of(kref, struct synx_entry_64, refcount); + + hash_del(&entry->node); + dprintk(SYNX_MEM, "released fence %llu entry %pK\n", + entry->key, entry); + kfree(entry); +} + +void synx_util_remove_data(void *fence, + u32 type) +{ + u64 key; + struct synx_entry_64 *entry = NULL; + struct synx_entry_64 *curr; + + if (IS_ERR_OR_NULL(fence)) + return; + + switch (type) { + case SYNX_TYPE_CSL: + key = *((u32 *)fence); + spin_lock_bh(&synx_dev->native->csl_map_lock); + hash_for_each_possible(synx_dev->native->csl_fence_map, + curr, node, key) { + if (curr->key == key) { + entry = curr; + break; + } + } + if (entry) + kref_put(&entry->refcount, synx_util_destroy_data); + spin_unlock_bh(&synx_dev->native->csl_map_lock); + break; + default: + dprintk(SYNX_ERR, "invalid hash table selection %u\n", + type); + } +} + +void synx_util_map_import_params_to_create( + struct synx_import_indv_params *params, + struct synx_create_params *c_params) +{ + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params)) + return; + + if (params->flags & SYNX_IMPORT_GLOBAL_FENCE) + c_params->flags |= SYNX_CREATE_GLOBAL_FENCE; + + if (params->flags & SYNX_IMPORT_LOCAL_FENCE) + c_params->flags |= SYNX_CREATE_LOCAL_FENCE; + + if (params->flags & SYNX_IMPORT_DMA_FENCE) + c_params->flags |= SYNX_CREATE_DMA_FENCE; +} + +u32 synx_util_map_client_id_to_core( + enum synx_client_id id) +{ + u32 core_id; + + switch (id) { + case SYNX_CLIENT_NATIVE: + core_id = SYNX_CORE_APSS; break; + case SYNX_CLIENT_EVA_CTX0: + core_id = SYNX_CORE_EVA; break; + case SYNX_CLIENT_VID_CTX0: + core_id = SYNX_CORE_IRIS; break; + case SYNX_CLIENT_NSP_CTX0: + core_id = SYNX_CORE_NSP; break; + default: + core_id = SYNX_CORE_MAX; + } + + return core_id; +} diff --git a/msm/synx/synx_util.h b/msm/synx/synx_util.h new file mode 100644 index 0000000000..c1483cd81c --- /dev/null +++ b/msm/synx/synx_util.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_UTIL_H__ +#define __SYNX_UTIL_H__ + +#include "synx_api.h" +#include "synx_private.h" + +extern struct synx_device *synx_dev; + +extern void synx_fence_callback(struct dma_fence *fence, + struct dma_fence_cb *cb); +extern int synx_native_signal_fence(struct synx_coredata *synx_obj, + u32 status); + +static inline bool synx_util_is_valid_bind_type(u32 type) +{ + if (type < SYNX_MAX_BIND_TYPES) + return true; + + return false; +} + +static inline bool synx_util_is_global_handle(u32 h_synx) +{ + return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false; +} + +static inline u32 synx_util_get_object_type( + struct synx_coredata *synx_obj) +{ + return synx_obj ? 
synx_obj->type : 0; +} + +static inline bool synx_util_is_merged_object( + struct synx_coredata *synx_obj) +{ + if (synx_obj && + (synx_obj->type & SYNX_CREATE_MERGED_FENCE)) + return true; + + return false; +} + +static inline bool synx_util_is_global_object( + struct synx_coredata *synx_obj) +{ + if (synx_obj && + (synx_obj->type & SYNX_CREATE_GLOBAL_FENCE)) + return true; + + return false; +} + +static inline bool synx_util_is_external_object( + struct synx_coredata *synx_obj) +{ + if (synx_obj && + (synx_obj->type & SYNX_CREATE_DMA_FENCE)) + return true; + + return false; +} + +static inline u32 synx_util_map_params_to_type(u32 flags) +{ + if (flags & SYNX_CREATE_CSL_FENCE) + return SYNX_TYPE_CSL; + + return SYNX_MAX_BIND_TYPES; +} + +static inline u32 synx_util_global_idx(u32 h_synx) +{ + return (h_synx & SYNX_OBJ_HANDLE_MASK); +} + +/* coredata memory functions */ +void synx_util_get_object(struct synx_coredata *synx_obj); +void synx_util_put_object(struct synx_coredata *synx_obj); +void synx_util_object_destroy(struct synx_coredata *synx_obj); + +static inline struct synx_coredata *synx_util_obtain_object( + struct synx_handle_coredata *synx_data) +{ + if (IS_ERR_OR_NULL(synx_data)) + return NULL; + + return synx_data->synx_obj; +} + +/* global/local map functions */ +struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj, + u32 h_synx, u32 flags); +struct synx_map_entry *synx_util_get_map_entry(u32 h_synx); +void synx_util_release_map_entry(struct synx_map_entry *map_entry); + +/* fence map functions */ +int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx, + u32 global); +u32 synx_util_get_fence_entry(u64 key, u32 global); +void synx_util_release_fence_entry(u64 key); + +/* coredata initialize functions */ +int synx_util_init_coredata(struct synx_coredata *synx_obj, + struct synx_create_params *params, + struct dma_fence_ops *ops, + u64 dma_context); +int synx_util_init_group_coredata(struct synx_coredata *synx_obj, + struct dma_fence **fences, + struct synx_merge_params *params, + u32 num_objs, + u64 dma_context); + +/* handle related functions */ +int synx_alloc_global_handle(u32 *new_synx); +int synx_alloc_local_handle(u32 *new_synx); +long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size); +int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj, + u32 *new_h_synx, + void *map_entry); + +u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx); + +/* callback related functions */ +int synx_util_alloc_cb_entry(struct synx_client *client, + struct synx_kernel_payload *data, + u32 *cb_idx); +int synx_util_clear_cb_entry(struct synx_client *client, + struct synx_client_cb *cb); +void synx_util_default_user_callback(u32 h_synx, int status, void *data); +void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state); +void synx_util_cb_dispatch(struct work_struct *cb_dispatch); + +/* external fence functions */ +int synx_util_activate(struct synx_coredata *synx_obj); +int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx); + +/* merge related helper functions */ +s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs); +int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs, + struct dma_fence ***fences, + u32 *fence_cnt); + +/* coredata status functions */ +u32 synx_util_get_object_status(struct synx_coredata *synx_obj); +u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj); + +/* client 
handle map related functions */ +struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client, + u32 h_synx); +void synx_util_release_handle(struct synx_handle_coredata *synx_data); +int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id, + u32 type, struct synx_handle_coredata **handle); + +/* client memory handler functions */ +struct synx_client *synx_get_client(struct synx_session *session); +void synx_put_client(struct synx_client *client); + +/* error log functions */ +void synx_util_generate_timestamp(char *timestamp, size_t size); +void synx_util_log_error(u32 id, u32 h_synx, s32 err); + +/* external fence map functions */ +int synx_util_save_data(void *fence, u32 flags, u32 data); +struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type); +void synx_util_remove_data(void *fence, u32 type); + +/* misc */ +void synx_util_map_import_params_to_create( + struct synx_import_indv_params *params, + struct synx_create_params *c_params); + +struct bind_operations *synx_util_get_bind_ops(u32 type); +u32 synx_util_map_client_id_to_core(enum synx_client_id id); + +#endif /* __SYNX_UTIL_H__ */ diff --git a/synx_kernel_board.mk b/synx_kernel_board.mk new file mode 100644 index 0000000000..0163169aa8 --- /dev/null +++ b/synx_kernel_board.mk @@ -0,0 +1,19 @@ +# Build synx kernel driver + +TARGET_SYNX_ENABLE := false +ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true) + ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true) + TARGET_SYNX_ENABLE := true + endif +else +TARGET_SYNX_ENABLE := true +endif +# +ifeq ($(TARGET_SYNX_ENABLE), true) +ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko +BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko +endif +endif + diff --git a/synx_kernel_product.mk b/synx_kernel_product.mk new file mode 100644 index 0000000000..70ce198745 --- /dev/null +++ b/synx_kernel_product.mk @@ -0,0 +1,12 @@ +TARGET_SYNX_ENABLE := false +ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true) + ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true) + TARGET_SYNX_ENABLE := true + endif +else +TARGET_SYNX_ENABLE := true +endif + +ifeq ($(TARGET_SYNX_ENABLE), true) +PRODUCT_PACKAGES += synx-driver.ko +endif \ No newline at end of file From a2639f4c3d2384714236e7e109718e8a634146cc Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Sun, 23 Oct 2022 16:37:56 +0530 Subject: [PATCH 03/42] msm: ipclite: Rebased SSR updates from kernel_platform Added latest SSR updates in IPCLite from kernel_platform to vendor space Change-Id: I9e551a0d69f45d89cae2165e25468945fcc68f7f Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 166 ++++++++++++++++++++++++++++++++------ msm/synx/ipclite.h | 30 ++++--- msm/synx/ipclite_client.h | 11 ++- msm/synx/synx_global.c | 2 +- 4 files changed, 173 insertions(+), 36 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 03ff780f69..90daaab0fd 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -33,22 +33,14 @@ static struct ipclite_info *ipclite; static struct ipclite_client synx_client; static struct ipclite_client test_client; struct ipclite_hw_mutex_ops *ipclite_hw_mutex; +struct mutex ssr_mutex; +uint32_t channel_status_info[IPCMEM_NUM_HOSTS]; u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; #define FIFO_FULL_RESERVE 8 #define FIFO_ALIGNMENT 8 -void ipclite_hwlock_reset(enum ipcmem_host_type core_id) -{ - 
/* verify and reset the hw mutex lock */ - if (core_id == ipclite->ipcmem.toc->global_atomic_hwlock_owner) { - ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; - hwspin_unlock_raw(ipclite->hwlock); - } -} -EXPORT_SYMBOL(ipclite_hwlock_reset); - static void ipclite_hw_mutex_acquire(void) { int32_t ret; @@ -61,7 +53,7 @@ static void ipclite_hw_mutex_acquire(void) if (ret) pr_err("Hw mutex lock acquire failed\n"); - ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_APPS; + ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS; pr_debug("Hw mutex lock acquired\n"); } @@ -72,7 +64,8 @@ static void ipclite_hw_mutex_release(void) { if (ipclite != NULL) { if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) { - ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = + IPCMEM_INVALID_HOST; hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->ipclite_hw_mutex->flags); pr_debug("Hw mutex lock release\n"); @@ -479,6 +472,100 @@ static int ipclite_tx(struct ipclite_channel *channel, return ret; } +int ipclite_ssr_update(int32_t proc_id) +{ + int ret = 0; + + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + pr_debug("Invalid proc_id %d\n", proc_id); + return -EINVAL; + } + + if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { + if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { + channel_status_info[proc_id] = CHANNEL_ACTIVE; + } else { + pr_err("Cannot send msg to remote client. Channel inactive\n"); + return -IPCLITE_EINCHAN; + } + } + + ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, + NULL); + if (ret < 0) { + pr_debug("Signal sending failed to core : %d ret : %d\n", proc_id, ret); + return ret; + } + + pr_debug("SSR update send completed with ret=%d\n", ret); + return ret; +} + +void ipclite_recover(enum ipcmem_host_type core_id) +{ + int ret, i, host, host0, host1; + + pr_debug("IPCLite Recover - Crashed Core : %d\n", core_id); + + /* verify and reset the hw mutex lock */ + if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) { + ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + hwspin_unlock_raw(ipclite->hwlock); + pr_debug("HW Lock Reset\n"); + } + + mutex_lock(&ssr_mutex); + /* Set the Global Channel Status to 0 to avoid Race condition */ + for (i = 0; i < MAX_PARTITION_COUNT; i++) { + host0 = ipcmem_toc_partition_entries[i].host0; + host1 = ipcmem_toc_partition_entries[i].host1; + + if (host0 == core_id || host1 == core_id) { + + ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0); + ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0); + + channel_status_info[core_id] = + ipclite->ipcmem.toc->toc_entry[host0][host1].status; + } + pr_debug("Global Channel Status : [%d][%d] : %d\n", host0, host1, + ipclite->ipcmem.toc->toc_entry[host0][host1].status); + pr_debug("Global Channel Status : [%d][%d] : %d\n", host1, host0, + ipclite->ipcmem.toc->toc_entry[host1][host0].status); + } + + /* Resets the TX/RX queue */ + *(ipclite->channel[core_id].tx_fifo->head) = 0; + *(ipclite->channel[core_id].rx_fifo->tail) = 0; + + pr_debug("TX Fifo Reset : %d\n", *(ipclite->channel[core_id].tx_fifo->head)); + pr_debug("RX Fifo Reset : %d\n", *(ipclite->channel[core_id].rx_fifo->tail)); + + /* Increment the Global 
Channel Status for APPS and crashed core*/ + ipclite_global_atomic_inc((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status))); + ipclite_global_atomic_inc((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status))); + + channel_status_info[core_id] = + ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status; + + /* Update other cores about SSR */ + for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { + if (host != core_id) { + ret = ipclite_ssr_update(host); + if (ret < 0) + pr_debug("Failed to send the SSR update %d\n", host); + else + pr_debug("SSR update sent to host %d\n", host); + } + } + mutex_unlock(&ssr_mutex); +} +EXPORT_SYMBOL(ipclite_recover); + int ipclite_msg_send(int32_t proc_id, uint64_t data) { int ret = 0; @@ -488,9 +575,13 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data) return -EINVAL; } - if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) { - pr_err("Cannot send msg to remote client. Channel inactive\n"); - return -ENXIO; + if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { + if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { + channel_status_info[proc_id] = CHANNEL_ACTIVE; + } else { + pr_err("Cannot send msg to remote client. Channel inactive\n"); + return -IPCLITE_EINCHAN; + } } ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), @@ -523,10 +614,13 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data) return -EINVAL; } - /* Limit Message Sending without Client Registration */ - if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) { - pr_err("Cannot send msg to remote client. Channel inactive\n"); - return -ENXIO; + if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { + if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { + channel_status_info[proc_id] = CHANNEL_ACTIVE; + } else { + pr_err("Cannot send msg to remote client. 
Channel inactive\n"); + return -IPCLITE_EINCHAN; + } } ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), @@ -586,7 +680,7 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name) static void ipcmem_init(struct ipclite_mem *ipcmem) { - int host0, host1; + int host, host0, host1; int i = 0; ipcmem->toc = ipcmem->mem.virt_base; @@ -621,6 +715,28 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i]; ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i]; + if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) { + /* Updating the Global Channel Status for APPS Loopback */ + ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE; + ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE; + + /* Updating Local Channel Status */ + channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status; + + } else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) { + /* Updating the Global Channel Status */ + ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS; + ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS; + + /* Updating Local Channel Status */ + if (host0 == IPCMEM_APPS) + host = host1; + else if (host1 == IPCMEM_APPS) + host = host0; + + channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status; + } + ipcmem->partition[i] = (struct ipcmem_partition *) ((char *)ipcmem->mem.virt_base + ipcmem_toc_partition_entries[i].base_offset); @@ -668,7 +784,7 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n { int ret = 0; u32 index; - char strs[4][9] = {"msg", "mem-init", "version", "test"}; + char strs[5][9] = {"msg", "mem-init", "version", "test", "ssr"}; struct ipclite_irq_info *irq_info; struct device *dev; @@ -887,7 +1003,8 @@ static int ipclite_channel_init(struct device *parent, goto err_put_dev; } } - ipclite->channel[remote_pid].channel_status = ACTIVE_CHANNEL; + + ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE; pr_debug("Channel init completed, ret = %d\n", ret); return ret; @@ -941,6 +1058,9 @@ static int ipclite_probe(struct platform_device *pdev) } pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock); + /* Initializing Local Mutex Lock for SSR functionality */ + mutex_init(&ssr_mutex); + ret = map_ipcmem(ipclite, "memory-region"); if (ret) { pr_err("failed to map ipcmem\n"); @@ -990,7 +1110,7 @@ static int ipclite_probe(struct platform_device *pdev) ipclite->ipclite_hw_mutex = ipclite_hw_mutex; /* initialize hwlock owner to invalid host */ - ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; pr_info("IPCLite probe completed successfully\n"); return ret; diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index d622bad099..f29053bba5 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -14,7 +14,7 @@ #define ACTIVE_CHANNEL 0x1 #define IPCMEM_TOC_SIZE (4*1024) -#define MAX_CHANNEL_SIGNALS 4 +#define MAX_CHANNEL_SIGNALS 5 #define MAX_PARTITION_COUNT 7 /*7 partitions other than global partition*/ @@ -22,6 +22,7 @@ #define IPCLITE_MEM_INIT_SIGNAL 1 #define IPCLITE_VERSION_SIGNAL 2 #define IPCLITE_TEST_SIGNAL 3 +#define IPCLITE_SSR_SIGNAL 4 /** Flag definitions for the entries */ #define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01) @@ -38,6 +39,12 @@ /* Timeout (ms) for the trylock of remote spinlocks */ 
#define HWSPINLOCK_TIMEOUT 1000 +#define CHANNEL_INACTIVE 0 +#define CHANNEL_ACTIVATE_IN_PROGRESS 1 +#define CHANNEL_ACTIVE 2 + +#define CONFIGURED_CORE 1 + /*IPCMEM Structure Definitions*/ struct ipclite_features { @@ -45,6 +52,11 @@ struct ipclite_features { uint32_t version_finalised; }; +struct ipclite_recover { + uint32_t global_atomic_hwlock_owner; + uint32_t configured_core[IPCMEM_NUM_HOSTS]; +}; + struct ipcmem_partition_header { uint32_t type; /*partition type*/ uint32_t desc_offset; /*descriptor offset*/ @@ -77,7 +89,7 @@ struct ipcmem_toc { /* as ipcmem is 4k and if host number increases */ /* it would create problems*/ struct ipclite_features ipclite_features; - uint32_t global_atomic_hwlock_owner; + struct ipclite_recover recovery; }; struct ipcmem_region { @@ -202,7 +214,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, IPCMEM_CDSP, - 1, + CHANNEL_INACTIVE, }, /* APPS<->CVP (EVA) partition. */ { @@ -211,7 +223,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, IPCMEM_CVP, - 1, + CHANNEL_INACTIVE, }, /* APPS<->VPU partition. */ { @@ -220,7 +232,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, IPCMEM_VPU, - 1, + CHANNEL_INACTIVE, }, /* CDSP<->CVP (EVA) partition. */ { @@ -229,7 +241,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_CDSP, IPCMEM_CVP, - 1, + CHANNEL_INACTIVE, }, /* CDSP<->VPU partition. */ { @@ -238,7 +250,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_CDSP, IPCMEM_VPU, - 1, + CHANNEL_INACTIVE, }, /* VPU<->CVP (EVA) partition. */ { @@ -247,7 +259,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_VPU, IPCMEM_CVP, - 1, + CHANNEL_INACTIVE, }, /* APPS<->APPS partition. */ { @@ -256,7 +268,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, IPCMEM_APPS, - 1, + CHANNEL_INACTIVE, } /* Last entry uses invalid hosts and no protections to signify the end. */ /* { diff --git a/msm/synx/ipclite_client.h b/msm/synx/ipclite_client.h index 3ffa3a5652..37849db432 100644 --- a/msm/synx/ipclite_client.h +++ b/msm/synx/ipclite_client.h @@ -27,6 +27,11 @@ enum ipcmem_host_type { IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */ }; +/** + * IPCLite return codes + */ +#define IPCLITE_EINCHAN 9 /**< Inactive Channel */ + struct global_region_info { void *virt_base; uint32_t size; @@ -84,13 +89,13 @@ int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv); int32_t get_global_partition_info(struct global_region_info *global_ipcmem); /** - * ipclite_hwlock_reset() - Resets the lock if the lock is currently held by core_id + * ipclite_recover() - Recovers the ipclite if any core goes for SSR * - * core_id : takes the core id of which the lock needs to be resetted. + * core_id : takes the core id of the core which went to SSR. * * @return None. */ -void ipclite_hwlock_reset(enum ipcmem_host_type core_id); +void ipclite_recover(enum ipcmem_host_type core_id); /** * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value. 
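A note on the ipclite_client.h changes above: callers of ipclite_msg_send() can now tell an inactive channel apart from other failures and defer the send until the remote host marks the channel CHANNEL_ACTIVE. A minimal caller-side sketch; the retry policy below is an assumption made for illustration, only ipclite_msg_send() and the -IPCLITE_EINCHAN return code come from this series:

    #include <linux/delay.h>
    #include "ipclite_client.h"

    /* Illustrative caller: retry a host-to-host signal a few times while the
     * destination channel has not yet reached CHANNEL_ACTIVE. Retry count and
     * delay are assumptions made for this sketch, not part of the patch.
     */
    static int send_with_retry(int32_t proc_id, uint64_t data)
    {
            int ret, tries = 3;

            do {
                    ret = ipclite_msg_send(proc_id, data);
                    if (ret != -IPCLITE_EINCHAN)
                            break;
                    usleep_range(1000, 2000);
            } while (--tries);

            return ret;
    }
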
diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index 4ebf1b9cf4..0184d3ec48 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -744,7 +744,7 @@ int synx_global_recover(enum synx_core_id core_id) if (!clear_idx) return -SYNX_NOMEM; - ipclite_hwlock_reset(synx_global_map_core_id(core_id)); + ipclite_recover(synx_global_map_core_id(core_id)); /* recover synx gmem lock if it was owned by core in ssr */ if (synx_gmem_lock_owner(0) == core_id) { From f2f91c79c048b720c6695654e2743e40d9890427 Mon Sep 17 00:00:00 2001 From: NITIN LAXMIDAS NAIK Date: Mon, 31 Oct 2022 18:05:36 -0700 Subject: [PATCH 04/42] msm: synx: android.mk modification to enable automatic loading of drivers Modified android.mk to generate ipclite.ko in OUT/dlkm/lib/modules Change-Id: I374de933b73e8ba94d55836c527669570970db90 Signed-off-by: NITIN LAXMIDAS NAIK --- Android.mk | 11 +++++++++++ synx_kernel_board.mk | 1 + 2 files changed, 12 insertions(+) diff --git a/Android.mk b/Android.mk index 89c39caf25..53da29e886 100644 --- a/Android.mk +++ b/Android.mk @@ -39,6 +39,17 @@ $(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES)) LOCAL_MODULE := synx-driver.ko LOCAL_MODULE_KBUILD_NAME := msm/synx-driver.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk + + +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES)) +LOCAL_MODULE := ipclite.ko +LOCAL_MODULE_KBUILD_NAME := msm/synx/ipclite.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) # print out variables $(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) diff --git a/synx_kernel_board.mk b/synx_kernel_board.mk index 0163169aa8..90432a738c 100644 --- a/synx_kernel_board.mk +++ b/synx_kernel_board.mk @@ -12,6 +12,7 @@ endif ifeq ($(TARGET_SYNX_ENABLE), true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko endif From 4e4dd1af3754afe309b77147a58c0ce3f3235048 Mon Sep 17 00:00:00 2001 From: NITIN LAXMIDAS NAIK Date: Thu, 15 Dec 2022 10:34:17 -0800 Subject: [PATCH 05/42] msm: synx: synx_kernel_board.mk change to disable recovery mode Remove synx from recovery mode and add ipclite to ramdisk modules Change-Id: Iedec1f6961bef5be1b73fbd3543a01a651698780 Signed-off-by: NITIN LAXMIDAS NAIK --- synx_kernel_board.mk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synx_kernel_board.mk b/synx_kernel_board.mk index 90432a738c..b4fd6f17c4 100644 --- a/synx_kernel_board.mk +++ b/synx_kernel_board.mk @@ -14,7 +14,8 @@ ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko -BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko +#BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko endif endif From fd9e62c31f5664d98ee92ce4a526b333668e34e8 Mon Sep 17 00:00:00 
2001 From: Chelliah Vinu R Date: Tue, 15 Nov 2022 19:20:52 +0530 Subject: [PATCH 06/42] msm: synx: ipclite: Added ICP core to IPCLite Global memory Enables ICP to communicate with other cores through corresponding channels Change-Id: Id7e6e9e14ee257bcce014c29147877375d8d48bb Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.h | 66 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 51 insertions(+), 15 deletions(-) diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index f29053bba5..4d0a5d7232 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -16,7 +16,7 @@ #define IPCMEM_TOC_SIZE (4*1024) #define MAX_CHANNEL_SIGNALS 5 -#define MAX_PARTITION_COUNT 7 /*7 partitions other than global partition*/ +#define MAX_PARTITION_COUNT 11 /*11 partitions other than global partition*/ #define IPCLITE_MSG_SIGNAL 0 #define IPCLITE_MEM_INIT_SIGNAL 1 @@ -207,7 +207,7 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { * }, */ - /* Apps<->CDSP partition. */ + /* APPS<->CDSP partition. */ { 132 * 1024, 32 * 1024, @@ -225,45 +225,81 @@ const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { IPCMEM_CVP, CHANNEL_INACTIVE, }, - /* APPS<->VPU partition. */ + /* APPS<->CAM (ICP) partition. */ { 196 * 1024, 32 * 1024, IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, + IPCMEM_CAM, + CHANNEL_INACTIVE, + }, + /* APPS<->VPU (IRIS) partition. */ + { + 228 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_APPS, IPCMEM_VPU, CHANNEL_INACTIVE, }, /* CDSP<->CVP (EVA) partition. */ - { - 228 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CDSP, - IPCMEM_CVP, - CHANNEL_INACTIVE, - }, - /* CDSP<->VPU partition. */ { 260 * 1024, 32 * 1024, IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_CDSP, - IPCMEM_VPU, + IPCMEM_CVP, CHANNEL_INACTIVE, }, - /* VPU<->CVP (EVA) partition. */ + /* CDSP<->CAM (ICP) partition. */ { 292 * 1024, 32 * 1024, IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CDSP, + IPCMEM_CAM, + CHANNEL_INACTIVE, + }, + /* CDSP<->VPU (IRIS) partition. */ + { + 324 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CDSP, IPCMEM_VPU, + CHANNEL_INACTIVE, + }, + /* CVP<->CAM (ICP) partition. */ + { + 356 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_CVP, + IPCMEM_CAM, + CHANNEL_INACTIVE, + }, + /* CVP<->VPU (IRIS) partition. */ + { + 388 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CVP, + IPCMEM_VPU, + CHANNEL_INACTIVE, + }, + /* CAM<->VPU (IRIS) partition. */ + { + 420 * 1024, + 32 * 1024, + IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, + IPCMEM_CAM, + IPCMEM_VPU, CHANNEL_INACTIVE, }, /* APPS<->APPS partition. */ { - 326 * 1024, + 454 * 1024, 32 * 1024, IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, IPCMEM_APPS, From eeb75f9c2345e237764139aa6364d0dc8c705c49 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Mon, 14 Nov 2022 10:29:06 -0800 Subject: [PATCH 07/42] synx: support ICP core This change adds necessary functionality to support ICP core. 
Change-Id: I55793f6508ae5d7180b0f50d477e366842199148 Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx.c | 3 ++- msm/synx/synx_api.h | 4 +++- msm/synx/synx_global.c | 4 +++- msm/synx/synx_global.h | 4 +++- msm/synx/synx_util.c | 4 +++- 5 files changed, 14 insertions(+), 5 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 8135a6edc7..df440eb416 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -2455,6 +2455,7 @@ int synx_recover(enum synx_client_id id) switch (core_id) { case SYNX_CORE_EVA: case SYNX_CORE_IRIS: + case SYNX_CORE_ICP: break; default: dprintk(SYNX_ERR, "recovery not supported on %u\n", id); diff --git a/msm/synx/synx_api.h b/msm/synx/synx_api.h index 523e646666..a293d66d45 100644 --- a/msm/synx/synx_api.h +++ b/msm/synx/synx_api.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SYNX_API_H__ @@ -166,6 +166,7 @@ struct synx_queue_desc { * @SYNX_CLIENT_VID_CTX0 : Video Client 0 * @SYNX_CLIENT_NSP_CTX0 : NSP Client 0 * @SYNX_CLIENT_IFE_CTX0 : IFE Client 0 + * @SYNX_CLIENT_ICP_CTX0 : ICP Client 0 */ enum synx_client_id { SYNX_CLIENT_NATIVE = 0, @@ -180,6 +181,7 @@ enum synx_client_id { SYNX_CLIENT_VID_CTX0, SYNX_CLIENT_NSP_CTX0, SYNX_CLIENT_IFE_CTX0, + SYNX_CLIENT_ICP_CTX0, SYNX_CLIENT_MAX, }; diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index 0184d3ec48..3a18a8473c 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -151,6 +151,8 @@ u32 synx_global_map_core_id(enum synx_core_id id) host_id = IPCMEM_VPU; break; case SYNX_CORE_EVA: host_id = IPCMEM_CVP; break; + case SYNX_CORE_ICP: + host_id = IPCMEM_CAM; break; default: host_id = IPCMEM_NUM_HOSTS; dprintk(SYNX_ERR, "invalid core id\n"); diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 4e7646cd4d..592d713a4b 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SYNX_SHARED_MEM_H__ @@ -18,12 +18,14 @@ * SYNX_CORE_NSP : NSP core * SYNX_CORE_EVA : EVA core * SYNX_CORE_IRIS : IRIS core + * SYNX_CORE_ICP : ICP core */ enum synx_core_id { SYNX_CORE_APSS = 0, SYNX_CORE_NSP, SYNX_CORE_EVA, SYNX_CORE_IRIS, + SYNX_CORE_ICP, SYNX_CORE_MAX, }; diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 689a571c42..9b88943b18 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1511,6 +1511,8 @@ u32 synx_util_map_client_id_to_core( switch (id) { case SYNX_CLIENT_NATIVE: core_id = SYNX_CORE_APSS; break; + case SYNX_CLIENT_ICP_CTX0: + core_id = SYNX_CORE_ICP; break; case SYNX_CLIENT_EVA_CTX0: core_id = SYNX_CORE_EVA; break; case SYNX_CLIENT_VID_CTX0: From 82fbaea349505da39ed173285914810e38bbd5f1 Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Thu, 2 Feb 2023 20:36:58 +0530 Subject: [PATCH 08/42] msm: synx: ipclite: IPCLite Debug Below dynamic debug mechanisms are added: 1. Sysfs based control for kernel logs 2. In-memory logging 3. Debug Structures Change-Id: I1da118881b5e79ddd2ada91749da13233e360e16 Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 637 ++++++++++++++++++++++++++++++++------ msm/synx/ipclite.h | 98 +++++- msm/synx/ipclite_client.h | 4 +- 3 files changed, 632 insertions(+), 107 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 90daaab0fd..969a52dd21 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -19,6 +19,8 @@ #include #include +#include + #include "ipclite_client.h" #include "ipclite.h" @@ -28,18 +30,152 @@ #define VMID_CDSP 30 #define GLOBAL_ATOMICS_ENABLED 1 #define GLOBAL_ATOMICS_DISABLED 0 +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 static struct ipclite_info *ipclite; static struct ipclite_client synx_client; static struct ipclite_client test_client; -struct ipclite_hw_mutex_ops *ipclite_hw_mutex; -struct mutex ssr_mutex; -uint32_t channel_status_info[IPCMEM_NUM_HOSTS]; +static struct ipclite_hw_mutex_ops *ipclite_hw_mutex; +static struct ipclite_debug_info *ipclite_dbg_info; +static struct ipclite_debug_struct *ipclite_dbg_struct; +static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem; +static struct mutex ssr_mutex; +static struct kobject *sysfs_kobj; -u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; +static uint32_t channel_status_info[IPCMEM_NUM_HOSTS]; +static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; +static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO; +static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump; -#define FIFO_FULL_RESERVE 8 -#define FIFO_ALIGNMENT 8 +static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...) 
+{ + uint32_t local_index = 0; + va_list pArgs; + + va_start(pArgs, psztStr); + + /* Incrementing the index atomically and storing the index in local variable */ + local_index = ipclite_global_atomic_inc((ipclite_atomic_int32_t *) + &ipclite_dbg_info->debug_log_index); + local_index %= IPCLITE_LOG_BUF_SIZE; + + /* Writes data on the index location */ + vsnprintf(ipclite_dbg_inmem->IPCLITELog[local_index], IPCLITE_LOG_MSG_SIZE, psztStr, pArgs); + + va_end(pArgs); +} + +static void ipclite_dump_debug_struct(void) +{ + int i, host; + struct ipclite_debug_struct *temp_dbg_struct; + + /* Check if debug structures are initialized */ + if (!ipclite_dbg_info || !ipclite_dbg_struct) { + pr_err("Debug Structures not initialized\n"); + return; + } + + /* Check if debug structures are enabled before printing */ + if (!(ipclite_debug_control & IPCLITE_DBG_STRUCT)) { + pr_err("Debug Structures not enabled\n"); + return; + } + + /* Dumping the debug structures */ + pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n"); + + for (host = 0; host < IPCMEM_NUM_HOSTS; host++) { + if (ipclite->ipcmem.toc->recovery.configured_core[host]) { + temp_dbg_struct = (struct ipclite_debug_struct *) + (((char *)ipclite_dbg_struct) + + (sizeof(*temp_dbg_struct) * host)); + + pr_info("---------- Host ID: %d dbg_mem:%p ----------\n", + host, temp_dbg_struct); + pr_info("Total Signals Sent : %d Total Signals Received : %d\n", + temp_dbg_struct->dbg_info_overall.total_numsig_sent, + temp_dbg_struct->dbg_info_overall.total_numsig_recv); + pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n", + temp_dbg_struct->dbg_info_overall.last_sent_host_id, + temp_dbg_struct->dbg_info_overall.last_recv_host_id); + pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n", + temp_dbg_struct->dbg_info_overall.last_sigid_sent, + temp_dbg_struct->dbg_info_overall.last_sigid_recv); + + for (i = 0; i < IPCMEM_NUM_HOSTS; i++) { + if (ipclite->ipcmem.toc->recovery.configured_core[i]) { + pr_info("----------> Host ID : %d Host ID : %d Channel State: %d\n", + host, i, ipclite->ipcmem.toc->toc_entry[host][i].status); + pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n", + temp_dbg_struct->dbg_info_host[i].numsig_sent, + temp_dbg_struct->dbg_info_host[i].numsig_recv); + pr_info("No. 
of Interrupts Received : %d\n", + temp_dbg_struct->dbg_info_host[i].num_intr); + pr_info("TX Write Index : %d TX Read Index : %d\n", + temp_dbg_struct->dbg_info_host[i].tx_wr_index, + temp_dbg_struct->dbg_info_host[i].tx_rd_index); + pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0], + temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]); + pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1], + temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]); + pr_info("RX Write Index : %d RX Read Index : %d\n", + temp_dbg_struct->dbg_info_host[i].rx_wr_index, + temp_dbg_struct->dbg_info_host[i].rx_rd_index); + pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0], + temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]); + pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1], + temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]); + } + } + } + } + return; +} + +static void ipclite_dump_inmem_logs(void) +{ + int i; + uint32_t local_index = 0; + + /* Check if debug and inmem structures are initialized */ + if (!ipclite_dbg_info || !ipclite_dbg_inmem) { + pr_err("Debug structures not initialized\n"); + return; + } + + /* Check if debug structures are enabled before printing */ + if (!(ipclite_debug_control & IPCLITE_INMEM_LOG)) { + pr_err("In-Memory Logs not enabled\n"); + return; + } + + /* Dumping the debug in-memory logs */ + pr_info("------------------- Dumping In-Memory Logs -------------------\n"); + + /* Storing the index atomically in local variable */ + local_index = ipclite_global_atomic_load_u32((ipclite_atomic_uint32_t *) + &ipclite_dbg_info->debug_log_index); + + /* Printing from current index till the end of buffer */ + for (i = local_index % IPCLITE_LOG_BUF_SIZE; i < IPCLITE_LOG_BUF_SIZE; i++) { + if (ipclite_dbg_inmem->IPCLITELog[i][0]) + pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]); + } + + /* Printing from 0th index to current-1 index */ + for (i = 0; i < local_index % IPCLITE_LOG_BUF_SIZE; i++) { + if (ipclite_dbg_inmem->IPCLITELog[i][0]) + pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]); + } + + return; +} static void ipclite_hw_mutex_acquire(void) { @@ -50,12 +186,14 @@ static void ipclite_hw_mutex_acquire(void) ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, HWSPINLOCK_TIMEOUT, &ipclite->ipclite_hw_mutex->flags); - if (ret) - pr_err("Hw mutex lock acquire failed\n"); + if (ret) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed\n"); + return; + } ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS; - pr_debug("Hw mutex lock acquired\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n"); } } } @@ -68,7 +206,7 @@ static void ipclite_hw_mutex_release(void) IPCMEM_INVALID_HOST; hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->ipclite_hw_mutex->flags); - pr_debug("Hw mutex lock release\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n"); } } } @@ -76,14 +214,12 @@ static void ipclite_hw_mutex_release(void) void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data) { atomic_set(addr, data); - pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr)); } EXPORT_SYMBOL(ipclite_atomic_init_u32); void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data) { atomic_set(addr, data); - pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr)); } 
EXPORT_SYMBOL(ipclite_atomic_init_i32); @@ -93,7 +229,6 @@ void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t dat ipclite->ipclite_hw_mutex->acquire(); atomic_set(addr, data); - pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -106,7 +241,6 @@ void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data) ipclite->ipclite_hw_mutex->acquire(); atomic_set(addr, data); - pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -121,7 +255,6 @@ uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr) ipclite->ipclite_hw_mutex->acquire(); ret = atomic_read(addr); - pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -138,7 +271,6 @@ int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr) ipclite->ipclite_hw_mutex->acquire(); ret = atomic_read(addr); - pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(int32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -156,7 +288,6 @@ uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *a ipclite->ipclite_hw_mutex->acquire(); ret = atomic_fetch_or(mask, addr); - pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -174,7 +305,6 @@ uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t ipclite->ipclite_hw_mutex->acquire(); ret = atomic_fetch_and(~mask, addr); - pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -191,7 +321,6 @@ int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr) ipclite->ipclite_hw_mutex->acquire(); ret = atomic_fetch_add(1, addr); - pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -208,7 +337,6 @@ int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr) ipclite->ipclite_hw_mutex->acquire(); ret = atomic_fetch_sub(1, addr); - pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr)); /* callback to release hw mutex lock if atomic support is not enabled */ ipclite->ipclite_hw_mutex->release(); @@ -225,7 +353,9 @@ static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo) head = le32_to_cpu(*rx_fifo->head); tail = le32_to_cpu(*rx_fifo->tail); - pr_debug("head=%d, tail=%d\n", head, tail); + + IPCLITE_OS_LOG(IPCLITE_DBG, "head=%d, tail=%d\n", head, tail); + if (head < tail) len = rx_fifo->length - tail + head; else @@ -233,7 +363,9 @@ static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo) if (WARN_ON_ONCE(len > rx_fifo->length)) len = 0; - pr_debug("len=%d\n", len); + + IPCLITE_OS_LOG(IPCLITE_DBG, "len=%d\n", len); + return len; } @@ -260,7 +392,7 @@ static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo, } static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, - size_t count) + size_t count, uint32_t core_id) 
{ u32 tail; @@ -271,6 +403,24 @@ static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, tail %= rx_fifo->length; *rx_fifo->tail = cpu_to_le32(tail); + + /* Storing the debug data in debug structures */ + if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[1] = + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0]; + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0] = + ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index; + ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index = *rx_fifo->head; + + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[1] = + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0]; + ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0] = + ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index; + ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index = *rx_fifo->tail; + + ipclite_dbg_struct->dbg_info_overall.total_numsig_recv++; + ipclite_dbg_struct->dbg_info_host[core_id].numsig_recv++; + } } static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo) @@ -322,7 +472,7 @@ static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo, } static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo, - const void *data, size_t dlen) + const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id) { unsigned int head; @@ -337,7 +487,29 @@ static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo, wmb(); *tx_fifo->head = cpu_to_le32(head); - pr_debug("head = %d\n", *tx_fifo->head); + + IPCLITE_OS_LOG(IPCLITE_DBG, "head : %d core_id : %d signal_id : %d\n", + *tx_fifo->head, core_id, signal_id); + + /* Storing the debug data in debug structures */ + if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[1] = + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0]; + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0] = + ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index; + ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index = *tx_fifo->head; + + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[1] = + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0]; + ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0] = + ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index; + ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index = *tx_fifo->tail; + + ipclite_dbg_struct->dbg_info_overall.total_numsig_sent++; + ipclite_dbg_struct->dbg_info_host[core_id].numsig_sent++; + ipclite_dbg_struct->dbg_info_overall.last_sent_host_id = core_id; + ipclite_dbg_struct->dbg_info_overall.last_sigid_sent = signal_id; + } } static size_t ipclite_rx_avail(struct ipclite_channel *channel) @@ -354,7 +526,7 @@ static void ipclite_rx_peak(struct ipclite_channel *channel, static void ipclite_rx_advance(struct ipclite_channel *channel, size_t count) { - channel->rx_fifo->advance(channel->rx_fifo, count); + channel->rx_fifo->advance(channel->rx_fifo, count, channel->remote_pid); } static size_t ipclite_tx_avail(struct ipclite_channel *channel) @@ -365,7 +537,8 @@ static size_t ipclite_tx_avail(struct ipclite_channel *channel) static void ipclite_tx_write(struct ipclite_channel *channel, const void *data, size_t dlen) { - channel->tx_fifo->write(channel->tx_fifo, data, dlen); + channel->tx_fifo->write(channel->tx_fifo, data, dlen, channel->remote_pid, + channel->irq_info->signal_id); } static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail) @@ 
-374,7 +547,8 @@ static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail) int ret = 0; if (avail < sizeof(data)) { - pr_err("Not enough data in fifo\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n", + channel->remote_pid, channel->irq_info->signal_id); return -EAGAIN; } @@ -395,7 +569,8 @@ static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail) int ret = 0; if (avail < sizeof(data)) { - pr_err("Not enough data in fifo\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n", + channel->remote_pid, channel->irq_info->signal_id); return -EAGAIN; } @@ -418,10 +593,19 @@ static irqreturn_t ipclite_intr(int irq, void *data) int ret = 0; uint64_t msg; - pr_debug("Interrupt received\n"); irq_info = (struct ipclite_irq_info *)data; channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]); + IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt received from Core : %d Signal : %d\n", + channel->remote_pid, irq_info->signal_id); + + /* Storing the debug data in debug structures */ + if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + ipclite_dbg_struct->dbg_info_host[channel->remote_pid].num_intr++; + ipclite_dbg_struct->dbg_info_overall.last_recv_host_id = channel->remote_pid; + ipclite_dbg_struct->dbg_info_overall.last_sigid_recv = irq_info->signal_id; + } + if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) { for (;;) { avail = ipclite_rx_avail(channel); @@ -430,10 +614,9 @@ static irqreturn_t ipclite_intr(int irq, void *data) ret = ipclite_rx_data(channel, avail); } - pr_debug("checking messages in rx_fifo done\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n"); } else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) { - /* check_version_compatibility();*/ - pr_debug("version matching sequence completed\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is currently not enabled\n"); } else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) { for (;;) { avail = ipclite_rx_avail(channel); @@ -442,10 +625,12 @@ static irqreturn_t ipclite_intr(int irq, void *data) ret = ipclite_rx_test_data(channel, avail); } - pr_debug("checking messages in rx_fifo done\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n"); } else { - pr_err("wrong interrupt signal received, signal_id =%d\n", irq_info->signal_id); + IPCLITE_OS_LOG(IPCLITE_ERR, "Wrong Interrupt Signal from core : %d signal : %d\n", + channel->remote_pid, irq_info->signal_id); } + return IRQ_HANDLED; } @@ -472,12 +657,12 @@ static int ipclite_tx(struct ipclite_channel *channel, return ret; } -int ipclite_ssr_update(int32_t proc_id) +static int ipclite_send_debug_info(int32_t proc_id) { int ret = 0; if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - pr_debug("Invalid proc_id %d\n", proc_id); + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); return -EINVAL; } @@ -485,33 +670,70 @@ int ipclite_ssr_update(int32_t proc_id) if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { channel_status_info[proc_id] = CHANNEL_ACTIVE; } else { - pr_err("Cannot send msg to remote client. 
Channel inactive\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); + return -IPCLITE_EINCHAN; + } + } + + ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, + NULL); + if (ret < IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, + "Debug Signal sending failed to Core : %d Signal : %d ret : %d\n", + proc_id, IPCLITE_DEBUG_SIGNAL, ret); + return -IPCLITE_FAILURE; + } + + IPCLITE_OS_LOG(IPCLITE_DBG, + "Debug Signal send completed to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_DEBUG_SIGNAL, ret); + return IPCLITE_SUCCESS; +} + +int ipclite_ssr_update(int32_t proc_id) +{ + int ret = 0; + + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); + return -EINVAL; + } + + if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { + if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { + channel_status_info[proc_id] = CHANNEL_ACTIVE; + } else { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); return -IPCLITE_EINCHAN; } } ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL); - if (ret < 0) { - pr_debug("Signal sending failed to core : %d ret : %d\n", proc_id, ret); - return ret; + if (ret < IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, + "SSR Signal sending failed to Core : %d Signal : %d ret : %d\n", + proc_id, IPCLITE_SSR_SIGNAL, ret); + return -IPCLITE_FAILURE; } - pr_debug("SSR update send completed with ret=%d\n", ret); - return ret; + IPCLITE_OS_LOG(IPCLITE_DBG, + "SSR Signal send completed to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_SSR_SIGNAL, ret); + return IPCLITE_SUCCESS; } void ipclite_recover(enum ipcmem_host_type core_id) { int ret, i, host, host0, host1; - pr_debug("IPCLite Recover - Crashed Core : %d\n", core_id); + IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id); /* verify and reset the hw mutex lock */ if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) { ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; hwspin_unlock_raw(ipclite->hwlock); - pr_debug("HW Lock Reset\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n"); } mutex_lock(&ssr_mutex); @@ -530,9 +752,9 @@ void ipclite_recover(enum ipcmem_host_type core_id) channel_status_info[core_id] = ipclite->ipcmem.toc->toc_entry[host0][host1].status; } - pr_debug("Global Channel Status : [%d][%d] : %d\n", host0, host1, + IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host0, host1, ipclite->ipcmem.toc->toc_entry[host0][host1].status); - pr_debug("Global Channel Status : [%d][%d] : %d\n", host1, host0, + IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host1, host0, ipclite->ipcmem.toc->toc_entry[host1][host0].status); } @@ -540,8 +762,10 @@ void ipclite_recover(enum ipcmem_host_type core_id) *(ipclite->channel[core_id].tx_fifo->head) = 0; *(ipclite->channel[core_id].rx_fifo->tail) = 0; - pr_debug("TX Fifo Reset : %d\n", *(ipclite->channel[core_id].tx_fifo->head)); - pr_debug("RX Fifo Reset : %d\n", *(ipclite->channel[core_id].rx_fifo->tail)); + IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n", + *(ipclite->channel[core_id].tx_fifo->head)); + IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n", + *(ipclite->channel[core_id].rx_fifo->tail)); /* Increment the Global Channel Status for APPS and crashed core*/ ipclite_global_atomic_inc((ipclite_atomic_int32_t *) 
@@ -554,15 +778,24 @@ void ipclite_recover(enum ipcmem_host_type core_id) /* Update other cores about SSR */ for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { - if (host != core_id) { + if (host != core_id && ipclite->ipcmem.toc->recovery.configured_core[host]) { ret = ipclite_ssr_update(host); - if (ret < 0) - pr_debug("Failed to send the SSR update %d\n", host); + if (ret < IPCLITE_SUCCESS) + IPCLITE_OS_LOG(IPCLITE_ERR, + "Failed to send SSR update to core : %d\n", host); else - pr_debug("SSR update sent to host %d\n", host); + IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host); } } mutex_unlock(&ssr_mutex); + + /* Dump the debug information */ + if (ipclite_debug_dump & IPCLITE_DUMP_SSR) { + ipclite_dump_debug_struct(); + ipclite_dump_inmem_logs(); + } + + return; } EXPORT_SYMBOL(ipclite_recover); @@ -571,7 +804,7 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data) int ret = 0; if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - pr_err("Invalid proc_id %d\n", proc_id); + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); return -EINVAL; } @@ -579,14 +812,16 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data) if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { channel_status_info[proc_id] = CHANNEL_ACTIVE; } else { - pr_err("Cannot send msg to remote client. Channel inactive\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); return -IPCLITE_EINCHAN; } } ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_MSG_SIGNAL); - pr_debug("Message send completed with ret=%d\n", ret); + + IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_MSG_SIGNAL, ret); return ret; } EXPORT_SYMBOL(ipclite_msg_send); @@ -594,13 +829,13 @@ EXPORT_SYMBOL(ipclite_msg_send); int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv) { if (!cb_func_ptr) { - pr_err("Invalid callback pointer\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); return -EINVAL; } synx_client.callback = cb_func_ptr; synx_client.priv_data = priv; synx_client.reg_complete = 1; - pr_debug("Client Registration completed\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n"); return 0; } EXPORT_SYMBOL(ipclite_register_client); @@ -610,7 +845,7 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data) int ret = 0; if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - pr_err("Invalid proc_id %d\n", proc_id); + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); return -EINVAL; } @@ -618,14 +853,16 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data) if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { channel_status_info[proc_id] = CHANNEL_ACTIVE; } else { - pr_err("Cannot send msg to remote client. 
Channel inactive\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); return -IPCLITE_EINCHAN; } } ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_TEST_SIGNAL); - pr_debug("Message send completed with ret=%d\n", ret); + + IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_TEST_SIGNAL, ret); return ret; } EXPORT_SYMBOL(ipclite_test_msg_send); @@ -633,13 +870,13 @@ EXPORT_SYMBOL(ipclite_test_msg_send); int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv) { if (!cb_func_ptr) { - pr_err("Invalid callback pointer\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); return -EINVAL; } test_client.callback = cb_func_ptr; test_client.priv_data = priv; test_client.reg_complete = 1; - pr_debug("Test Client Registration Completed\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n"); return 0; } EXPORT_SYMBOL(ipclite_register_test_client); @@ -655,7 +892,7 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name) np = of_parse_phandle(dev->of_node, name, 0); if (!np) { - pr_err("No %s specified\n", name); + IPCLITE_OS_LOG(IPCLITE_ERR, "No %s specified\n", name); return -EINVAL; } @@ -671,7 +908,7 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name) if (!ipclite->ipcmem.mem.virt_base) return -ENOMEM; - pr_debug("aux_base = %lx, size=%d,virt_base=%p\n", + IPCLITE_OS_LOG(IPCLITE_DBG, "aux_base = %lx, size=%d,virt_base=%p\n", ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size, ipclite->ipcmem.mem.virt_base); @@ -684,10 +921,10 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) int i = 0; ipcmem->toc = ipcmem->mem.virt_base; - pr_debug("toc_base = %p\n", ipcmem->toc); + IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc); ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE; - pr_debug("toc->hdr.size = %d\n", ipcmem->toc->hdr.size); + IPCLITE_OS_LOG(IPCLITE_DBG, "toc->hdr.size = %d\n", ipcmem->toc->hdr.size); /*Fill in global partition details*/ ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry; @@ -695,13 +932,13 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) ((char *)ipcmem->mem.virt_base + ipcmem_toc_global_partition_entry.base_offset); - pr_debug("base_offset =%x,ipcmem->global_partition = %p\n", + IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n", ipcmem_toc_global_partition_entry.base_offset, ipcmem->global_partition); ipcmem->global_partition->hdr = global_partition_hdr; - pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", + IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", ipcmem->global_partition->hdr.partition_type, ipcmem->global_partition->hdr.region_offset, ipcmem->global_partition->hdr.region_size); @@ -710,7 +947,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) for (i = 0; i < MAX_PARTITION_COUNT; i++) { host0 = ipcmem_toc_partition_entries[i].host0; host1 = ipcmem_toc_partition_entries[i].host1; - pr_debug("host0 = %d, host1=%d\n", host0, host1); + IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1); ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i]; ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i]; @@ -741,7 +978,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) ((char *)ipcmem->mem.virt_base + ipcmem_toc_partition_entries[i].base_offset); - pr_debug("partition[%d] = %p,partition_base_offset[%d]=%lx\n", + 
IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx\n", i, ipcmem->partition[i], i, ipcmem_toc_partition_entries[i].base_offset); @@ -750,7 +987,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) else ipcmem->partition[i]->hdr = default_partition_hdr; - pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", + IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", ipcmem->partition[i]->hdr.type, ipcmem->partition[i]->hdr.desc_offset, ipcmem->partition[i]->hdr.desc_size); @@ -760,7 +997,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) wmb(); ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED; - pr_debug("Ipcmem init completed\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n"); } @@ -784,9 +1021,10 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n { int ret = 0; u32 index; - char strs[5][9] = {"msg", "mem-init", "version", "test", "ssr"}; struct ipclite_irq_info *irq_info; struct device *dev; + char strs[MAX_CHANNEL_SIGNALS][IPCLITE_SIGNAL_LABEL_SIZE] = { + "msg", "mem-init", "version", "test", "ssr", "debug"}; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) @@ -795,10 +1033,10 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n dev->parent = parent; dev->of_node = node; dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node); - pr_debug("Registering %s device\n", dev_name(parent->parent)); + IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent)); ret = device_register(dev); if (ret) { - pr_err("failed to register ipclite child node\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite child node\n"); put_device(dev); return ret; } @@ -806,36 +1044,36 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n ret = of_property_read_u32(dev->of_node, "index", &index); if (ret) { - pr_err("failed to parse index\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse index\n"); goto err_dev; } irq_info = &channel->irq_info[index]; - pr_debug("irq_info[%d]=%p\n", index, irq_info); + IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d]=%p\n", index, irq_info); irq_info->mbox_client.dev = dev; irq_info->mbox_client.knows_txdone = true; irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0); - pr_debug("irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan); + IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan); if (IS_ERR(irq_info->mbox_chan)) { if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER) - pr_err("failed to acquire IPC channel\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to acquire IPC channel\n"); goto err_dev; } snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]); irq_info->irq = of_irq_get(dev->of_node, 0); - pr_debug("irq[%d] = %d\n", index, irq_info->irq); + IPCLITE_OS_LOG(IPCLITE_DBG, "irq[%d] = %d\n", index, irq_info->irq); irq_info->signal_id = index; ret = devm_request_irq(dev, irq_info->irq, ipclite_intr, IRQF_NO_SUSPEND | IRQF_SHARED, irq_info->irqname, irq_info); if (ret) { - pr_err("failed to request IRQ\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to request IRQ\n"); goto err_dev; } - pr_debug("Interrupt init completed, ret = %d\n", ret); + IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt init completed, ret = %d\n", ret); return 0; err_dev: @@ -848,6 +1086,12 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem) { struct ipcmem_global_partition *global_partition; + /* Check added to verify ipclite is 
initialized */ + if (!ipclite) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n"); + return -ENOMEM; + } + if (!global_ipcmem) return -EINVAL; @@ -856,7 +1100,7 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem) global_partition->hdr.region_offset); global_ipcmem->size = (size_t)(global_partition->hdr.region_size); - pr_debug("base = %p, size=%lx\n", global_ipcmem->virt_base, + IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base, global_ipcmem->size); return 0; } @@ -871,7 +1115,7 @@ static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_m static void ipclite_channel_release(struct device *dev) { - pr_info("Releasing ipclite channel\n"); + IPCLITE_OS_LOG(IPCLITE_INFO, "Releasing ipclite channel\n"); kfree(dev); } @@ -901,10 +1145,10 @@ static int ipclite_channel_init(struct device *parent, dev->of_node = node; dev->release = ipclite_channel_release; dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node); - pr_debug("Registering %s device\n", dev_name(parent->parent)); + IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent)); ret = device_register(dev); if (ret) { - pr_err("failed to register ipclite device\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite device\n"); put_device(dev); kfree(dev); return ret; @@ -918,7 +1162,7 @@ static int ipclite_channel_init(struct device *parent, dev_err(dev, "failed to parse qcom,remote-pid\n"); goto err_put_dev; } - pr_debug("remote_pid = %d, local_pid=%d\n", remote_pid, local_pid); + IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid); ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL); if (!ipclite_hw_mutex) { @@ -940,13 +1184,13 @@ static int ipclite_channel_init(struct device *parent, ret = -ENOMEM; goto err_put_dev; } - pr_debug("rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo); + IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo); partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid); - pr_debug("partition_hdr = %p\n", partition_hdr); + IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr); descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset); - pr_debug("descs = %p\n", descs); + IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs); if (local_pid < remote_pid) { tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset; @@ -999,13 +1243,13 @@ static int ipclite_channel_init(struct device *parent, ret = ipclite_channel_irq_init(dev, child, &ipclite->channel[remote_pid]); if (ret) { - pr_err("irq setup for ipclite channel failed\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "irq setup for ipclite channel failed\n"); goto err_put_dev; } } ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE; - pr_debug("Channel init completed, ret = %d\n", ret); + IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret); return ret; err_put_dev: @@ -1021,7 +1265,177 @@ static void probe_subsystem(struct device *dev, struct device_node *np) ret = ipclite_channel_init(dev, np); if (ret) - pr_err("IPCLite Channel init failed\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Channel init failed\n"); +} + +static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int ret = 0, host = 0; + + /* Parse the string from Sysfs Interface */ + ret = kstrtoint(buf, 0, &ipclite_debug_level); + if (ret < IPCLITE_SUCCESS) { 
+ IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); + return -IPCLITE_FAILURE; + } + + /* Check if debug structure is initialized */ + if (!ipclite_dbg_info) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n"); + return -ENOMEM; + } + + /* Update the Global Debug variable for FW cores */ + ipclite_dbg_info->debug_level = ipclite_debug_level; + + /* Memory Barrier to make sure all writes are completed */ + wmb(); + + /* Signal other cores for updating the debug information */ + for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { + if (ipclite->ipcmem.toc->recovery.configured_core[host]) { + ret = ipclite_send_debug_info(host); + if (ret < IPCLITE_SUCCESS) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", + host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); + } + } + + return count; +} + +static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int ret = 0, host = 0; + + /* Parse the string from Sysfs Interface */ + ret = kstrtoint(buf, 0, &ipclite_debug_control); + if (ret < IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); + return -IPCLITE_FAILURE; + } + + /* Check if debug structures are initialized */ + if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n"); + return -ENOMEM; + } + + /* Update the Global Debug variable for FW cores */ + ipclite_dbg_info->debug_control = ipclite_debug_control; + + /* Memory Barrier to make sure all writes are completed */ + wmb(); + + /* Signal other cores for updating the debug information */ + for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { + if (ipclite->ipcmem.toc->recovery.configured_core[host]) { + ret = ipclite_send_debug_info(host); + if (ret < IPCLITE_SUCCESS) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", + host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); + } + } + + return count; +} + +static ssize_t ipclite_dbg_dump_write(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int ret = 0; + + /* Parse the string from Sysfs Interface */ + ret = kstrtoint(buf, 0, &ipclite_debug_dump); + if (ret < IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); + return -IPCLITE_FAILURE; + } + + /* Check if debug structures are initialized */ + if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n"); + return -ENOMEM; + } + + /* Dump the debug information */ + if (ipclite_debug_dump & IPCLITE_DUMP_DBG_STRUCT) + ipclite_dump_debug_struct(); + + if (ipclite_debug_dump & IPCLITE_DUMP_INMEM_LOG) + ipclite_dump_inmem_logs(); + + return count; +} + +struct kobj_attribute sysfs_dbg_lvl = __ATTR(ipclite_debug_level, 0660, + NULL, ipclite_dbg_lvl_write); +struct kobj_attribute sysfs_dbg_ctrl = __ATTR(ipclite_debug_control, 0660, + NULL, ipclite_dbg_ctrl_write); +struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660, + NULL, ipclite_dbg_dump_write); + +static int ipclite_debug_sysfs_setup(void) +{ + /* Creating a directory in /sys/kernel/ */ + sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj); + if (!sysfs_kobj) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n"); + return -IPCLITE_FAILURE; + } + + /* Creating sysfs files/interfaces for debug */ + if 
(sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr)) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n"); + return -IPCLITE_FAILURE; + } + + if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr)) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n"); + return -IPCLITE_FAILURE; + } + + if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr)) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n"); + return -IPCLITE_FAILURE; + } + + return IPCLITE_SUCCESS; +} + +static int ipclite_debug_info_setup(void) +{ + /* Setting up the Debug Structures */ + ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base + + ipclite->ipcmem.mem.size) - IPCLITE_DEBUG_SIZE); + if (!ipclite_dbg_info) + return -EADDRNOTAVAIL; + + ipclite_dbg_struct = (struct ipclite_debug_struct *) + (((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) + + (sizeof(*ipclite_dbg_struct) * IPCMEM_APPS)); + if (!ipclite_dbg_struct) + return -EADDRNOTAVAIL; + + ipclite_dbg_inmem = (struct ipclite_debug_inmem_buf *) + (((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) + + (sizeof(*ipclite_dbg_struct) * IPCMEM_NUM_HOSTS)); + + if (!ipclite_dbg_inmem) + return -EADDRNOTAVAIL; + + IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n", + ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, IPCLITE_DEBUG_SIZE); + IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n", + ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem); + + return IPCLITE_SUCCESS; } static int ipclite_probe(struct platform_device *pdev) @@ -1048,22 +1462,23 @@ static int ipclite_probe(struct platform_device *pdev) ret = hwlock_id; goto error; } - pr_debug("Hwlock id retrieved, hwlock_id=%d\n", hwlock_id); + IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id); ipclite->hwlock = hwspin_lock_request_specific(hwlock_id); if (!ipclite->hwlock) { - pr_err("Failed to assign hwlock_id\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n"); ret = -ENXIO; goto error; } - pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock); + IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n", + ipclite->hwlock); /* Initializing Local Mutex Lock for SSR functionality */ mutex_init(&ssr_mutex); ret = map_ipcmem(ipclite, "memory-region"); if (ret) { - pr_err("failed to map ipcmem\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to map ipcmem\n"); goto release; } mem = &(ipclite->ipcmem.mem); @@ -1071,12 +1486,26 @@ static int ipclite_probe(struct platform_device *pdev) ret = set_ipcmem_access_control(ipclite); if (ret) { - pr_err("failed to set access control policy\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to set access control policy\n"); goto release; } ipcmem_init(&ipclite->ipcmem); + /* Set up sysfs for debug */ + ret = ipclite_debug_sysfs_setup(); + if (ret != IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n"); + goto release; + } + + /* Mapping Debug Memory */ + ret = ipclite_debug_info_setup(); + if (ret != IPCLITE_SUCCESS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n"); + goto release; + } + /* Setup Channel for each Remote Subsystem */ for_each_available_child_of_node(pn, cn) probe_subsystem(&pdev->dev, cn); @@ -1099,7 +1528,7 @@ static int ipclite_probe(struct platform_device *pdev) GLOBAL_ATOMICS_DISABLED; } - pr_debug("global_atomic_support : %d\n", + IPCLITE_OS_LOG(IPCLITE_DBG, 
"global_atomic_support : %d\n", ipclite->ipcmem.toc->ipclite_features.global_atomic_support); /* hw mutex callbacks */ @@ -1112,7 +1541,11 @@ static int ipclite_probe(struct platform_device *pdev) /* initialize hwlock owner to invalid host */ ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; - pr_info("IPCLite probe completed successfully\n"); + /* Update the Global Debug variable for FW cores */ + ipclite_dbg_info->debug_level = ipclite_debug_level; + ipclite_dbg_info->debug_control = ipclite_debug_control; + + IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite probe completed successfully\n"); return ret; mem_release: @@ -1125,7 +1558,7 @@ mem_release: release: kfree(ipclite); error: - pr_err("IPCLite probe failed\n"); + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n"); return ret; } diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index 4d0a5d7232..3c1960ae90 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -14,7 +14,7 @@ #define ACTIVE_CHANNEL 0x1 #define IPCMEM_TOC_SIZE (4*1024) -#define MAX_CHANNEL_SIGNALS 5 +#define MAX_CHANNEL_SIGNALS 6 #define MAX_PARTITION_COUNT 11 /*11 partitions other than global partition*/ @@ -23,6 +23,7 @@ #define IPCLITE_VERSION_SIGNAL 2 #define IPCLITE_TEST_SIGNAL 3 #define IPCLITE_SSR_SIGNAL 4 +#define IPCLITE_DEBUG_SIGNAL 5 /** Flag definitions for the entries */ #define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01) @@ -45,8 +46,97 @@ #define CONFIGURED_CORE 1 +#define IPCLITE_DEBUG_SIZE (64 * 1024) +#define IPCLITE_DEBUG_INFO_SIZE 256 +#define IPCLITE_CORE_DBG_LABEL "APSS:" +#define IPCLITE_LOG_MSG_SIZE 100 +#define IPCLITE_LOG_BUF_SIZE 512 +#define IPCLITE_DBG_LABEL_SIZE 5 +#define IPCLITE_SIGNAL_LABEL_SIZE 10 +#define PREV_INDEX 2 + +#define IPCLITE_OS_LOG(__level, __fmt, arg...) \ + do { \ + if (ipclite_debug_level & __level) { \ + if (ipclite_debug_control & IPCLITE_DMESG_LOG) { \ + pr_info(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \ + ipclite_dbg_label[__level], ## arg); \ + } \ + if (ipclite_debug_control & IPCLITE_INMEM_LOG) { \ + IPCLITE_OS_INMEM_LOG(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \ + ipclite_dbg_label[__level], ## arg); \ + } \ + } \ + } while (0) + /*IPCMEM Structure Definitions*/ +enum ipclite_debug_level { + IPCLITE_ERR = 0x0001, + IPCLITE_WARN = 0x0002, + IPCLITE_INFO = 0x0004, + IPCLITE_DBG = 0x0008, +}; + +enum ipclite_debug_control { + IPCLITE_DMESG_LOG = 0x0001, + IPCLITE_DBG_STRUCT = 0x0002, + IPCLITE_INMEM_LOG = 0x0004, +}; + +enum ipclite_debug_dump { + IPCLITE_DUMP_DBG_STRUCT = 0x0001, + IPCLITE_DUMP_INMEM_LOG = 0x0002, + IPCLITE_DUMP_SSR = 0x0004, +}; + +static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = { + [IPCLITE_ERR] = "err", + [IPCLITE_WARN] = "warn", + [IPCLITE_INFO] = "info", + [IPCLITE_DBG] = "dbg" +}; + +struct ipclite_debug_info_host { + uint32_t numsig_sent; //no. of signals sent from the core + uint32_t numsig_recv; //no. of signals received on the core + uint32_t tx_wr_index; //write index of tx queue + uint32_t tx_rd_index; //read index of tx queue + uint32_t rx_wr_index; //write index of rx queue + uint32_t rx_rd_index; //read index of rx queue + uint32_t num_intr; //no. 
of interrupts received on the core + uint32_t prev_tx_wr_index[PREV_INDEX]; //previous write index of tx queue + uint32_t prev_tx_rd_index[PREV_INDEX]; //previous read index of tx queue + uint32_t prev_rx_wr_index[PREV_INDEX]; //previous write index of rx queue + uint32_t prev_rx_rd_index[PREV_INDEX]; //previous read index of rx queue +}; + +struct ipclite_debug_info_overall { + uint32_t total_numsig_sent; //total no. of signals sent + uint32_t total_numsig_recv; //total no. of signals received + uint32_t last_sent_host_id; //last signal sent to host + uint32_t last_recv_host_id; //last signal received from host + uint32_t last_sigid_sent; //last sent signal id + uint32_t last_sigid_recv; //last received signal id +}; + +struct ipclite_debug_info { + uint32_t debug_version; + uint32_t debug_level; + uint32_t debug_control; + uint32_t debug_dump; + uint32_t debug_log_index; +}; + +struct ipclite_debug_inmem_buf { + char IPCLITELog[IPCLITE_LOG_BUF_SIZE][IPCLITE_LOG_MSG_SIZE]; +}; + +struct ipclite_debug_struct { + struct ipclite_debug_info_overall dbg_info_overall; + struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS]; +}; + struct ipclite_features { uint32_t global_atomic_support; uint32_t version_finalised; @@ -130,13 +220,13 @@ struct ipclite_fifo { size_t (*avail)(struct ipclite_fifo *fifo); void (*peak)(struct ipclite_fifo *fifo, - void *data, size_t count); + void *data, size_t count); void (*advance)(struct ipclite_fifo *fifo, - size_t count); + size_t count, uint32_t core_id); void (*write)(struct ipclite_fifo *fifo, - const void *data, size_t dlen); + const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id); void (*reset)(struct ipclite_fifo *fifo); }; diff --git a/msm/synx/ipclite_client.h b/msm/synx/ipclite_client.h index 37849db432..419d9e2330 100644 --- a/msm/synx/ipclite_client.h +++ b/msm/synx/ipclite_client.h @@ -30,7 +30,9 @@ enum ipcmem_host_type { /** * IPCLite return codes */ -#define IPCLITE_EINCHAN 9 /**< Inactive Channel */ +#define IPCLITE_SUCCESS 0 /*< Success > */ +#define IPCLITE_FAILURE 1 /*< Failure > */ +#define IPCLITE_EINCHAN 9 /*< Inactive Channel */ struct global_region_info { void *virt_base; From a1529349b1d68c50bb07caed6df76e2b0ed71db5 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Fri, 17 Feb 2023 14:11:51 -0800 Subject: [PATCH 09/42] synx: Disable DBG level Disable DBG level to prevent log flood Change-Id: If3bf49e9a723ac0a94d5e7775292d9a5d550570f Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/msm/synx/synx_debugfs.c b/msm/synx/synx_debugfs.c index 8d11ae9ff6..711fa31424 100644 --- a/msm/synx/synx_debugfs.c +++ b/msm/synx/synx_debugfs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -23,7 +23,7 @@ int synx_columns = NAME_COLUMN | ID_COLUMN | EXPORT_SYMBOL(synx_columns); int synx_debug = SYNX_ERR | SYNX_WARN | - SYNX_INFO | SYNX_DBG; + SYNX_INFO; EXPORT_SYMBOL(synx_debug); void populate_bound_rows( From 33788f7297c90ca26c351b99a0e5f7ec0ab57c5e Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Fri, 17 Feb 2023 15:30:20 -0800 Subject: [PATCH 10/42] synx: Propagating changes from msm-5.10 Includes param change for async_wait(timeout) and other fixes Change-Id: If8ff795538bbfaf53ee1758561fbd2841e5a71c7 Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx.c | 24 ++++++++++++++++-- msm/synx/synx_api.h | 6 ++++- msm/synx/synx_global.c | 55 ++++++++++++++++++++++++++++++++++++++++-- msm/synx/synx_global.h | 10 ++++++++ msm/synx/synx_util.c | 35 ++++++++++++++++++++++++--- 5 files changed, 122 insertions(+), 8 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index df440eb416..993c355761 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -519,6 +519,13 @@ void synx_signal_handler(struct work_struct *cb_dispatch) dprintk(SYNX_ERR, "global status update of %u failed=%d\n", h_synx, rc); + /* + * We are decrementing the reference here assuming this code will be + * executed after handle is released. But in case if clients signal + * dma fence in middle of execution sequence, then we will put + * one reference thus deleting the global idx. As of now clients cannot + * signal dma fence. + */ synx_global_put_ref(idx); } @@ -573,6 +580,7 @@ fail: void synx_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { + s32 status; struct synx_signal_cb *signal_cb = container_of(cb, struct synx_signal_cb, fence_cb); @@ -581,7 +589,19 @@ void synx_fence_callback(struct dma_fence *fence, fence, signal_cb->handle); /* other signal_cb members would be set during cb registration */ - signal_cb->status = dma_fence_get_status_locked(fence); + status = dma_fence_get_status_locked(fence); + + /* + * dma_fence_get_status_locked API returns 1 if signaled, + * 0 if ACTIVE, + * and negative error code in case of any failure + */ + if (status == 1) + status = SYNX_STATE_SIGNALED_SUCCESS; + else if (status < 0) + status = SYNX_STATE_SIGNALED_EXTERNAL; + + signal_cb->status = status; INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler); queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch); @@ -2427,7 +2447,7 @@ int synx_ipc_callback(u32 client_id, if (IS_ERR_OR_NULL(signal_cb)) return -SYNX_NOMEM; - dprintk(SYNX_INFO, + dprintk(SYNX_DBG, "signal notification for %u received with status %u\n", handle, status); diff --git a/msm/synx/synx_api.h b/msm/synx/synx_api.h index a293d66d45..0c2cd78a1f 100644 --- a/msm/synx/synx_api.h +++ b/msm/synx/synx_api.h @@ -12,6 +12,8 @@ #include "synx_err.h" +#define SYNX_NO_TIMEOUT ((u64)-1) + /** * enum synx_create_flags - Flags passed during synx_create call * @@ -87,7 +89,7 @@ typedef void (*synx_callback)(s32 sync_obj, int status, void *data); * synx_user_callback - Callback function registered by clients * * User callback registered for non-blocking wait. Dispatched when - * synx object is signaled. + * synx object is signaled or timeout has expired. */ typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data); @@ -330,12 +332,14 @@ struct synx_import_params { * @cb_func : Pointer to callback func to be invoked * @userdata : Opaque pointer passed back with callback * @cancel_cb_func : Pointer to callback to ack cancellation (optional) + * @timeout_ms : Timeout in ms. SYNX_NO_TIMEOUT if no timeout. 
*/ struct synx_callback_params { u32 h_synx; synx_user_callback_t cb_func; void *userdata; synx_user_callback_t cancel_cb_func; + u64 timeout_ms; }; /* Kernel APIs */ diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index 3a18a8473c..9c474d535f 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -208,6 +208,24 @@ int synx_global_init_coredata(u32 h_synx) if (rc) return rc; synx_g_obj = &synx_gmem.table[idx]; + if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 || + synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 || + synx_g_obj->parents[0] != 0) { + dprintk(SYNX_ERR, + "entry not cleared for idx %u,\n" + "synx_g_obj->status %d,\n" + "synx_g_obj->refcount %d,\n" + "synx_g_obj->subscribers %d,\n" + "synx_g_obj->handle %u,\n" + "synx_g_obj->parents[0] %d\n", + idx, synx_g_obj->status, + synx_g_obj->refcount, + synx_g_obj->subscribers, + synx_g_obj->handle, + synx_g_obj->parents[0]); + synx_gmem_unlock(idx, &flags); + return -SYNX_INVALID; + } memset(synx_g_obj, 0, sizeof(*synx_g_obj)); /* set status to active */ synx_g_obj->status = SYNX_STATE_ACTIVE; @@ -332,6 +350,28 @@ int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id) return SYNX_SUCCESS; } +int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + synx_g_obj->subscribers &= ~(1UL << id); + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + u32 synx_global_get_parents_num(u32 idx) { int rc; @@ -528,7 +568,18 @@ static int synx_global_update_status_core(u32 idx, /* notify waiting clients on signal */ if (data) { /* notify wait client */ - for (i = 1; i < SYNX_CORE_MAX; i++) { + + /* In case of SSR, someone might be waiting on same core + * However, in other cases, synx_signal API will take care + * of signaling handles on same core and thus we don't need + * to send interrupt + */ + if (status == SYNX_STATE_SIGNALED_SSR) + i = 0; + else + i = 1; + + for (; i < SYNX_CORE_MAX ; i++) { if (!wait_cores[i]) continue; dprintk(SYNX_DBG, @@ -735,7 +786,7 @@ int synx_global_recover(enum synx_core_id core_id) const u32 size = SYNX_GLOBAL_MAX_OBJS; unsigned long flags; struct synx_global_coredata *synx_g_obj; - + bool update; int *clear_idx = NULL; if (!synx_gmem.table) diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 592d713a4b..99f246490f 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -179,6 +179,16 @@ int synx_global_get_subscribed_cores(u32 idx, bool *cores); */ int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id); +/** + * synx_global_clear_subscribed_core - Clear core as a subscriber core on global entry + * + * @param idx : Global entry index + * @param id : Core to be added as subscriber + * + * @return SYNX_SUCCESS on success. Negative error on failure. 
+ */ +int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id); + /** * synx_global_get_status - Get status of the global entry * diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 9b88943b18..38ba62782c 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -223,11 +223,17 @@ int synx_util_init_group_coredata(struct synx_coredata *synx_obj, static void synx_util_destroy_coredata(struct kref *kref) { + int rc; struct synx_coredata *synx_obj = container_of(kref, struct synx_coredata, refcount); - if (synx_util_is_global_object(synx_obj)) + if (synx_util_is_global_object(synx_obj)) { + rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS); + if (rc) + dprintk(SYNX_ERR, "Failed to clear subscribers"); + synx_global_put_ref(synx_obj->global_idx); + } synx_util_object_destroy(synx_obj); } @@ -247,6 +253,7 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) u32 i; s32 sync_id; u32 type; + unsigned long flags; struct synx_cb_data *synx_cb, *synx_cb_temp; struct synx_bind_desc *bind_desc; struct bind_operations *bind_ops; @@ -297,6 +304,29 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) mutex_destroy(&synx_obj->obj_lock); synx_util_release_fence_entry((u64)synx_obj->fence); + + /* dma fence framework expects handles are signaled before release, + * so signal if active handle and has last refcount. Synx handles + * on other cores are still active to carry out usual callflow. + */ + if (!IS_ERR_OR_NULL(synx_obj->fence)) { + spin_lock_irqsave(synx_obj->fence->lock, flags); + if (kref_read(&synx_obj->fence->refcount) == 1 && + (synx_util_get_object_status_locked(synx_obj) == + SYNX_STATE_ACTIVE)) { + // set fence error to cancel + dma_fence_set_error(synx_obj->fence, + -SYNX_STATE_SIGNALED_CANCEL); + + rc = dma_fence_signal_locked(synx_obj->fence); + if (rc) + dprintk(SYNX_ERR, + "signaling fence %pK failed=%d\n", + synx_obj->fence, rc); + } + spin_unlock_irqrestore(synx_obj->fence->lock, flags); + } + dma_fence_put(synx_obj->fence); kfree(synx_obj); dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj); @@ -873,7 +903,6 @@ static void synx_util_cleanup_fence( if (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) { signal_cb->synx_obj = NULL; - signal_cb->handle = synx_obj->global_idx; synx_obj->signal_cb = NULL; /* * release reference held by signal cb and @@ -1162,7 +1191,7 @@ void synx_util_cb_dispatch(struct work_struct *cb_dispatch) client->id); } - dprintk(SYNX_INFO, + dprintk(SYNX_DBG, "callback dispatched for handle %u, status %u, data %pK\n", payload.h_synx, payload.status, payload.data); From 1fb50c327228d86e874ca4f6cd343b11d78956a8 Mon Sep 17 00:00:00 2001 From: NITIN LAXMIDAS NAIK Date: Thu, 9 Feb 2023 13:40:29 -0800 Subject: [PATCH 11/42] msm: synx: build: bazel build DDK change Add support for synx modules to be built with Bazel DDK framework for pineapple. 
Change-Id: I375ea8a722f2afdfd5a9354854675030ebd38d96 Signed-off-by: Ram Nagesh --- BUILD.bazel | 27 ++++++++++ pineapple.bzl | 15 ++++++ synx_module_build.bzl | 117 ++++++++++++++++++++++++++++++++++++++++++ synx_modules.bzl | 25 +++++++++ 4 files changed, 184 insertions(+) create mode 100644 BUILD.bazel create mode 100644 pineapple.bzl create mode 100644 synx_module_build.bzl create mode 100644 synx_modules.bzl diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000000..fd3c2a523c --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,27 @@ +package( + default_visibility = [ + "//visibility:public", + ], +) + +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") + +driver_header_globs = [ + "include/uapi/synx/**/*.h", + "include/uapi/synx/media/**/*.h", + "msm/synx/**/*.h", +] + +ddk_headers( + name = "synx_headers", + hdrs = glob(driver_header_globs), + includes = [ + "include/uapi/synx", + "include/uapi/synx/media", + "msm/synx", + ], +) + +load(":pineapple.bzl", "define_pineapple") + +define_pineapple() diff --git a/pineapple.bzl b/pineapple.bzl new file mode 100644 index 0000000000..cf12ca27fa --- /dev/null +++ b/pineapple.bzl @@ -0,0 +1,15 @@ +load("//external_modules/synx-kernel:synx_modules.bzl", "synx_modules") +load("//external_modules/synx-kernel:synx_module_build.bzl", "define_consolidate_gki_modules") + +def define_pineapple(): + define_consolidate_gki_modules( + target = "pineapple", + registry = synx_modules, + modules = [ + "synx", + "ipclite", + ], + config_options = [ + "TARGET_SYNX_ENABLE", + ], + ) diff --git a/synx_module_build.bzl b/synx_module_build.bzl new file mode 100644 index 0000000000..6a77b09ef1 --- /dev/null +++ b/synx_module_build.bzl @@ -0,0 +1,117 @@ +load( + "//build/kernel/kleaf:kernel.bzl", + "ddk_module", + "ddk_submodule", + "kernel_module", + "kernel_modules_install", +) + +def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps): + processed_config_srcs = {} + processed_config_deps = {} + + for config_src_name in config_srcs: + config_src = config_srcs[config_src_name] + + if type(config_src) == "list": + processed_config_srcs[config_src_name] = {True: config_src} + else: + processed_config_srcs[config_src_name] = config_src + + for config_deps_name in config_deps: + config_dep = config_deps[config_deps_name] + + if type(config_dep) == "list": + processed_config_deps[config_deps_name] = {True: config_dep} + else: + processed_config_deps[config_deps_name] = config_dep + + module = struct( + name = name, + path = path, + srcs = srcs, + config_srcs = processed_config_srcs, + config_option = config_option, + deps = deps, + config_deps = processed_config_deps, + ) + + module_map[name] = module + +def _get_config_choices(map, options): + choices = [] + + for option in map: + choices.extend(map[option].get(option in options, [])) + + return choices + +def _get_kernel_build_options(modules, config_options): + all_options = {option: True for option in config_options} + all_options = all_options | {module.config_option: True for module in modules if module.config_option} + + return all_options + +def _get_kernel_build_module_srcs(module, options, formatter): + srcs = module.srcs + _get_config_choices(module.config_srcs, options) + module_path = "{}/".format(module.path) if module.path else "" + globbed_srcs = native.glob(["{}{}".format(module_path, formatter(src)) for src in srcs]) + + return globbed_srcs + +def _get_kernel_build_module_deps(module, options, formatter): + deps = module.deps + 
_get_config_choices(module.config_deps, options) + deps = [formatter(dep) for dep in deps] + + return deps + +def create_module_registry(hdrs = []): + module_map = {} + + def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}): + _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps) + + return struct( + register = register, + get = module_map.get, + hdrs = hdrs, + module_map = module_map, + ) + +def define_target_variant_modules(target, variant, registry, modules, config_options = []): + kernel_build = "{}_{}".format(target, variant) + kernel_build_label = "//msm-kernel:{}".format(kernel_build) + modules = [registry.get(module_name) for module_name in modules] + options = _get_kernel_build_options(modules, config_options) + build_print = lambda message: print("{}: {}".format(kernel_build, message)) + formatter = lambda s: s.replace("%b", kernel_build).replace("%t", target) + + headers = ["//msm-kernel:all_headers"] + registry.hdrs + all_module_rules = [] + + for module in modules: + rule_name = "{}_{}".format(kernel_build, module.name) + module_srcs = _get_kernel_build_module_srcs(module, options, formatter) + + if not module_srcs: + continue + + ddk_submodule( + name = rule_name, + srcs = module_srcs, + out = "{}.ko".format(module.name), + deps = headers + _get_kernel_build_module_deps(module, options, formatter), + local_defines = options.keys(), + ) + + all_module_rules.append(rule_name) + + ddk_module( + name = "{}_modules".format(kernel_build), + kernel_build = kernel_build_label, + deps = all_module_rules, + ) + +def define_consolidate_gki_modules(target, registry, modules, config_options = []): + define_target_variant_modules(target, "consolidate", registry, modules, config_options) + define_target_variant_modules(target, "gki", registry, modules, config_options) diff --git a/synx_modules.bzl b/synx_modules.bzl new file mode 100644 index 0000000000..e7865ace75 --- /dev/null +++ b/synx_modules.bzl @@ -0,0 +1,25 @@ +load(":synx_module_build.bzl", "create_module_registry") + +SYNX_KERNEL_ROOT = "synx-kernel" + +synx_modules = create_module_registry([":synx_headers"]) +register_synx_module = synx_modules.register + +register_synx_module( + name = "synx", + path = "msm", + srcs = [ + "synx/synx.c", + "synx/synx_global.c", + "synx/synx_util.c", + "synx/synx_debugfs.c", + ], +) + +register_synx_module( + name = "ipclite", + path = "msm", + srcs = [ + "synx/ipclite.c", + ], +) From 5ea1e02adad323902404c648b5796dfcfbfde295 Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Tue, 28 Feb 2023 20:26:34 +0530 Subject: [PATCH 12/42] msm: synx: ipclite: Switch to qcom_scm_assign_mem from hyp_assign_phys() hyp_assign_phys is replaced by the upstream API qcom_scm_assign_mem, as hyp_assign_phys is planned to be deprecated Change-Id: I4371675b881735b92cc12d3f87c7d171acda5a97 Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 969a52dd21..996b4c749f 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -17,16 +17,13 @@ #include #include -#include +#include #include #include "ipclite_client.h" #include "ipclite.h" -#define VMID_HLOS 3 -#define VMID_SSC_Q6 5 -#define VMID_ADSP_Q6 6 #define VMID_CDSP 30 #define GLOBAL_ATOMICS_ENABLED 1 #define GLOBAL_ATOMICS_DISABLED 0 @@ -1005,14 +1002,18 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) static int 
set_ipcmem_access_control(struct ipclite_info *ipclite) { int ret = 0; - int srcVM[1] = {VMID_HLOS}; - int destVM[2] = {VMID_HLOS, VMID_CDSP}; - int destVMperm[2] = {PERM_READ | PERM_WRITE, - PERM_READ | PERM_WRITE}; + u64 srcVM = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm destVM[2]; - ret = hyp_assign_phys(ipclite->ipcmem.mem.aux_base, - ipclite->ipcmem.mem.size, srcVM, 1, - destVM, destVMperm, 2); + destVM[0].vmid = QCOM_SCM_VMID_HLOS; + destVM[0].perm = QCOM_SCM_PERM_RW; + + destVM[1].vmid = VMID_CDSP; + destVM[1].perm = QCOM_SCM_PERM_RW; + + ret = qcom_scm_assign_mem(ipclite->ipcmem.mem.aux_base, + ipclite->ipcmem.mem.size, &srcVM, + destVM, ARRAY_SIZE(destVM)); return ret; } From 525a2fe19a1573ab8005d1c0b5e8da7a0264cd10 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Wed, 15 Feb 2023 18:22:46 -0800 Subject: [PATCH 13/42] synx: adding a macro for invalid synx handle In use cases where some buffers are passed without a synx handle, the client needs to define an additional parameter to indicate whether the h_synx field is valid. This adds to the command size. Assigning h_synx=SYNX_INVALID_HANDLE can avoid this. Change-Id: Ibf9dcf9641236ab2ad4c106904f3f17c879486bf Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx_api.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/msm/synx/synx_api.h b/msm/synx/synx_api.h index 0c2cd78a1f..09a36d0003 100644 --- a/msm/synx/synx_api.h +++ b/msm/synx/synx_api.h @@ -14,6 +14,12 @@ #define SYNX_NO_TIMEOUT ((u64)-1) +/** + * SYNX_INVALID_HANDLE : client can assign the synx handle variable with this value + * when it doesn't hold a valid synx handle + */ +#define SYNX_INVALID_HANDLE 0 + /** * enum synx_create_flags - Flags passed during synx_create call * From 384846c39354b4e178c139f393b4ab403c36c843 Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Wed, 1 Mar 2023 18:26:47 +0530 Subject: [PATCH 14/42] msm: synx: ipclite dt-bindings moved from kernel to vendor The DT-bindings which has macros for IPCLite signal usage, has been moved from kernel SI to vendor SI as all IPCLite related changes can be done in synx-kernel vendor SI without any kernel change going forward. Change-Id: I5047684d043df25dd607bd5943791850adc1bac0 Signed-off-by: Chelliah Vinu R --- dt-bindings/ipclite-signals.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 dt-bindings/ipclite-signals.h diff --git a/dt-bindings/ipclite-signals.h b/dt-bindings/ipclite-signals.h new file mode 100644 index 0000000000..5df4b6d00b --- /dev/null +++ b/dt-bindings/ipclite-signals.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __DT_BINDINGS_IPCLITE_SIGNALS_H +#define __DT_BINDINGS_IPCLITE_SIGNALS_H + +/* Signal IDs for COMPUTE_L0 protocol */ +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MSG 0 +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MEM_INIT 1 +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_VERSION 2 +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_TEST 3 +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_SSR 4 +#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_DEBUG 5 + +#endif From 7570178ce355bdcd3f1e0785a6b5b5e64fa6e1ce Mon Sep 17 00:00:00 2001 From: John Moon Date: Mon, 6 Mar 2023 14:11:18 -0800 Subject: [PATCH 15/42] msm: synx: build: Add copy_to_dist_dir rule Add copy_to_dist_dir to Bazel build to output kernel build outputs to dist dir. 
Change-Id: I7e142d1e8e8f9f81ed25b938ee7799969870be8d Signed-off-by: John Moon --- pineapple.bzl | 4 ++-- synx_module_build.bzl | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/pineapple.bzl b/pineapple.bzl index cf12ca27fa..301fa5bd4d 100644 --- a/pineapple.bzl +++ b/pineapple.bzl @@ -1,5 +1,5 @@ -load("//external_modules/synx-kernel:synx_modules.bzl", "synx_modules") -load("//external_modules/synx-kernel:synx_module_build.bzl", "define_consolidate_gki_modules") +load(":synx_modules.bzl", "synx_modules") +load(":synx_module_build.bzl", "define_consolidate_gki_modules") def define_pineapple(): define_consolidate_gki_modules( diff --git a/synx_module_build.bzl b/synx_module_build.bzl index 6a77b09ef1..d1e4639397 100644 --- a/synx_module_build.bzl +++ b/synx_module_build.bzl @@ -5,6 +5,7 @@ load( "kernel_module", "kernel_modules_install", ) +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps): processed_config_srcs = {} @@ -112,6 +113,16 @@ def define_target_variant_modules(target, variant, registry, modules, config_opt deps = all_module_rules, ) + copy_to_dist_dir( + name = "{}_modules_dist".format(kernel_build), + data = [":{}_modules".format(kernel_build)], + dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(kernel_build), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + ) + def define_consolidate_gki_modules(target, registry, modules, config_options = []): define_target_variant_modules(target, "consolidate", registry, modules, config_options) define_target_variant_modules(target, "gki", registry, modules, config_options) From d120b83d98799124f4204f8e828e56e12136c8f2 Mon Sep 17 00:00:00 2001 From: Urvesh Rathod Date: Thu, 9 Feb 2023 14:51:24 +0530 Subject: [PATCH 16/42] msm: synx: Adding merge API support for synx V2 This change enables clients to signal synx merged handles from same and other cores. This change also ensures that underlying child dma fences are signaled when composite synx handle is signaled from other core. 
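For illustration, a minimal client-side sketch of the call flow this change enables is given below (not part of the patch). Only synx_merge(), synx_signal(), h_merged_obj, flags and SYNX_MERGE_GLOBAL_FENCE are visible in this series; the h_synxs and num_objs field names are assumptions.

/*
 * Illustrative sketch only: merge two handles into a composite and
 * signal the composite from one core. Field names h_synxs/num_objs
 * are assumed; they do not appear in this diff.
 */
static int example_merge_and_signal(struct synx_session *session,
	u32 h_a, u32 h_b)
{
	u32 h_objs[2] = { h_a, h_b };
	u32 h_merged = 0;
	struct synx_merge_params params = {
		.h_synxs      = h_objs,    /* assumed field name */
		.num_objs     = 2,         /* assumed field name */
		.flags        = SYNX_MERGE_GLOBAL_FENCE,
		.h_merged_obj = &h_merged,
	};
	int rc = synx_merge(session, &params);

	if (rc != SYNX_SUCCESS)
		return rc;

	/* signaling the composite also signals the underlying child fences */
	return synx_signal(session, h_merged, SYNX_STATE_SIGNALED_SUCCESS);
}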
Change-Id: Ib81bc2291c85b93fe11eddf5d0ce450bbe486c83 Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 166 ++++++++++++++++++++++++++++++++----------- msm/synx/synx_util.c | 42 +++++++++++ msm/synx/synx_util.h | 2 + 3 files changed, 169 insertions(+), 41 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 993c355761..3eb9372eaa 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -497,6 +497,42 @@ int synx_native_signal_fence(struct synx_coredata *synx_obj, return rc; } +int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status) +{ + int rc = SYNX_SUCCESS; + unsigned long flags; + int i = 0, num_fences = 0; + struct synx_coredata **synx_child_obj = NULL; + + rc = synx_get_child_coredata(synx_obj, &synx_child_obj, &num_fences); + if (rc != SYNX_SUCCESS) + return rc; + for(i = 0; i < num_fences; i++) + { + if (IS_ERR_OR_NULL(synx_child_obj[i]) || IS_ERR_OR_NULL(synx_child_obj[i]->fence)) { + dprintk(SYNX_ERR, "Invalid child coredata %d\n", i); + rc = -SYNX_NOENT; + goto fail; + } + mutex_lock(&synx_child_obj[i]->obj_lock); + spin_lock_irqsave(synx_child_obj[i]->fence->lock, flags); + if (synx_util_get_object_status_locked(synx_child_obj[i]) != SYNX_STATE_ACTIVE || + !synx_util_is_global_object(synx_child_obj[i])) + { + spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags); + mutex_unlock(&synx_child_obj[i]->obj_lock); + continue; + } + spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags); + + rc = synx_native_signal_fence(synx_child_obj[i], status); + mutex_unlock(&synx_child_obj[i]->obj_lock); + } +fail: + kfree(synx_child_obj); + return rc; +} + void synx_signal_handler(struct work_struct *cb_dispatch) { int rc = SYNX_SUCCESS; @@ -552,13 +588,25 @@ void synx_signal_handler(struct work_struct *cb_dispatch) mutex_lock(&synx_obj->obj_lock); - if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) - rc = synx_native_signal_fence(synx_obj, status); - - if (rc == SYNX_SUCCESS) - rc = synx_native_signal_core(synx_obj, status, - (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? - true : false, signal_cb->ext_sync_id); + if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) { + if (synx_util_is_merged_object(synx_obj)) { + rc = synx_native_signal_merged_fence(synx_obj, status); + if (rc != SYNX_SUCCESS) { + mutex_unlock(&synx_obj->obj_lock); + dprintk(SYNX_ERR, + "failed to signal merged fence for %u failed=%d\n", + h_synx, rc); + goto fail; + } + } + else { + rc = synx_native_signal_fence(synx_obj, status); + if (rc == SYNX_SUCCESS) + rc = synx_native_signal_core(synx_obj, status, + (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? 
+ true : false, signal_cb->ext_sync_id); + } + } mutex_unlock(&synx_obj->obj_lock); @@ -766,14 +814,6 @@ int synx_async_wait(struct synx_session *session, } mutex_lock(&synx_obj->obj_lock); - if (synx_util_is_merged_object(synx_obj)) { - dprintk(SYNX_ERR, - "[sess :%llu] cannot async wait on merged handle %u\n", - client->id, params->h_synx); - rc = -SYNX_INVALID; - goto release; - } - synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC); if (IS_ERR_OR_NULL(synx_cb)) { rc = -SYNX_NOMEM; @@ -795,10 +835,17 @@ int synx_async_wait(struct synx_session *session, } if (synx_util_is_global_handle(params->h_synx) || - synx_util_is_global_object(synx_obj)) + synx_util_is_global_object(synx_obj)) { status = synx_global_test_status_set_wait( synx_util_global_idx(params->h_synx), SYNX_CORE_APSS); + if (status != SYNX_STATE_ACTIVE) { + if (synx_util_is_merged_object(synx_obj)) + synx_native_signal_merged_fence(synx_obj, status); + else + synx_native_signal_fence(synx_obj, status); + } + } else status = synx_util_get_object_status(synx_obj); @@ -862,10 +909,9 @@ int synx_cancel_async_wait( } mutex_lock(&synx_obj->obj_lock); - if (synx_util_is_merged_object(synx_obj) || - synx_util_is_external_object(synx_obj)) { + if (synx_util_is_external_object(synx_obj)) { dprintk(SYNX_ERR, - "cannot cancel wait on composite handle\n"); + "cannot cancel wait on external fence\n"); goto release; } @@ -944,14 +990,14 @@ EXPORT_SYMBOL(synx_cancel_async_wait); int synx_merge(struct synx_session *session, struct synx_merge_params *params) { - int rc, i, j = 0; - u32 h_child; - u32 count = 0; - u32 *h_child_list; + int rc, i, num_signaled = 0; + u32 count = 0, h_child, status = SYNX_STATE_ACTIVE; + u32 *h_child_list = NULL, *h_child_idx_list = NULL; struct synx_client *client; struct dma_fence **fences = NULL; - struct synx_coredata *synx_obj; struct synx_map_entry *map_entry; + struct synx_coredata *synx_obj, *synx_obj_child; + struct synx_handle_coredata *synx_data_child; if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) return -SYNX_INVALID; @@ -1008,36 +1054,67 @@ int synx_merge(struct synx_session *session, goto clear; } + h_child_list = kzalloc(count*4, GFP_KERNEL); + if (IS_ERR_OR_NULL(h_child_list)) { + rc = -SYNX_NOMEM; + goto clear; + } + + h_child_idx_list = kzalloc(count*4, GFP_KERNEL); + if (IS_ERR_OR_NULL(h_child_idx_list)) { + rc = -SYNX_NOMEM; + goto clear; + } + + for (i = 0; i < count; i++) { + h_child = synx_util_get_fence_entry((u64)fences[i], 1); + if (!synx_util_is_global_handle(h_child)) + continue; + + h_child_list[num_signaled] = h_child; + h_child_idx_list[num_signaled++] = synx_util_global_idx(h_child); + } + if (params->flags & SYNX_MERGE_GLOBAL_FENCE) { - h_child_list = kzalloc(count*4, GFP_KERNEL); - if (IS_ERR_OR_NULL(synx_obj)) { - rc = -SYNX_NOMEM; - goto clear; - } - - for (i = 0; i < count; i++) { - h_child = synx_util_get_fence_entry((u64)fences[i], 1); - if (!synx_util_is_global_handle(h_child)) - continue; - - h_child_list[j++] = synx_util_global_idx(h_child); - } - - rc = synx_global_merge(h_child_list, j, + rc = synx_global_merge(h_child_idx_list, num_signaled, synx_util_global_idx(*params->h_merged_obj)); if (rc != SYNX_SUCCESS) { dprintk(SYNX_ERR, "global merge failed\n"); + kfree(h_child_list); + kfree(h_child_idx_list); goto clear; } } + else { + for(i = 0; i < num_signaled; i++) { + status = synx_global_test_status_set_wait(synx_util_global_idx(h_child_list[i]), SYNX_CORE_APSS); + + if (status != SYNX_STATE_ACTIVE) { + synx_data_child = synx_util_acquire_handle(client, 
h_child_list[i]); + synx_obj_child = synx_util_obtain_object(synx_data_child); + + if (IS_ERR_OR_NULL(synx_obj_child)) { + dprintk(SYNX_ERR, + "[sess :%llu] invalid child handle %u\n", + client->id, h_child_list[i]); + continue; + } + + rc = synx_native_signal_fence(synx_obj_child, status); + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, "h_synx %u failed with status %d\n", h_child_list[i], rc); + } + } + } dprintk(SYNX_MEM, "[sess :%llu] merge allocated %u, core %pK, fence %pK\n", client->id, *params->h_merged_obj, synx_obj, synx_obj->fence); + kfree(h_child_list); + kfree(h_child_idx_list); synx_put_client(client); return SYNX_SUCCESS; - clear: synx_util_release_map_entry(map_entry); clean_up: @@ -1118,8 +1195,15 @@ int synx_wait(struct synx_session *session, if (synx_util_is_global_handle(h_synx)) { rc = synx_global_test_status_set_wait( synx_util_global_idx(h_synx), SYNX_CORE_APSS); - if (rc != SYNX_STATE_ACTIVE) + if (rc != SYNX_STATE_ACTIVE) { + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_merged_object(synx_obj)) + synx_native_signal_merged_fence(synx_obj, rc); + else + synx_native_signal_fence(synx_obj, rc); + mutex_unlock(&synx_obj->obj_lock); goto fail; + } } timeleft = dma_fence_wait_timeout(synx_obj->fence, (bool) 0, diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 38ba62782c..139c8a4e68 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -1205,6 +1205,48 @@ free: kfree(synx_cb); } +int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences) +{ + int rc = SYNX_SUCCESS; + int i = 0, handle_count = 0; + u32 h_child = 0; + struct dma_fence_array *array = NULL; + struct synx_coredata **synx_datas = NULL; + struct synx_map_entry *fence_entry = NULL; + + if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences)) + return -SYNX_INVALID; + if (dma_fence_is_array(synx_obj->fence)) { + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + return -SYNX_INVALID; + synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_datas)) + return -SYNX_NOMEM; + + for (i = 0; i < array->num_fences; i++) { + h_child = synx_util_get_fence_entry((u64)array->fences[i], 1); + fence_entry = synx_util_get_map_entry(h_child); + if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) + { + dprintk(SYNX_ERR, "Invalid handle access %u", h_child); + rc = -SYNX_NOENT; + goto fail; + } + + synx_datas[handle_count++] = fence_entry->synx_obj; + synx_util_release_map_entry(fence_entry); + } + } + + *child_synx_obj = synx_datas; + *num_fences = handle_count; + return rc; +fail: + kfree(synx_datas); + return rc; +} + u32 synx_util_get_fence_entry(u64 key, u32 global) { u32 h_synx = 0; diff --git a/msm/synx/synx_util.h b/msm/synx/synx_util.h index c1483cd81c..fc6c3508fb 100644 --- a/msm/synx/synx_util.h +++ b/msm/synx/synx_util.h @@ -178,4 +178,6 @@ void synx_util_map_import_params_to_create( struct bind_operations *synx_util_get_bind_ops(u32 type); u32 synx_util_map_client_id_to_core(enum synx_client_id id); +int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences); + #endif /* __SYNX_UTIL_H__ */ From 9040173668dee0f615a58d8c917ddaee2d550ec1 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Thu, 16 Feb 2023 15:16:15 -0800 Subject: [PATCH 17/42] synx: Propagating changes from msm-5.10 Includes async_wait(timeout) and other fixes Change-Id: I46871f7fd343287cbd7f9e6ec48efc8ef5ce049a 
Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 3eb9372eaa..3e0da3af16 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -799,6 +799,9 @@ int synx_async_wait(struct synx_session *session, if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) return -SYNX_INVALID; + if (params->timeout_ms != SYNX_NO_TIMEOUT) + return -SYNX_NOSUPPORT; + client = synx_get_client(session); if (IS_ERR_OR_NULL(client)) return -SYNX_INVALID; @@ -2061,6 +2064,7 @@ static int synx_handle_async_wait( params.h_synx = user_data.synx_obj; params.cb_func = synx_util_default_user_callback; params.userdata = (void *)user_data.payload[0]; + params.timeout_ms = user_data.payload[2]; rc = synx_async_wait(session, ¶ms); if (rc) From f549339d0bf3f872907c9bacc78eb395b537f7de Mon Sep 17 00:00:00 2001 From: Urvesh Rathod Date: Tue, 11 Apr 2023 12:25:24 +0530 Subject: [PATCH 18/42] msm: synx: Fix for dispatching callbacks in kernel fencing If wait-signal happens on APSS core, callbacks were not getting dispatched because of recent changes (CR3442156). This fix ensures callbacks are dispatched properly when handles are signaled. Change-Id: I0b11634327afa3575c12819a639e104b27e82707 Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 3e0da3af16..c2ed11762a 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -599,15 +599,14 @@ void synx_signal_handler(struct work_struct *cb_dispatch) goto fail; } } - else { + else rc = synx_native_signal_fence(synx_obj, status); - if (rc == SYNX_SUCCESS) - rc = synx_native_signal_core(synx_obj, status, - (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? - true : false, signal_cb->ext_sync_id); - } } + if (rc == SYNX_SUCCESS && !synx_util_is_merged_object(synx_obj)) + rc = synx_native_signal_core(synx_obj, status, + (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? + true : false, signal_cb->ext_sync_id); mutex_unlock(&synx_obj->obj_lock); if (rc != SYNX_SUCCESS) From ec2db68e0fd76f1ba094017394aa554cc8773ecf Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Mon, 24 Apr 2023 23:58:06 +0530 Subject: [PATCH 19/42] msm: synx: ipclite: Remove Hyp Assign Hyp assign is removed from ipclite. S2 mapping for CDSP & LPASS will be assigned from DSP PIL driver going forward. Cleaned up ipclite probe exit on failure. 
Change-Id: I342da2bb89024c252eebd4411194093ac77401d5 Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 32 +++----------------------------- 1 file changed, 3 insertions(+), 29 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 996b4c749f..44633cbdd7 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -24,7 +24,6 @@ #include "ipclite_client.h" #include "ipclite.h" -#define VMID_CDSP 30 #define GLOBAL_ATOMICS_ENABLED 1 #define GLOBAL_ATOMICS_DISABLED 0 #define FIFO_FULL_RESERVE 8 @@ -997,26 +996,6 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n"); } - -/*Add VMIDs corresponding to EVA, CDSP and VPU to set IPCMEM access control*/ -static int set_ipcmem_access_control(struct ipclite_info *ipclite) -{ - int ret = 0; - u64 srcVM = BIT(QCOM_SCM_VMID_HLOS); - struct qcom_scm_vmperm destVM[2]; - - destVM[0].vmid = QCOM_SCM_VMID_HLOS; - destVM[0].perm = QCOM_SCM_PERM_RW; - - destVM[1].vmid = VMID_CDSP; - destVM[1].perm = QCOM_SCM_PERM_RW; - - ret = qcom_scm_assign_mem(ipclite->ipcmem.mem.aux_base, - ipclite->ipcmem.mem.size, &srcVM, - destVM, ARRAY_SIZE(destVM)); - return ret; -} - static int ipclite_channel_irq_init(struct device *parent, struct device_node *node, struct ipclite_channel *channel) { @@ -1461,7 +1440,7 @@ static int ipclite_probe(struct platform_device *pdev) if (hwlock_id != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to retrieve hwlock\n"); ret = hwlock_id; - goto error; + goto release; } IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id); @@ -1469,7 +1448,7 @@ static int ipclite_probe(struct platform_device *pdev) if (!ipclite->hwlock) { IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n"); ret = -ENXIO; - goto error; + goto release; } IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock); @@ -1485,12 +1464,6 @@ static int ipclite_probe(struct platform_device *pdev) mem = &(ipclite->ipcmem.mem); memset(mem->virt_base, 0, mem->size); - ret = set_ipcmem_access_control(ipclite); - if (ret) { - IPCLITE_OS_LOG(IPCLITE_ERR, "failed to set access control policy\n"); - goto release; - } - ipcmem_init(&ipclite->ipcmem); /* Set up sysfs for debug */ @@ -1558,6 +1531,7 @@ mem_release: */ release: kfree(ipclite); + ipclite = NULL; error: IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n"); return ret; From a1825820d5a216dccb2e48b7ab80a7c2493bee14 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Thu, 20 Apr 2023 11:20:59 -0700 Subject: [PATCH 20/42] msm: synx: Move dprintk outside spinlock synx_client_destroy prints a log holding a spinlock which causes watchdog bite due to excessive log. This change prints the log after releasing the lock. Change-Id: I42021fd8d07cc595a31a0396f138ac18bcb5bd0f Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx_util.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 139c8a4e68..0b0575bb70 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -1363,6 +1363,8 @@ static void synx_client_cleanup(struct work_struct *dispatch) struct synx_handle_coredata *curr; struct hlist_node *tmp; + dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n", + client->id, client->name); /* * go over all the remaining synx obj handles * un-released from this session and remove them. 
@@ -1390,8 +1392,6 @@ static void synx_client_destroy(struct kref *kref) container_of(kref, struct synx_client, refcount); hash_del(&client->node); - dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n", - client->id, client->name); INIT_WORK(&client->dispatch, synx_client_cleanup); queue_work(synx_dev->wq_cleanup, &client->dispatch);
From 888868eca3e66e796b8c28022050b91c021fec5f Mon Sep 17 00:00:00 2001
From: Urvesh Rathod
Date: Fri, 28 Apr 2023 17:48:35 +0530
Subject: [PATCH 21/42] msm: synx: Dispatching un-released callbacks during release
This change ensures that any undispatched callbacks are dispatched with cancel status while destroying the object, if the handle is not signaled.
Change-Id: I18ee66b9a6ceb390df4d5f5c4f4cd77c5f9f2090
Signed-off-by: Urvesh Rathod
--- msm/synx/synx_util.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 0b0575bb70..ad77d45aad 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c
@@ -263,10 +263,13 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) list_for_each_entry_safe(synx_cb, synx_cb_temp, &synx_obj->reg_cbs_list, node) { dprintk(SYNX_ERR, - "cleaning up callback of session %pK\n", + "dispatching un-released callbacks of session %pK\n", synx_cb->session); + synx_cb->status = SYNX_STATE_SIGNALED_CANCEL; list_del_init(&synx_cb->node); - kfree(synx_cb); + queue_work(synx_dev->wq_cb, + &synx_cb->cb_dispatch); + dprintk(SYNX_VERB, "dispatched callback for fence %pK\n", synx_obj->fence); } for (i = 0; i < synx_obj->num_bound_synxs; i++) {
From 6b566f46391921ad42db200102d66e94c3f99c1e Mon Sep 17 00:00:00 2001
From: Urvesh Rathod
Date: Mon, 24 Apr 2023 14:20:26 +0530
Subject: [PATCH 22/42] msm: synx: Releases global handle index if handle is not signaled
This change provides fixes for the following issues:
1. If a local handle is imported as global, synx takes an extra reference on the global handle, which was not released because the signal callback data carried the local handle instead of the global one, causing a handle leak.
2. If all the child handles of a merged fence are local and the merged fence is global, the merged fence was signaled incorrectly on merge (num_child == 0) even though no one had signaled it.
3. During merge, synx takes one reference on each child dma fence. When the merged fence was released, the dma fence references of the child handles were not released, causing a handle/dma fence leak. This change signals the underlying child fences if the merged handle is still ACTIVE during release and drops the references on the dma fences.
4. If a local handle is imported as global, map_count was not incremented, because of which the object was destroyed more than once. This change increments map_count when a local handle is imported as global or vice versa.
5. In the synx_signal API, synx_signal_offload_job is followed by signaling the dma fence. synx_signal_offload_job internally calls synx_signal_handler, which signals the dma fence, because of which synx_signal sometimes returned failure. This fix ensures that synx_signal_handler does not overtake the synx_signal API.
Change-Id: Ia8d2eb969514347cac30f8ae33ce2028119dfd47 Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 18 ++++++---------- msm/synx/synx_global.c | 25 +++++++++++++++++++++ msm/synx/synx_global.h | 10 +++++++++ msm/synx/synx_util.c | 49 +++++++++++++++++++++++++++++++++++++----- msm/synx/synx_util.h | 1 + 5 files changed, 87 insertions(+), 16 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index c2ed11762a..48ae8f5777 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -589,16 +589,8 @@ void synx_signal_handler(struct work_struct *cb_dispatch) mutex_lock(&synx_obj->obj_lock); if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) { - if (synx_util_is_merged_object(synx_obj)) { + if (synx_util_is_merged_object(synx_obj)) rc = synx_native_signal_merged_fence(synx_obj, status); - if (rc != SYNX_SUCCESS) { - mutex_unlock(&synx_obj->obj_lock); - dprintk(SYNX_ERR, - "failed to signal merged fence for %u failed=%d\n", - h_synx, rc); - goto fail; - } - } else rc = synx_native_signal_fence(synx_obj, status); } @@ -723,12 +715,15 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status) goto fail; } + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_global_handle(h_synx) || synx_util_is_global_object(synx_obj)) rc = synx_global_update_status( synx_obj->global_idx, status); if (rc != SYNX_SUCCESS) { + mutex_unlock(&synx_obj->obj_lock); dprintk(SYNX_ERR, "[sess :%llu] status update %d failed=%d\n", client->id, h_synx, rc); @@ -744,7 +739,6 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status) rc = synx_signal_offload_job(client, synx_obj, h_synx, status); - mutex_lock(&synx_obj->obj_lock); rc = synx_native_signal_fence(synx_obj, status); if (rc != SYNX_SUCCESS) dprintk(SYNX_ERR, @@ -1204,7 +1198,7 @@ int synx_wait(struct synx_session *session, else synx_native_signal_fence(synx_obj, rc); mutex_unlock(&synx_obj->obj_lock); - goto fail; + goto status; } } @@ -1218,6 +1212,7 @@ int synx_wait(struct synx_session *session, goto fail; } +status: mutex_lock(&synx_obj->obj_lock); rc = synx_util_get_object_status(synx_obj); mutex_unlock(&synx_obj->obj_lock); @@ -1437,6 +1432,7 @@ static struct synx_map_entry *synx_handle_conversion( } } } else { + synx_obj->map_count++; rc = synx_alloc_global_handle(h_synx); if (rc == SYNX_SUCCESS) { synx_obj->global_idx = diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index 9c474d535f..edfbf2faec 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -328,6 +328,28 @@ int synx_global_get_subscribed_cores(u32 idx, bool *cores) return SYNX_SUCCESS; } +int synx_global_fetch_handle_details(u32 idx, u32 *h_synx) +{ + int rc; + unsigned long flags; + struct synx_global_coredata *synx_g_obj; + + if (!synx_gmem.table) + return -SYNX_NOMEM; + + if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx)) + return -SYNX_INVALID; + + rc = synx_gmem_lock(idx, &flags); + if (rc) + return rc; + synx_g_obj = &synx_gmem.table[idx]; + *h_synx = synx_g_obj->handle; + synx_gmem_unlock(idx, &flags); + + return SYNX_SUCCESS; +} + int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id) { int rc; @@ -710,6 +732,9 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx) if (!synx_is_valid_idx(p_idx)) return -SYNX_INVALID; + if (num_list == 0) + return SYNX_SUCCESS; + while (j < num_list) { idx = idx_list[j]; diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 99f246490f..733be049e9 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -293,4 +293,14 @@ int synx_global_clean_cdsp_mem(void); 
int synx_global_dump_shared_memory(void); +/** + * synx_global_fetch_handle_details - Fetches the synx handle from + * global shared memory. + * + * @param idx : Global entry index whose handle is requested. + * + * @return SYNX_SUCCESS on success. Negative error on failure. + */ +int synx_global_fetch_handle_details(u32 idx, u32 *h_synx); + #endif /* __SYNX_SHARED_MEM_H__ */ diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 0b0575bb70..89a1a7925f 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -12,6 +12,7 @@ #include "synx_util.h" extern void synx_external_callback(s32 sync_obj, int status, void *data); +static u32 __fence_state(struct dma_fence *fence, bool locked); int synx_util_init_coredata(struct synx_coredata *synx_obj, struct synx_create_params *params, @@ -247,6 +248,38 @@ void synx_util_put_object(struct synx_coredata *synx_obj) kref_put(&synx_obj->refcount, synx_util_destroy_coredata); } +int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status) +{ + struct dma_fence_array *array = NULL; + u32 i; + int rc = 0; + + if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence)) + return -SYNX_INVALID; + + if (dma_fence_is_array(synx_obj->fence)) { + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + return -SYNX_INVALID; + + for (i = 0; i < array->num_fences; i++) { + if (kref_read(&array->fences[i]->refcount) == 1 && + __fence_state(array->fences[i], false) == SYNX_STATE_ACTIVE) { + dma_fence_set_error(array->fences[i], + -SYNX_STATE_SIGNALED_CANCEL); + + rc = dma_fence_signal(array->fences[i]); + if (rc) + dprintk(SYNX_ERR, + "signaling child fence %pK failed=%d\n", + array->fences[i], rc); + } + dma_fence_put(array->fences[i]); + } + } + return rc; +} + void synx_util_object_destroy(struct synx_coredata *synx_obj) { int rc; @@ -311,7 +344,10 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) */ if (!IS_ERR_OR_NULL(synx_obj->fence)) { spin_lock_irqsave(synx_obj->fence->lock, flags); - if (kref_read(&synx_obj->fence->refcount) == 1 && + if (synx_util_is_merged_object(synx_obj) && + synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) + rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL); + else if (kref_read(&synx_obj->fence->refcount) == 1 && (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE)) { // set fence error to cancel @@ -319,12 +355,12 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) -SYNX_STATE_SIGNALED_CANCEL); rc = dma_fence_signal_locked(synx_obj->fence); - if (rc) - dprintk(SYNX_ERR, - "signaling fence %pK failed=%d\n", - synx_obj->fence, rc); } spin_unlock_irqrestore(synx_obj->fence->lock, flags); + if (rc) + dprintk(SYNX_ERR, + "signaling fence %pK failed=%d\n", + synx_obj->fence, rc); } dma_fence_put(synx_obj->fence); @@ -873,6 +909,7 @@ static void synx_util_cleanup_fence( unsigned long flags; u32 g_status; u32 f_status; + u32 h_synx = 0; mutex_lock(&synx_obj->obj_lock); synx_obj->map_count--; @@ -903,6 +940,8 @@ static void synx_util_cleanup_fence( if (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) { signal_cb->synx_obj = NULL; + synx_global_fetch_handle_details(synx_obj->global_idx, &h_synx); + signal_cb->handle = h_synx; synx_obj->signal_cb = NULL; /* * release reference held by signal cb and diff --git a/msm/synx/synx_util.h b/msm/synx/synx_util.h index fc6c3508fb..95f54c4bb2 100644 --- a/msm/synx/synx_util.h +++ b/msm/synx/synx_util.h @@ -60,6 +60,7 @@ static inline bool 
synx_util_is_external_object( struct synx_coredata *synx_obj) { if (synx_obj && + !(synx_obj->type & SYNX_CREATE_MERGED_FENCE) && (synx_obj->type & SYNX_CREATE_DMA_FENCE)) return true; From f26363379504f14b8862f4556d2f213041e5fa5c Mon Sep 17 00:00:00 2001 From: Urvesh Rathod Date: Mon, 27 Mar 2023 20:45:36 +0530 Subject: [PATCH 23/42] msm: synx: Enabling async wait support on merged fence This change enables support to perform async wait on pure local, pure global, combination of local and global synx fence and nested merge fence. Change-Id: I51a1d1998dca997db52bfa5c393bda3e7c2af985 Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 48ae8f5777..682ba14d2e 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -550,11 +550,13 @@ void synx_signal_handler(struct work_struct *cb_dispatch) idx = (IS_ERR_OR_NULL(synx_obj)) ? synx_util_global_idx(h_synx) : synx_obj->global_idx; - rc = synx_global_update_status(idx, status); - if (rc != SYNX_SUCCESS) - dprintk(SYNX_ERR, - "global status update of %u failed=%d\n", - h_synx, rc); + if (synx_global_get_status(idx) == SYNX_STATE_ACTIVE) { + rc = synx_global_update_status(idx, status); + if (rc != SYNX_SUCCESS) + dprintk(SYNX_ERR, + "global status update of %u failed=%d\n", + h_synx, rc); + } /* * We are decrementing the reference here assuming this code will be * executed after handle is released. But in case if clients signal @@ -562,7 +564,8 @@ void synx_signal_handler(struct work_struct *cb_dispatch) * one reference thus deleting the global idx. As of now clients cannot * signal dma fence. */ - synx_global_put_ref(idx); + if (IS_ERR_OR_NULL(synx_obj)) + synx_global_put_ref(idx); } /* @@ -595,7 +598,15 @@ void synx_signal_handler(struct work_struct *cb_dispatch) rc = synx_native_signal_fence(synx_obj, status); } - if (rc == SYNX_SUCCESS && !synx_util_is_merged_object(synx_obj)) + if (rc != SYNX_SUCCESS) { + mutex_unlock(&synx_obj->obj_lock); + dprintk(SYNX_ERR, + "failed to signal fence %u with err=%d\n", + h_synx, rc); + goto fail; + } + + if (rc == SYNX_SUCCESS) rc = synx_native_signal_core(synx_obj, status, (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? 
true : false, signal_cb->ext_sync_id); @@ -1040,6 +1051,10 @@ int synx_merge(struct synx_session *session, goto clean_up; } + rc = synx_util_add_callback(synx_obj, *params->h_merged_obj); + if (rc != SYNX_SUCCESS) + goto clear; + rc = synx_util_init_handle(client, synx_obj, params->h_merged_obj, map_entry); if (rc) { From aa30245061f3c2005bea9160561e65a2ad6df7ed Mon Sep 17 00:00:00 2001 From: NITIN LAXMIDAS NAIK Date: Thu, 4 May 2023 16:13:32 -0700 Subject: [PATCH 24/42] msm: synx: Enable bazel compilation for synx Add new macro to enable bazel compilation by default Change-Id: I16543286b579be5fa920a313cb40498d48f3259b Signed-off-by: NITIN LAXMIDAS NAIK --- Android.mk | 6 ++++-- pineapple.bzl | 2 +- synx_modules.bzl | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Android.mk b/Android.mk index 53da29e886..4f86e48296 100644 --- a/Android.mk +++ b/Android.mk @@ -21,12 +21,15 @@ KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) DLKM_DIR := $(TOP)/device/qcom/common/dlkm LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true +LOCAL_MODULE_KO_DIRS := msm/synx/synx-driver.ko msm/synx/ipclite.ko include $(CLEAR_VARS) # For incremental compilation LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) LOCAL_MODULE := synx-driver-symvers LOCAL_MODULE_KBUILD_NAME := Module.symvers +#LOCAL_MODULE_STEM := Module.symvers LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) # Include kp_module.ko in the /vendor/lib/modules (vendor.img) # BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) @@ -37,7 +40,7 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) $(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES)) LOCAL_MODULE := synx-driver.ko -LOCAL_MODULE_KBUILD_NAME := msm/synx-driver.ko +LOCAL_MODULE_KBUILD_NAME := msm/synx/synx-driver.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk @@ -57,7 +60,6 @@ $(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY)) $(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES)) $(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES)) $(info DLKM_DIR = $(DLKM_DIR)) - include $(DLKM_DIR)/Build_external_kernelmodule.mk diff --git a/pineapple.bzl b/pineapple.bzl index 301fa5bd4d..5baba7f38e 100644 --- a/pineapple.bzl +++ b/pineapple.bzl @@ -6,7 +6,7 @@ def define_pineapple(): target = "pineapple", registry = synx_modules, modules = [ - "synx", + "synx-driver", "ipclite", ], config_options = [ diff --git a/synx_modules.bzl b/synx_modules.bzl index e7865ace75..7cbee89a75 100644 --- a/synx_modules.bzl +++ b/synx_modules.bzl @@ -6,7 +6,7 @@ synx_modules = create_module_registry([":synx_headers"]) register_synx_module = synx_modules.register register_synx_module( - name = "synx", + name = "synx-driver", path = "msm", srcs = [ "synx/synx.c", From e7e3b4aaac23727158a096436722c7c9b8ae5f92 Mon Sep 17 00:00:00 2001 From: Urvesh Rathod Date: Mon, 1 May 2023 11:03:49 +0530 Subject: [PATCH 25/42] msm: synx: Custom signal support This change ensures clients can send anything greater than 64 as custom status from APSS to other cores. 
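As a hedged illustration of the usage this enables (the exact value of SYNX_STATE_SIGNALED_MAX is not shown in this excerpt; the commit text implies 64):

/*
 * Illustrative sketch only: signal a handle with a client-defined status.
 * Per the commit text, values above SYNX_STATE_SIGNALED_MAX (64) are
 * treated as custom status and propagated to the other cores.
 */
#define EXAMPLE_CUSTOM_STATUS 100	/* illustrative value, > 64 */

static int example_custom_signal(struct synx_session *session, u32 h_synx)
{
	int rc = synx_signal(session, h_synx, EXAMPLE_CUSTOM_STATUS);

	if (rc != SYNX_SUCCESS)
		return rc;

	/* a client on another core would later observe the same value */
	return synx_get_status(session, h_synx);
}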
Change-Id: Ib7f507e666fe0b60c5fc09f90652a09e15634376 Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 135 +++++++++++++++++++++++++++++++++------- msm/synx/synx_global.c | 80 +++++++++++------------- msm/synx/synx_global.h | 3 + msm/synx/synx_private.h | 3 +- msm/synx/synx_util.c | 16 ++--- 5 files changed, 163 insertions(+), 74 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 682ba14d2e..2fde5d1c07 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -483,6 +483,11 @@ int synx_native_signal_fence(struct synx_coredata *synx_obj, return -SYNX_ALREADY; } + synx_obj->status = status; + + if (status >= SYNX_DMA_FENCE_STATE_MAX) + status = SYNX_DMA_FENCE_STATE_MAX - 1; + /* set fence error to model {signal w/ error} */ if (status != SYNX_STATE_SIGNALED_SUCCESS) dma_fence_set_error(synx_obj->fence, -status); @@ -514,6 +519,7 @@ int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status) rc = -SYNX_NOENT; goto fail; } + mutex_lock(&synx_child_obj[i]->obj_lock); spin_lock_irqsave(synx_child_obj[i]->fence->lock, flags); if (synx_util_get_object_status_locked(synx_child_obj[i]) != SYNX_STATE_ACTIVE || @@ -525,6 +531,7 @@ int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status) } spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags); + status = synx_global_get_status(synx_child_obj[i]->global_idx); rc = synx_native_signal_fence(synx_child_obj[i], status); mutex_unlock(&synx_child_obj[i]->obj_lock); } @@ -533,6 +540,80 @@ fail: return rc; } +u32 synx_get_child_status(struct synx_coredata *synx_obj) +{ + u32 h_child = 0, i = 0; + u32 status = SYNX_DMA_FENCE_STATE_MAX - 1, child_status = SYNX_STATE_ACTIVE; + struct dma_fence_array *array = NULL; + struct synx_map_entry *fence_entry = NULL; + struct synx_coredata *synx_child_obj = NULL; + + if (!dma_fence_is_array(synx_obj->fence)) + return status; + + array = to_dma_fence_array(synx_obj->fence); + if (IS_ERR_OR_NULL(array)) + goto bail; + + for (i = 0; i < array->num_fences; i++) { + h_child = synx_util_get_fence_entry((u64)array->fences[i], 1); + if (h_child == 0) + h_child = synx_util_get_fence_entry((u64)array->fences[i], 0); + + if (h_child == 0) + continue; + + fence_entry = synx_util_get_map_entry(h_child); + if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) { + dprintk(SYNX_ERR, "Invalid handle access %u", h_child); + goto bail; + } + synx_child_obj = fence_entry->synx_obj; + + mutex_lock(&synx_child_obj->obj_lock); + if (synx_util_is_global_object(synx_child_obj)) + child_status = synx_global_get_status(synx_child_obj->global_idx); + else + child_status = synx_child_obj->status; + mutex_unlock(&synx_child_obj->obj_lock); + synx_util_release_map_entry(fence_entry); + + dprintk(SYNX_VERB, "Child handle %u status %d", h_child, child_status); + if (child_status != SYNX_STATE_ACTIVE && + (status == SYNX_DMA_FENCE_STATE_MAX - 1 || + (child_status > SYNX_STATE_SIGNALED_SUCCESS && + child_status <= SYNX_STATE_SIGNALED_MAX))) + status = child_status; + } +bail: + return status; +} + +u32 synx_custom_get_status(struct synx_coredata *synx_obj, u32 status) +{ + u32 custom_status = status; + u32 parent_global_status = + synx_util_is_global_object(synx_obj) ? 
+ synx_global_get_status(synx_obj->global_idx) : SYNX_STATE_ACTIVE; + + if (IS_ERR_OR_NULL(synx_obj)) + goto bail; + + mutex_lock(&synx_obj->obj_lock); + if (synx_util_is_merged_object(synx_obj)) { + if (parent_global_status == SYNX_STATE_ACTIVE) + synx_obj->status = synx_get_child_status(synx_obj); + else + synx_obj->status = parent_global_status; + } + + custom_status = synx_obj->status; + mutex_unlock(&synx_obj->obj_lock); + +bail: + return custom_status; +} + void synx_signal_handler(struct work_struct *cb_dispatch) { int rc = SYNX_SUCCESS; @@ -544,6 +625,13 @@ void synx_signal_handler(struct work_struct *cb_dispatch) u32 h_synx = signal_cb->handle; u32 status = signal_cb->status; + if (signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) { + status = synx_custom_get_status(synx_obj, status); + dprintk(SYNX_VERB, + "handle %d will be updated with status %d\n", + h_synx, status); + } + if ((signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) && (synx_util_is_global_handle(h_synx) || synx_util_is_global_object(synx_obj))) { @@ -590,8 +678,8 @@ void synx_signal_handler(struct work_struct *cb_dispatch) } mutex_lock(&synx_obj->obj_lock); - - if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) { + if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC && + synx_util_get_object_status(synx_obj) == SYNX_STATE_ACTIVE) { if (synx_util_is_merged_object(synx_obj)) rc = synx_native_signal_merged_fence(synx_obj, status); else @@ -648,8 +736,12 @@ void synx_fence_callback(struct dma_fence *fence, */ if (status == 1) status = SYNX_STATE_SIGNALED_SUCCESS; - else if (status < 0) + else if (status == -SYNX_STATE_SIGNALED_CANCEL) + status = SYNX_STATE_SIGNALED_CANCEL; + else if (status < 0 && status >= -SYNX_STATE_SIGNALED_MAX) status = SYNX_STATE_SIGNALED_EXTERNAL; + else + status = (u32)-status; signal_cb->status = status; @@ -707,7 +799,10 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status) if (IS_ERR_OR_NULL(client)) return -SYNX_INVALID; - if (status <= SYNX_STATE_ACTIVE) { + if (status <= SYNX_STATE_ACTIVE || + !(status == SYNX_STATE_SIGNALED_SUCCESS || + status == SYNX_STATE_SIGNALED_CANCEL || + status > SYNX_STATE_SIGNALED_MAX)) { dprintk(SYNX_ERR, "[sess :%llu] signaling with wrong status: %u\n", client->id, status); @@ -727,7 +822,6 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status) } mutex_lock(&synx_obj->obj_lock); - if (synx_util_is_global_handle(h_synx) || synx_util_is_global_object(synx_obj)) rc = synx_global_update_status( @@ -997,7 +1091,7 @@ EXPORT_SYMBOL(synx_cancel_async_wait); int synx_merge(struct synx_session *session, struct synx_merge_params *params) { - int rc, i, num_signaled = 0; + int rc = SYNX_SUCCESS, i, num_signaled = 0; u32 count = 0, h_child, status = SYNX_STATE_ACTIVE; u32 *h_child_list = NULL, *h_child_idx_list = NULL; struct synx_client *client; @@ -1073,6 +1167,7 @@ int synx_merge(struct synx_session *session, h_child_idx_list = kzalloc(count*4, GFP_KERNEL); if (IS_ERR_OR_NULL(h_child_idx_list)) { + kfree(h_child_list); rc = -SYNX_NOMEM; goto clear; } @@ -1110,10 +1205,14 @@ int synx_merge(struct synx_session *session, client->id, h_child_list[i]); continue; } - - rc = synx_native_signal_fence(synx_obj_child, status); + mutex_lock(&synx_obj_child->obj_lock); + if (synx_obj->status == SYNX_STATE_ACTIVE) + rc = synx_native_signal_fence(synx_obj_child, status); + mutex_unlock(&synx_obj_child->obj_lock); if (rc != SYNX_SUCCESS) dprintk(SYNX_ERR, "h_synx %u failed with status %d\n", h_child_list[i], rc); + + synx_util_release_handle(synx_data_child); } } } @@ -1361,7 
+1460,7 @@ EXPORT_SYMBOL(synx_bind); int synx_get_status(struct synx_session *session, u32 h_synx) { - int rc = 0; + int rc = 0, status = 0; struct synx_client *client; struct synx_handle_coredata *synx_data; struct synx_coredata *synx_obj; @@ -1381,23 +1480,13 @@ int synx_get_status(struct synx_session *session, goto fail; } - if (synx_util_is_global_handle(h_synx)) { - rc = synx_global_get_status( - synx_util_global_idx(h_synx)); - if (rc != SYNX_STATE_ACTIVE) { - dprintk(SYNX_VERB, - "[sess :%llu] handle %u in status %d\n", - client->id, h_synx, rc); - goto fail; - } - } - mutex_lock(&synx_obj->obj_lock); - rc = synx_util_get_object_status(synx_obj); + status = synx_util_get_object_status(synx_obj); + rc = synx_obj->status; mutex_unlock(&synx_obj->obj_lock); dprintk(SYNX_VERB, - "[sess :%llu] handle %u status %d\n", - client->id, h_synx, rc); + "[sess :%llu] handle %u synx coredata status %d and dma fence status %d\n", + client->id, h_synx, rc, status); fail: synx_util_release_handle(synx_data); diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index edfbf2faec..7a10a3d653 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -460,7 +460,7 @@ u32 synx_global_get_status(u32 idx) { int rc; unsigned long flags; - u32 status; + u32 status = SYNX_STATE_ACTIVE; struct synx_global_coredata *synx_g_obj; if (!synx_gmem.table) @@ -473,7 +473,8 @@ u32 synx_global_get_status(u32 idx) if (rc) return rc; synx_g_obj = &synx_gmem.table[idx]; - status = synx_g_obj->status; + if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0) + status = synx_g_obj->status; synx_gmem_unlock(idx, &flags); return status; @@ -500,8 +501,10 @@ u32 synx_global_test_status_set_wait(u32 idx, synx_global_print_data(synx_g_obj, __func__); status = synx_g_obj->status; /* if handle is still ACTIVE */ - if (status == SYNX_STATE_ACTIVE) + if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) { synx_g_obj->waiters |= (1UL << id); + status = SYNX_STATE_ACTIVE; + } else dprintk(SYNX_DBG, "handle %u already signaled %u", synx_g_obj->handle, synx_g_obj->status); @@ -533,21 +536,17 @@ static int synx_global_update_status_core(u32 idx, if (synx_g_obj->num_child != 0) { /* composite handle */ synx_g_obj->num_child--; + if (synx_g_obj->status == SYNX_STATE_ACTIVE || + (status > SYNX_STATE_SIGNALED_SUCCESS && + status <= SYNX_STATE_SIGNALED_MAX)) + synx_g_obj->status = status; + if (synx_g_obj->num_child == 0) { - if (synx_g_obj->status == SYNX_STATE_ACTIVE) { - synx_g_obj->status = - (status == SYNX_STATE_SIGNALED_SUCCESS) ? 
- SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR; - data |= synx_g_obj->status; - synx_global_get_waiting_cores_locked(synx_g_obj, - wait_cores); - synx_global_get_parents_locked(synx_g_obj, h_parents); - } else { - data = 0; - dprintk(SYNX_WARN, - "merged handle %u already in state %u\n", - synx_g_obj->handle, synx_g_obj->status); - } + data |= synx_g_obj->status; + synx_global_get_waiting_cores_locked(synx_g_obj, + wait_cores); + synx_global_get_parents_locked(synx_g_obj, h_parents); + /* release ref held by constituting handles */ synx_g_obj->refcount--; if (synx_g_obj->refcount == 0) { @@ -555,15 +554,6 @@ static int synx_global_update_status_core(u32 idx, sizeof(*synx_g_obj)); clear = true; } - } else if (status != SYNX_STATE_SIGNALED_SUCCESS) { - synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR; - data |= synx_g_obj->status; - synx_global_get_waiting_cores_locked(synx_g_obj, - wait_cores); - synx_global_get_parents_locked(synx_g_obj, h_parents); - dprintk(SYNX_WARN, - "merged handle %u signaled with error state\n", - synx_g_obj->handle); } else { /* pending notification from handles */ data = 0; @@ -723,8 +713,8 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx) struct synx_global_coredata *synx_g_obj; u32 i, j = 0; u32 idx; - bool sig_error = false; u32 num_child = 0; + u32 parent_status = SYNX_STATE_ACTIVE; if (!synx_gmem.table) return -SYNX_NOMEM; @@ -746,18 +736,26 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx) goto fail; synx_g_obj = &synx_gmem.table[idx]; - if (synx_g_obj->status == SYNX_STATE_ACTIVE) { - for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { - if (synx_g_obj->parents[i] == 0) { - synx_g_obj->parents[i] = p_idx; - break; - } + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (synx_g_obj->parents[i] == 0) { + synx_g_obj->parents[i] = p_idx; + break; } - num_child++; - } else if (synx_g_obj->status > - SYNX_STATE_SIGNALED_SUCCESS) { - sig_error = true; } + if (synx_g_obj->status == SYNX_STATE_ACTIVE) + num_child++; + else if (synx_g_obj->status > + SYNX_STATE_SIGNALED_SUCCESS && + synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX) + parent_status = synx_g_obj->status; + else if (parent_status == SYNX_STATE_ACTIVE) + parent_status = synx_g_obj->status; + + if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0) + num_child++; + + dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n", + synx_g_obj->status, parent_status); synx_gmem_unlock(idx, &flags); if (i >= SYNX_GLOBAL_MAX_PARENTS) { @@ -773,13 +771,9 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx) goto fail; synx_g_obj = &synx_gmem.table[p_idx]; synx_g_obj->num_child += num_child; - if (sig_error) - synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR; - else if (synx_g_obj->num_child != 0) + if (synx_g_obj->num_child != 0) synx_g_obj->refcount++; - else if (synx_g_obj->num_child == 0 && - synx_g_obj->status == SYNX_STATE_ACTIVE) - synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS; + synx_g_obj->status = parent_status; synx_global_print_data(synx_g_obj, __func__); synx_gmem_unlock(p_idx, &flags); diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 733be049e9..074c2b6a79 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -54,6 +54,9 @@ enum synx_core_id { #define SYNX_STATE_SIGNALED_EXTERNAL 5 #define SYNX_STATE_SIGNALED_SSR 6 +/* dma fence states */ +#define SYNX_DMA_FENCE_STATE_MAX 4096 + /** * struct synx_global_coredata - Synx global object, used for book keeping * of all metadata associated with each 
individual global entry diff --git a/msm/synx/synx_private.h b/msm/synx/synx_private.h index 3caa261f38..f9ef273aa7 100644 --- a/msm/synx/synx_private.h +++ b/msm/synx/synx_private.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SYNX_PRIVATE_H__ @@ -149,6 +149,7 @@ struct synx_coredata { struct mutex obj_lock; struct kref refcount; u32 type; + u32 status; u32 num_bound_synxs; struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS]; struct list_head reg_cbs_list; diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 86c14fe38b..94dc12c6cc 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -108,6 +108,7 @@ int synx_util_init_coredata(struct synx_coredata *synx_obj, if (rc != SYNX_SUCCESS) goto clean; + synx_obj->status = synx_util_get_object_status(synx_obj); return SYNX_SUCCESS; clean: @@ -217,6 +218,7 @@ int synx_util_init_group_coredata(struct synx_coredata *synx_obj, kref_init(&synx_obj->refcount); mutex_init(&synx_obj->obj_lock); INIT_LIST_HEAD(&synx_obj->reg_cbs_list); + synx_obj->status = synx_util_get_object_status(synx_obj); synx_util_activate(synx_obj); return rc; @@ -731,7 +733,7 @@ static u32 __fence_state(struct dma_fence *fence, bool locked) static u32 __fence_group_state(struct dma_fence *fence, bool locked) { u32 i = 0; - u32 state = SYNX_STATE_INVALID; + u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID; struct dma_fence_array *array = NULL; u32 intr, actv_cnt, sig_cnt, err_cnt; @@ -747,6 +749,8 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked) for (i = 0; i < array->num_fences; i++) { intr = __fence_state(array->fences[i], locked); + if (err_cnt == 0) + parent_state = intr; switch (intr) { case SYNX_STATE_ACTIVE: actv_cnt++; @@ -755,7 +759,7 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked) sig_cnt++; break; default: - err_cnt++; + intr > SYNX_STATE_SIGNALED_MAX ? sig_cnt++ : err_cnt++; } } @@ -763,12 +767,10 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked) "group cnt stats act:%u, sig: %u, err: %u\n", actv_cnt, sig_cnt, err_cnt); - if (err_cnt) - state = SYNX_STATE_SIGNALED_ERROR; - else if (actv_cnt) + if (actv_cnt) state = SYNX_STATE_ACTIVE; - else if (sig_cnt == array->num_fences) - state = SYNX_STATE_SIGNALED_SUCCESS; + else + state = parent_state; return state; } From 26a5a7df0d8dc86f84281f26f751eab14b769319 Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Fri, 17 Mar 2023 22:38:42 +0530 Subject: [PATCH 26/42] msm: synx: ipclite: TOC Restructuring 1. Offset based TOC setup, which should be parsed by the FWs to build required structures. 2. Dynamic Partitioning support - where the enabled hosts' info is parsed from DT and only required partitions are allocated in the global memory. 3. Magic Number based TOC header data integrity. 4. Clean ups - Channel status moved to partition header - Use only standard kernel return codes Backward Compatibility Scenario: Older APPSS code will have toc.size in place of magic number, hence the value will be 4096, which should be detected by the FW to use older structures. 
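For reference, the firmware-side detection implied above can be sketched as
follows, assuming the new ipcmem_toc_header layout and the behaviour of
insert_magic_number() added in this patch; the helper names and standalone-C
form are illustrative only and are not part of the driver change:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define LEGACY_TOC_SIZE 4096U /* old hdr.size value written by previous APPSS images */

    /* Old format stores toc.size in the first header word; new format stores the magic number. */
    static bool toc_is_legacy(const uint32_t *hdr_words)
    {
            return hdr_words[0] == LEGACY_TOC_SIZE;
    }

    /*
     * insert_magic_number() writes ~(XOR of the remaining header words) into word 0,
     * so XOR-ing every word of a valid new-format header yields all ones.
     */
    static bool toc_magic_valid(const uint32_t *hdr_words, size_t nwords)
    {
            uint32_t acc = 0;
            size_t i;

            for (i = 0; i < nwords; i++)
                    acc ^= hdr_words[i];

            return acc == 0xFFFFFFFFU;
    }
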
Change-Id: I776eca4bdd997e983d35ef1e1f068cf73cdb72f7 Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 559 +++++++++++++++++++++++--------------- msm/synx/ipclite.h | 339 ++++++++++------------- msm/synx/ipclite_client.h | 9 +- 3 files changed, 486 insertions(+), 421 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 44633cbdd7..ce0a6b734d 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ @@ -39,11 +39,22 @@ static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem; static struct mutex ssr_mutex; static struct kobject *sysfs_kobj; -static uint32_t channel_status_info[IPCMEM_NUM_HOSTS]; +static uint32_t enabled_hosts; +static uint32_t partitions; static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO; static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump; +static inline bool is_host_enabled(uint32_t host) +{ + return (1U & (enabled_hosts >> host)); +} + +static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1) +{ + return (h0 == h1 && h0 != IPCMEM_APPS); +} + static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...) { uint32_t local_index = 0; @@ -83,52 +94,51 @@ static void ipclite_dump_debug_struct(void) pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n"); for (host = 0; host < IPCMEM_NUM_HOSTS; host++) { - if (ipclite->ipcmem.toc->recovery.configured_core[host]) { - temp_dbg_struct = (struct ipclite_debug_struct *) - (((char *)ipclite_dbg_struct) + - (sizeof(*temp_dbg_struct) * host)); + if (!is_host_enabled(host)) + continue; + temp_dbg_struct = (struct ipclite_debug_struct *) + (((char *)ipclite_dbg_struct) + + (sizeof(*temp_dbg_struct) * host)); - pr_info("---------- Host ID: %d dbg_mem:%p ----------\n", - host, temp_dbg_struct); - pr_info("Total Signals Sent : %d Total Signals Received : %d\n", - temp_dbg_struct->dbg_info_overall.total_numsig_sent, - temp_dbg_struct->dbg_info_overall.total_numsig_recv); - pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n", - temp_dbg_struct->dbg_info_overall.last_sent_host_id, - temp_dbg_struct->dbg_info_overall.last_recv_host_id); - pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n", - temp_dbg_struct->dbg_info_overall.last_sigid_sent, - temp_dbg_struct->dbg_info_overall.last_sigid_recv); + pr_info("---------- Host ID: %d dbg_mem:%p ----------\n", + host, temp_dbg_struct); + pr_info("Total Signals Sent : %d Total Signals Received : %d\n", + temp_dbg_struct->dbg_info_overall.total_numsig_sent, + temp_dbg_struct->dbg_info_overall.total_numsig_recv); + pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n", + temp_dbg_struct->dbg_info_overall.last_sent_host_id, + temp_dbg_struct->dbg_info_overall.last_recv_host_id); + pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n", + temp_dbg_struct->dbg_info_overall.last_sigid_sent, + temp_dbg_struct->dbg_info_overall.last_sigid_recv); - for (i = 0; i < IPCMEM_NUM_HOSTS; i++) { - if (ipclite->ipcmem.toc->recovery.configured_core[i]) { - pr_info("----------> Host ID : %d Host ID : %d Channel State: %d\n", - host, i, 
ipclite->ipcmem.toc->toc_entry[host][i].status); - pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n", - temp_dbg_struct->dbg_info_host[i].numsig_sent, - temp_dbg_struct->dbg_info_host[i].numsig_recv); - pr_info("No. of Interrupts Received : %d\n", - temp_dbg_struct->dbg_info_host[i].num_intr); - pr_info("TX Write Index : %d TX Read Index : %d\n", - temp_dbg_struct->dbg_info_host[i].tx_wr_index, - temp_dbg_struct->dbg_info_host[i].tx_rd_index); - pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n", - temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0], - temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]); - pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n", - temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1], - temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]); - pr_info("RX Write Index : %d RX Read Index : %d\n", - temp_dbg_struct->dbg_info_host[i].rx_wr_index, - temp_dbg_struct->dbg_info_host[i].rx_rd_index); - pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n", - temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0], - temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]); - pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n", - temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1], - temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]); - } - } + for (i = 0; i < IPCMEM_NUM_HOSTS; i++) { + if (!is_host_enabled(i)) + continue; + pr_info("----------> Host ID : %d Host ID : %d\n", host, i); + pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n", + temp_dbg_struct->dbg_info_host[i].numsig_sent, + temp_dbg_struct->dbg_info_host[i].numsig_recv); + pr_info("No. of Interrupts Received : %d\n", + temp_dbg_struct->dbg_info_host[i].num_intr); + pr_info("TX Write Index : %d TX Read Index : %d\n", + temp_dbg_struct->dbg_info_host[i].tx_wr_index, + temp_dbg_struct->dbg_info_host[i].tx_rd_index); + pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0], + temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]); + pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1], + temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]); + pr_info("RX Write Index : %d RX Read Index : %d\n", + temp_dbg_struct->dbg_info_host[i].rx_wr_index, + temp_dbg_struct->dbg_info_host[i].rx_rd_index); + pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0], + temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]); + pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n", + temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1], + temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]); } } return; @@ -178,7 +188,7 @@ static void ipclite_hw_mutex_acquire(void) int32_t ret; if (ipclite != NULL) { - if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) { + if (!global_atomic_support) { ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, HWSPINLOCK_TIMEOUT, &ipclite->ipclite_hw_mutex->flags); @@ -187,7 +197,7 @@ static void ipclite_hw_mutex_acquire(void) return; } - ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS; + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS; IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n"); } @@ -197,9 +207,8 @@ static void ipclite_hw_mutex_acquire(void) static void ipclite_hw_mutex_release(void) { if (ipclite != NULL) { - if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) { - 
ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = - IPCMEM_INVALID_HOST; + if (!global_atomic_support) { + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->ipclite_hw_mutex->flags); IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n"); @@ -636,6 +645,15 @@ static int ipclite_tx(struct ipclite_channel *channel, unsigned long flags; int ret = 0; + if (channel->status != ACTIVE) { + if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) { + channel->status = ACTIVE; + } else { + IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Channel not active\n"); + return -EOPNOTSUPP; + } + } + spin_lock_irqsave(&channel->tx_lock, flags); if (ipclite_tx_avail(channel) < dlen) { spin_unlock_irqrestore(&channel->tx_lock, flags); @@ -656,102 +674,98 @@ static int ipclite_tx(struct ipclite_channel *channel, static int ipclite_send_debug_info(int32_t proc_id) { int ret = 0; + struct ipclite_channel *channel; if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); return -EINVAL; } + channel = &ipclite->channel[proc_id]; - if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { - if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { - channel_status_info[proc_id] = CHANNEL_ACTIVE; + if (channel->status != ACTIVE) { + if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) { + channel->status = ACTIVE; } else { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); - return -IPCLITE_EINCHAN; + return -EOPNOTSUPP; } } - ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, - NULL); - if (ret < IPCLITE_SUCCESS) { + ret = mbox_send_message(channel->irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, NULL); + if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, "Debug Signal sending failed to Core : %d Signal : %d ret : %d\n", proc_id, IPCLITE_DEBUG_SIGNAL, ret); - return -IPCLITE_FAILURE; + return ret; } IPCLITE_OS_LOG(IPCLITE_DBG, "Debug Signal send completed to core : %d signal : %d ret : %d\n", proc_id, IPCLITE_DEBUG_SIGNAL, ret); - return IPCLITE_SUCCESS; + return 0; } int ipclite_ssr_update(int32_t proc_id) { int ret = 0; + struct ipclite_channel *channel; if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); return -EINVAL; } + channel = &ipclite->channel[proc_id]; - if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { - if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { - channel_status_info[proc_id] = CHANNEL_ACTIVE; + if (channel->status != ACTIVE) { + if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) { + channel->status = ACTIVE; } else { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); - return -IPCLITE_EINCHAN; + return -EOPNOTSUPP; } } - ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, - NULL); - if (ret < IPCLITE_SUCCESS) { + ret = mbox_send_message(channel->irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL); + if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, "SSR Signal sending failed to Core : %d Signal : %d ret : %d\n", proc_id, IPCLITE_SSR_SIGNAL, ret); - return -IPCLITE_FAILURE; + return ret; } IPCLITE_OS_LOG(IPCLITE_DBG, "SSR Signal send completed to core : %d signal : %d ret : %d\n", proc_id, IPCLITE_SSR_SIGNAL, ret); - return IPCLITE_SUCCESS; + return 0; } void ipclite_recover(enum 
ipcmem_host_type core_id) { - int ret, i, host, host0, host1; + int ret, host, host0, host1; + uint32_t p; IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id); /* verify and reset the hw mutex lock */ - if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) { - ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; + if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) { + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; hwspin_unlock_raw(ipclite->hwlock); IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n"); } mutex_lock(&ssr_mutex); /* Set the Global Channel Status to 0 to avoid Race condition */ - for (i = 0; i < MAX_PARTITION_COUNT; i++) { - host0 = ipcmem_toc_partition_entries[i].host0; - host1 = ipcmem_toc_partition_entries[i].host1; + for (p = 0; p < partitions; p++) { + host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0; + host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1; + if (host0 != core_id && host1 != core_id) + continue; - if (host0 == core_id || host1 == core_id) { + ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.partition[p]->hdr.status)), 0); - ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) - (&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0); - ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) - (&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0); - - channel_status_info[core_id] = - ipclite->ipcmem.toc->toc_entry[host0][host1].status; - } - IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host0, host1, - ipclite->ipcmem.toc->toc_entry[host0][host1].status); - IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host1, host0, - ipclite->ipcmem.toc->toc_entry[host1][host0].status); + IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", + host0, host1, ipclite->ipcmem.partition[p]->hdr.status); } /* Resets the TX/RX queue */ @@ -765,23 +779,19 @@ void ipclite_recover(enum ipcmem_host_type core_id) /* Increment the Global Channel Status for APPS and crashed core*/ ipclite_global_atomic_inc((ipclite_atomic_int32_t *) - (&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status))); - ipclite_global_atomic_inc((ipclite_atomic_int32_t *) - (&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status))); + ipclite->channel[core_id].gstatus_ptr); - channel_status_info[core_id] = - ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status; + ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr; /* Update other cores about SSR */ for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { - if (host != core_id && ipclite->ipcmem.toc->recovery.configured_core[host]) { - ret = ipclite_ssr_update(host); - if (ret < IPCLITE_SUCCESS) - IPCLITE_OS_LOG(IPCLITE_ERR, - "Failed to send SSR update to core : %d\n", host); - else - IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host); - } + if (!is_host_enabled(host) || host == core_id) + continue; + ret = ipclite_ssr_update(host); + if (ret < 0) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host); } mutex_unlock(&ssr_mutex); @@ -804,15 +814,6 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data) return -EINVAL; } - if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { - if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { - 
channel_status_info[proc_id] = CHANNEL_ACTIVE; - } else { - IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); - return -IPCLITE_EINCHAN; - } - } - ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_MSG_SIGNAL); @@ -845,15 +846,6 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data) return -EINVAL; } - if (channel_status_info[proc_id] != CHANNEL_ACTIVE) { - if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) { - channel_status_info[proc_id] = CHANNEL_ACTIVE; - } else { - IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); - return -IPCLITE_EINCHAN; - } - } - ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_TEST_SIGNAL); @@ -911,25 +903,67 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name) return ret; } -static void ipcmem_init(struct ipclite_mem *ipcmem) +/** + * insert_magic_number() - Inserts the magic number in toc header + * + * Function computes a simple checksum of the contents in toc header + * and stores the result in magic_number field in the toc header + */ +static void insert_magic_number(void) { - int host, host0, host1; - int i = 0; + uint32_t *block = ipclite->ipcmem.mem.virt_base; + size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t); - ipcmem->toc = ipcmem->mem.virt_base; - IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc); + for (int i = 1; i < size; i++) + block[0] ^= block[i]; - ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE; - IPCLITE_OS_LOG(IPCLITE_DBG, "toc->hdr.size = %d\n", ipcmem->toc->hdr.size); + block[0] = ~block[0]; +} +static int32_t setup_toc(struct ipclite_mem *ipcmem) +{ + size_t offset = 0; + void *virt_base = ipcmem->mem.virt_base; + struct ipcmem_offsets *offsets = &ipcmem->toc->offsets; + struct ipcmem_toc_data *toc_data = &ipcmem->toc_data; + + /* Setup Offsets */ + offsets->host_info = offset += IPCMEM_TOC_VAR_OFFSET; + offsets->global_entry = offset += sizeof(struct ipcmem_host_info); + offsets->partition_info = offset += sizeof(struct ipcmem_partition_entry); + offsets->partition_entry = offset += sizeof(struct ipcmem_partition_info); + // offsets->debug = virt_base + size - 64K; + /* Offset to be used for any new structure added in toc (after partition_entry) */ + // offsets->new_struct = offset += sizeof(struct ipcmem_partition_entry)*IPCMEM_NUM_HOSTS; + + IPCLITE_OS_LOG(IPCLITE_DBG, "toc_data offsets:"); + IPCLITE_OS_LOG(IPCLITE_DBG, "host_info = 0x%X", offsets->host_info); + IPCLITE_OS_LOG(IPCLITE_DBG, "global_entry = 0x%X", offsets->global_entry); + IPCLITE_OS_LOG(IPCLITE_DBG, "partition_info = 0x%X", offsets->partition_info); + IPCLITE_OS_LOG(IPCLITE_DBG, "partition_entry = 0x%X", offsets->partition_entry); + + /* Point structures to the appropriate offset in TOC */ + toc_data->host_info = ADD_OFFSET(virt_base, offsets->host_info); + toc_data->global_entry = ADD_OFFSET(virt_base, offsets->global_entry); + toc_data->partition_info = ADD_OFFSET(virt_base, offsets->partition_info); + toc_data->partition_entry = ADD_OFFSET(virt_base, offsets->partition_entry); + + return 0; +} + +static void setup_global_partition(struct ipclite_mem *ipcmem, uint32_t base_offset) +{ /*Fill in global partition details*/ - ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry; - ipcmem->global_partition = (struct ipcmem_global_partition *) - ((char *)ipcmem->mem.virt_base + - ipcmem_toc_global_partition_entry.base_offset); + ipcmem->toc_data.global_entry->base_offset = base_offset; + 
ipcmem->toc_data.global_entry->size = GLOBAL_PARTITION_SIZE; + ipcmem->toc_data.global_entry->flags = GLOBAL_PARTITION_FLAGS; + ipcmem->toc_data.global_entry->host0 = IPCMEM_GLOBAL_HOST; + ipcmem->toc_data.global_entry->host1 = IPCMEM_GLOBAL_HOST; + + ipcmem->global_partition = ADD_OFFSET(ipcmem->mem.virt_base, base_offset); IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n", - ipcmem_toc_global_partition_entry.base_offset, + base_offset, ipcmem->global_partition); ipcmem->global_partition->hdr = global_partition_hdr; @@ -938,55 +972,112 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) ipcmem->global_partition->hdr.partition_type, ipcmem->global_partition->hdr.region_offset, ipcmem->global_partition->hdr.region_size); +} - /* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/ - for (i = 0; i < MAX_PARTITION_COUNT; i++) { - host0 = ipcmem_toc_partition_entries[i].host0; - host1 = ipcmem_toc_partition_entries[i].host1; - IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1); +static void update_partition(struct ipclite_mem *ipcmem, uint32_t p) +{ + int host0 = ipcmem->toc_data.partition_entry[p].host0; + int host1 = ipcmem->toc_data.partition_entry[p].host1; - ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i]; - ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i]; + IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1); - if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) { - /* Updating the Global Channel Status for APPS Loopback */ - ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE; - ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE; + ipcmem->partition[p] = ADD_OFFSET(ipcmem->mem.virt_base, + ipcmem->toc_data.partition_entry[p].base_offset); - /* Updating Local Channel Status */ - channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status; + IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx", + p, ipcmem->partition[p], + p, ipcmem->toc_data.partition_entry[p].base_offset); - } else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) { - /* Updating the Global Channel Status */ - ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS; - ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS; + if (host0 == host1) + ipcmem->partition[p]->hdr = loopback_partition_hdr; + else + ipcmem->partition[p]->hdr = default_partition_hdr; - /* Updating Local Channel Status */ - if (host0 == IPCMEM_APPS) - host = host1; - else if (host1 == IPCMEM_APPS) - host = host0; + IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d", + ipcmem->partition[p]->hdr.type, + ipcmem->partition[p]->hdr.desc_offset, + ipcmem->partition[p]->hdr.desc_size); +} - channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status; +static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset) +{ + uint32_t p, host0, host1; + uint32_t num_entry = 0; + + /*Fill in each valid ipcmem partition table entry*/ + for (host0 = 0; host0 < IPCMEM_NUM_HOSTS; host0++) { + if (!is_host_enabled(host0)) + continue; + for (host1 = host0; host1 < IPCMEM_NUM_HOSTS; host1++) { + if (!is_host_enabled(host1) || is_loopback_except_apps(host0, host1)) + continue; + ipcmem->toc_data.partition_entry[num_entry].base_offset = base_offset; + ipcmem->toc_data.partition_entry[num_entry].size = DEFAULT_PARTITION_SIZE; + ipcmem->toc_data.partition_entry[num_entry].flags = 
DEFAULT_PARTITION_FLAGS; + ipcmem->toc_data.partition_entry[num_entry].host0 = host0; + ipcmem->toc_data.partition_entry[num_entry].host1 = host1; + + base_offset += DEFAULT_PARTITION_SIZE; + num_entry++; } + } + IPCLITE_OS_LOG(IPCLITE_DBG, "total partitions = %u", num_entry); - ipcmem->partition[i] = (struct ipcmem_partition *) - ((char *)ipcmem->mem.virt_base + - ipcmem_toc_partition_entries[i].base_offset); + ipcmem->partition = kcalloc(num_entry, sizeof(*ipcmem->partition), GFP_KERNEL); + if (!ipcmem->partition) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Partition Allocation failed"); + return -ENOMEM; + } - IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx\n", - i, ipcmem->partition[i], - i, ipcmem_toc_partition_entries[i].base_offset); + /*Update appropriate partition based on partition entries*/ + for (p = 0; p < num_entry; p++) + update_partition(ipcmem, p); - if (host0 == host1) - ipcmem->partition[i]->hdr = loopback_partition_hdr; - else - ipcmem->partition[i]->hdr = default_partition_hdr; + /*Set up info to parse partition entries*/ + ipcmem->toc_data.partition_info->num_entries = partitions = num_entry; + ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry); + return 0; +} - IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n", - ipcmem->partition[i]->hdr.type, - ipcmem->partition[i]->hdr.desc_offset, - ipcmem->partition[i]->hdr.desc_size); +static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn) +{ + int ret; + uint32_t remote_pid; + uint32_t host_count = 0; + uint32_t gmem_offset = 0; + struct device_node *cn; + + for_each_available_child_of_node(pn, cn) { + of_property_read_u32(cn, "qcom,remote-pid", &remote_pid); + if (remote_pid < IPCMEM_NUM_HOSTS) { + enabled_hosts |= BIT_MASK(remote_pid); + host_count++; + } + } + IPCLITE_OS_LOG(IPCLITE_DBG, "enabled_hosts = 0x%X", enabled_hosts); + IPCLITE_OS_LOG(IPCLITE_DBG, "host_count = %u", host_count); + + ipcmem->toc = ipcmem->mem.virt_base; + IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc); + + ret = setup_toc(ipcmem); + if (ret) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up toc"); + return ret; + } + + /*Set up host related info*/ + ipcmem->toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; + ipcmem->toc_data.host_info->configured_host = enabled_hosts; + + gmem_offset += IPCMEM_TOC_SIZE; + setup_global_partition(ipcmem, gmem_offset); + + gmem_offset += GLOBAL_PARTITION_SIZE; + ret = setup_partitions(ipcmem, gmem_offset); + if (ret) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up partitions"); + return ret; } /*Making sure all writes for ipcmem configurations are completed*/ @@ -994,6 +1085,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem) ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED; IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n"); + return 0; } static int ipclite_channel_irq_init(struct device *parent, struct device_node *node, @@ -1089,8 +1181,22 @@ EXPORT_SYMBOL(get_global_partition_info); static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid, int remote_pid) { - return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base + - ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset); + uint32_t p; + uint32_t found = -1; + + for (p = 0; p < partitions; p++) { + if (ipcmem.toc_data.partition_entry[p].host0 == local_pid + && ipcmem.toc_data.partition_entry[p].host1 == remote_pid) { + found = p; + break; + } + } + + if (found < 
partitions) + return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base + + ipcmem.toc_data.partition_entry[found].base_offset); + else + return NULL; } static void ipclite_channel_release(struct device *dev) @@ -1166,9 +1272,13 @@ static int ipclite_channel_init(struct device *parent, } IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo); - partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, - local_pid, remote_pid); + partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid); IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr); + if (!partition_hdr) { + ret = -ENOMEM; + goto err_put_dev; + } + descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset); IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs); @@ -1216,6 +1326,7 @@ static int ipclite_channel_init(struct device *parent, ipclite->channel[remote_pid].remote_pid = remote_pid; ipclite->channel[remote_pid].tx_fifo = tx_fifo; ipclite->channel[remote_pid].rx_fifo = rx_fifo; + ipclite->channel[remote_pid].gstatus_ptr = &partition_hdr->status; spin_lock_init(&ipclite->channel[remote_pid].tx_lock); @@ -1228,12 +1339,19 @@ static int ipclite_channel_init(struct device *parent, } } - ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE; + /* Updating Local & Global Channel Status */ + if (remote_pid == IPCMEM_APPS) { + *ipclite->channel[remote_pid].gstatus_ptr = ACTIVE; + ipclite->channel[remote_pid].status = ACTIVE; + } else { + *ipclite->channel[remote_pid].gstatus_ptr = IN_PROGRESS; + ipclite->channel[remote_pid].status = IN_PROGRESS; + } IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret); return ret; err_put_dev: - ipclite->channel[remote_pid].channel_status = 0; + ipclite->channel[remote_pid].status = INACTIVE; device_unregister(dev); kfree(dev); return ret; @@ -1255,9 +1373,9 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj, /* Parse the string from Sysfs Interface */ ret = kstrtoint(buf, 0, &ipclite_debug_level); - if (ret < IPCLITE_SUCCESS) { + if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); - return -IPCLITE_FAILURE; + return ret; } /* Check if debug structure is initialized */ @@ -1274,14 +1392,13 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj, /* Signal other cores for updating the debug information */ for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { - if (ipclite->ipcmem.toc->recovery.configured_core[host]) { - ret = ipclite_send_debug_info(host); - if (ret < IPCLITE_SUCCESS) - IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", - host); - else - IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); - } + if (!is_host_enabled(host)) + continue; + ret = ipclite_send_debug_info(host); + if (ret < 0) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); } return count; @@ -1294,9 +1411,9 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj, /* Parse the string from Sysfs Interface */ ret = kstrtoint(buf, 0, &ipclite_debug_control); - if (ret < IPCLITE_SUCCESS) { + if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); - return -IPCLITE_FAILURE; + return ret; } /* Check if debug structures are initialized */ @@ -1313,14 +1430,13 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj, /* Signal other cores for updating the debug information */ for (host = 1; host < IPCMEM_NUM_HOSTS; 
host++) { - if (ipclite->ipcmem.toc->recovery.configured_core[host]) { - ret = ipclite_send_debug_info(host); - if (ret < IPCLITE_SUCCESS) - IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", - host); - else - IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); - } + if (!is_host_enabled(host)) + continue; + ret = ipclite_send_debug_info(host); + if (ret < 0) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host); } return count; @@ -1333,9 +1449,9 @@ static ssize_t ipclite_dbg_dump_write(struct kobject *kobj, /* Parse the string from Sysfs Interface */ ret = kstrtoint(buf, 0, &ipclite_debug_dump); - if (ret < IPCLITE_SUCCESS) { + if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value"); - return -IPCLITE_FAILURE; + return ret; } /* Check if debug structures are initialized */ @@ -1363,37 +1479,42 @@ struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660, static int ipclite_debug_sysfs_setup(void) { + int ret = 0; + /* Creating a directory in /sys/kernel/ */ sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj); if (!sysfs_kobj) { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n"); - return -IPCLITE_FAILURE; + return -ENOMEM; } /* Creating sysfs files/interfaces for debug */ - if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr)) { + ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr); + if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n"); - return -IPCLITE_FAILURE; + return ret; } - if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr)) { + ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr); + if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n"); - return -IPCLITE_FAILURE; + return ret; } - if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr)) { + ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr); + if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n"); - return -IPCLITE_FAILURE; + return ret; } - return IPCLITE_SUCCESS; + return ret; } static int ipclite_debug_info_setup(void) { /* Setting up the Debug Structures */ ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base + - ipclite->ipcmem.mem.size) - IPCLITE_DEBUG_SIZE); + ipclite->ipcmem.mem.size) - DEBUG_PARTITION_SIZE); if (!ipclite_dbg_info) return -EADDRNOTAVAIL; @@ -1411,11 +1532,11 @@ static int ipclite_debug_info_setup(void) return -EADDRNOTAVAIL; IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n", - ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, IPCLITE_DEBUG_SIZE); + ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, DEBUG_PARTITION_SIZE); IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n", ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem); - return IPCLITE_SUCCESS; + return 0; } static int ipclite_probe(struct platform_device *pdev) @@ -1464,18 +1585,22 @@ static int ipclite_probe(struct platform_device *pdev) mem = &(ipclite->ipcmem.mem); memset(mem->virt_base, 0, mem->size); - ipcmem_init(&ipclite->ipcmem); + ret = ipcmem_init(&ipclite->ipcmem, pn); + if (ret) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up IPCMEM"); + goto release; + } /* Set up sysfs for debug */ ret = ipclite_debug_sysfs_setup(); - if (ret != IPCLITE_SUCCESS) { + if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n"); 
goto release; } /* Mapping Debug Memory */ ret = ipclite_debug_info_setup(); - if (ret != IPCLITE_SUCCESS) { + if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n"); goto release; } @@ -1495,15 +1620,12 @@ static int ipclite_probe(struct platform_device *pdev) mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0); if (global_atomic_support) { - ipclite->ipcmem.toc->ipclite_features.global_atomic_support = - GLOBAL_ATOMICS_ENABLED; - } else { - ipclite->ipcmem.toc->ipclite_features.global_atomic_support = - GLOBAL_ATOMICS_DISABLED; + ipclite->ipcmem.toc->hdr.feature_mask |= GLOBAL_ATOMIC_SUPPORT_BMSK; } + IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", global_atomic_support); - IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", - ipclite->ipcmem.toc->ipclite_features.global_atomic_support); + /* Should be called after all Global TOC related init is done */ + insert_magic_number(); /* hw mutex callbacks */ ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire; @@ -1512,9 +1634,6 @@ static int ipclite_probe(struct platform_device *pdev) /* store to ipclite structure */ ipclite->ipclite_hw_mutex = ipclite_hw_mutex; - /* initialize hwlock owner to invalid host */ - ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST; - /* Update the Global Debug variable for FW cores */ ipclite_dbg_info->debug_level = ipclite_debug_level; ipclite_dbg_info->debug_control = ipclite_debug_control; diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index 3c1960ae90..543f8e0dc1 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.. 
*/ #include #include @@ -14,9 +14,10 @@ #define ACTIVE_CHANNEL 0x1 #define IPCMEM_TOC_SIZE (4*1024) +#define IPCMEM_TOC_VAR_OFFSET 0x100 #define MAX_CHANNEL_SIGNALS 6 -#define MAX_PARTITION_COUNT 11 /*11 partitions other than global partition*/ +#define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL #define IPCLITE_MSG_SIGNAL 0 #define IPCLITE_MEM_INIT_SIGNAL 1 @@ -26,13 +27,13 @@ #define IPCLITE_DEBUG_SIGNAL 5 /** Flag definitions for the entries */ -#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01) -#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION (0x02) -#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \ - (IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \ - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION) +#define IPCMEM_FLAGS_ENABLE_READ_PROTECTION (0x01) +#define IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION (0x02) +#define IPCMEM_FLAGS_ENABLE_RW_PROTECTION \ + (IPCMEM_FLAGS_ENABLE_READ_PROTECTION | \ + IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION) -#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION (0x00000004) +#define IPCMEM_FLAGS_IGNORE_PARTITION (0x00000004) /*Hardcoded macro to identify local host on each core*/ #define LOCAL_HOST IPCMEM_APPS @@ -40,13 +41,6 @@ /* Timeout (ms) for the trylock of remote spinlocks */ #define HWSPINLOCK_TIMEOUT 1000 -#define CHANNEL_INACTIVE 0 -#define CHANNEL_ACTIVATE_IN_PROGRESS 1 -#define CHANNEL_ACTIVE 2 - -#define CONFIGURED_CORE 1 - -#define IPCLITE_DEBUG_SIZE (64 * 1024) #define IPCLITE_DEBUG_INFO_SIZE 256 #define IPCLITE_CORE_DBG_LABEL "APSS:" #define IPCLITE_LOG_MSG_SIZE 100 @@ -55,6 +49,8 @@ #define IPCLITE_SIGNAL_LABEL_SIZE 10 #define PREV_INDEX 2 +#define ADD_OFFSET(x, y) ((void *)((size_t)x + y)) + #define IPCLITE_OS_LOG(__level, __fmt, arg...) \ do { \ if (ipclite_debug_level & __level) { \ @@ -69,7 +65,18 @@ } \ } while (0) -/*IPCMEM Structure Definitions*/ +/** + * enum ipclite_channel_status - channel status + * + * INACTIVE : Channel uninitialized or init failed + * IN_PROGRESS : Channel init passed, awaiting confirmation from remote host + * ACTIVE : Channel init passed in local and remote host, thus active + */ +enum ipclite_channel_status { + INACTIVE = 0, + IN_PROGRESS = 1, + ACTIVE = 2, +}; enum ipclite_debug_level { IPCLITE_ERR = 0x0001, @@ -97,6 +104,11 @@ static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = { [IPCLITE_DBG] = "dbg" }; +/** + * IPCMEM Debug Structure Definitions + * - Present in Local Memory + */ + struct ipclite_debug_info_host { uint32_t numsig_sent; //no. of signals sent from the core uint32_t numsig_recv; //no. 
of signals received on the core @@ -137,60 +149,77 @@ struct ipclite_debug_struct { struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS]; }; -struct ipclite_features { - uint32_t global_atomic_support; - uint32_t version_finalised; +/** + * IPCMEM TOC Structure Definitions + * - Present in toc in shared memory + */ + +struct ipcmem_host_info { + uint32_t hwlock_owner; + uint32_t configured_host; }; -struct ipclite_recover { - uint32_t global_atomic_hwlock_owner; - uint32_t configured_core[IPCMEM_NUM_HOSTS]; -}; - -struct ipcmem_partition_header { - uint32_t type; /*partition type*/ - uint32_t desc_offset; /*descriptor offset*/ - uint32_t desc_size; /*descriptor size*/ - uint32_t fifo0_offset; /*fifo 0 offset*/ - uint32_t fifo0_size; /*fifo 0 size*/ - uint32_t fifo1_offset; /*fifo 1 offset*/ - uint32_t fifo1_size; /*fifo 1 size*/ -}; - -struct ipcmem_toc_entry { +struct ipcmem_partition_entry { uint32_t base_offset; /*partition offset from IPCMEM base*/ uint32_t size; /*partition size*/ uint32_t flags; /*partition flags if required*/ uint32_t host0; /*subsystem 0 who can access this partition*/ uint32_t host1; /*subsystem 1 who can access this partition*/ - uint32_t status; /*partition active status*/ + uint32_t reserved; /*legacy partition active status*/ }; +struct ipcmem_partition_info { + uint32_t num_entries; /* Number of channel partitions */ + uint32_t entry_size; /* Size of partition_entry structure */ +}; + +struct ipcmem_offsets { + uint32_t host_info; + uint32_t global_entry; + uint32_t partition_info; + uint32_t partition_entry; + uint32_t debug; + uint32_t reserved; /*Padded for 64-bit alignment*/ +}; + +/** + * Any change in TOC header size can only be accomodated with + * major version change, as it is not backward compatible. 
+ */ struct ipcmem_toc_header { - uint32_t size; - uint32_t init_done; + uint32_t magic_number; /*Checksum of TOC*/ + uint32_t init_done; /*TOC initialization status*/ + uint32_t major_version; + uint32_t minor_version; + uint64_t feature_mask; + uint32_t reserved[6]; /*Padded for future use and 64-bit alignment*/ }; +/** + * struct ipcmem_toc - Table of contents in ipcmem + * + * @hdr : Header to check for toc integrity, version and features + * @offsets : List of offsetted structures and partition entries + * available in the toc data region (ipcmem_toc_data) + */ struct ipcmem_toc { struct ipcmem_toc_header hdr; - struct ipcmem_toc_entry toc_entry_global; - struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS]; - /* Need to have a better implementation here */ - /* as ipcmem is 4k and if host number increases */ - /* it would create problems*/ - struct ipclite_features ipclite_features; - struct ipclite_recover recovery; + struct ipcmem_offsets offsets; + + /* --------------------------------------- + * ipcmem_toc_data @ 256-byte offset + * struct ipcmem_host_info host_info; + * struct ipcmem_partition_entry global_entry; + * struct ipcmem_partition_info partition_info; + * struct ipcmem_partition_entry partition_entry[num_entries]; + * --------------------------------------- + */ }; -struct ipcmem_region { - u64 aux_base; - void __iomem *virt_base; - uint32_t size; -}; - -struct ipcmem_partition { - struct ipcmem_partition_header hdr; -}; +/** + * IPCMEM Partition Structure Definitions + * - Present in partitions in shared memory + */ struct global_partition_header { uint32_t partition_type; @@ -202,13 +231,55 @@ struct ipcmem_global_partition { struct global_partition_header hdr; }; +struct ipcmem_partition_header { + uint32_t type; /*partition type*/ + uint32_t desc_offset; /*descriptor offset*/ + uint32_t desc_size; /*descriptor size*/ + uint32_t fifo0_offset; /*fifo 0 offset*/ + uint32_t fifo0_size; /*fifo 0 size*/ + uint32_t fifo1_offset; /*fifo 1 offset*/ + uint32_t fifo1_size; /*fifo 1 size*/ + uint32_t status; /*partition status*/ +}; + +struct ipcmem_partition { + struct ipcmem_partition_header hdr; +}; + +/** + * IPCMEM Helper Structure Definitions + * - Present in local memory + * - Can have pointers to toc and partitions in shared memory + */ + +/*Pointers to offsetted structures in TOC*/ +struct ipcmem_toc_data { + struct ipcmem_host_info *host_info; + struct ipcmem_partition_entry *global_entry; + struct ipcmem_partition_info *partition_info; + struct ipcmem_partition_entry *partition_entry; +}; + +struct ipcmem_region { + u64 aux_base; + void __iomem *virt_base; + uint32_t size; +}; + struct ipclite_mem { struct ipcmem_toc *toc; + struct ipcmem_toc_data toc_data; struct ipcmem_region mem; struct ipcmem_global_partition *global_partition; - struct ipcmem_partition *partition[MAX_PARTITION_COUNT]; + struct ipcmem_partition **partition; }; +/** + * IPCLite Structure Definitions + * - Present in local memory + * - Can have pointers to partitions in shared memory + */ + struct ipclite_fifo { uint32_t length; @@ -265,7 +336,8 @@ struct ipclite_channel { uint32_t channel_version; uint32_t version_finalised; - uint32_t channel_status; + uint32_t *gstatus_ptr; + uint32_t status; }; /*Single structure that defines everything about IPCLite*/ @@ -277,157 +349,36 @@ struct ipclite_info { struct ipclite_hw_mutex_ops *ipclite_hw_mutex; }; -const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = { - /* Global partition. 
*/ - 4 * 1024, - 128 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_GLOBAL_HOST, - IPCMEM_GLOBAL_HOST, -}; +/*Default partition parameters*/ +#define DEFAULT_PARTITION_TYPE 0x0 +#define DEFAULT_PARTITION_STATUS INACTIVE +#define DEFAULT_PARTITION_HDR_SIZE 1024 -const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = { - /* Global partition. */ - /* { - * 4 * 1024, - * 128 * 1024, - * IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - * IPCMEM_GLOBAL_HOST, - * IPCMEM_GLOBAL_HOST, - * }, - */ - - /* APPS<->CDSP partition. */ - { - 132 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_APPS, - IPCMEM_CDSP, - CHANNEL_INACTIVE, - }, - /* APPS<->CVP (EVA) partition. */ - { - 164 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_APPS, - IPCMEM_CVP, - CHANNEL_INACTIVE, - }, - /* APPS<->CAM (ICP) partition. */ - { - 196 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_APPS, - IPCMEM_CAM, - CHANNEL_INACTIVE, - }, - /* APPS<->VPU (IRIS) partition. */ - { - 228 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_APPS, - IPCMEM_VPU, - CHANNEL_INACTIVE, - }, - /* CDSP<->CVP (EVA) partition. */ - { - 260 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CDSP, - IPCMEM_CVP, - CHANNEL_INACTIVE, - }, - /* CDSP<->CAM (ICP) partition. */ - { - 292 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CDSP, - IPCMEM_CAM, - CHANNEL_INACTIVE, - }, - /* CDSP<->VPU (IRIS) partition. */ - { - 324 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CDSP, - IPCMEM_VPU, - CHANNEL_INACTIVE, - }, - /* CVP<->CAM (ICP) partition. */ - { - 356 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CVP, - IPCMEM_CAM, - CHANNEL_INACTIVE, - }, - /* CVP<->VPU (IRIS) partition. */ - { - 388 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CVP, - IPCMEM_VPU, - CHANNEL_INACTIVE, - }, - /* CAM<->VPU (IRIS) partition. */ - { - 420 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_CAM, - IPCMEM_VPU, - CHANNEL_INACTIVE, - }, - /* APPS<->APPS partition. */ - { - 454 * 1024, - 32 * 1024, - IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - IPCMEM_APPS, - IPCMEM_APPS, - CHANNEL_INACTIVE, - } - /* Last entry uses invalid hosts and no protections to signify the end. 
*/ - /* { - * 0, - * 0, - * IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION, - * IPCMEM_INVALID_HOST, - * IPCMEM_INVALID_HOST, - * } - */ -}; - -/*D:wefault partition parameters*/ -#define DEFAULT_PARTITION_TYPE 0x0 -#define DEFAULT_PARTITION_HDR_SIZE 1024 - -#define DEFAULT_DESCRIPTOR_OFFSET 1024 -#define DEFAULT_DESCRIPTOR_SIZE (3*1024) +#define DEFAULT_DESCRIPTOR_OFFSET 1024 +#define DEFAULT_DESCRIPTOR_SIZE (3*1024) #define DEFAULT_FIFO0_OFFSET (4*1024) #define DEFAULT_FIFO0_SIZE (8*1024) #define DEFAULT_FIFO1_OFFSET (12*1024) #define DEFAULT_FIFO1_SIZE (8*1024) +#define DEFAULT_PARTITION_SIZE (32*1024) +#define DEFAULT_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION + /*Loopback partition parameters*/ -#define LOOPBACK_PARTITION_TYPE 0x1 +#define LOOPBACK_PARTITION_TYPE 0x1 /*Global partition parameters*/ -#define GLOBAL_PARTITION_TYPE 0xFF +#define GLOBAL_PARTITION_TYPE 0xFF #define GLOBAL_PARTITION_HDR_SIZE (4*1024) #define GLOBAL_REGION_OFFSET (4*1024) #define GLOBAL_REGION_SIZE (124*1024) +#define GLOBAL_PARTITION_SIZE (128*1024) +#define GLOBAL_PARTITION_FLAGS IPCMEM_FLAGS_ENABLE_RW_PROTECTION + +/*Debug partition parameters*/ +#define DEBUG_PARTITION_SIZE (64*1024) const struct ipcmem_partition_header default_partition_hdr = { DEFAULT_PARTITION_TYPE, @@ -437,6 +388,7 @@ const struct ipcmem_partition_header default_partition_hdr = { DEFAULT_FIFO0_SIZE, DEFAULT_FIFO1_OFFSET, DEFAULT_FIFO1_SIZE, + DEFAULT_PARTITION_STATUS, }; /* TX and RX FIFO point to same location for such loopback partition type @@ -450,6 +402,7 @@ const struct ipcmem_partition_header loopback_partition_hdr = { DEFAULT_FIFO0_SIZE, DEFAULT_FIFO0_OFFSET, DEFAULT_FIFO0_SIZE, + DEFAULT_PARTITION_STATUS, }; const struct global_partition_header global_partition_hdr = { diff --git a/msm/synx/ipclite_client.h b/msm/synx/ipclite_client.h index 419d9e2330..ff5948fc62 100644 --- a/msm/synx/ipclite_client.h +++ b/msm/synx/ipclite_client.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __IPCLITE_CLIENT_H__ #define __IPCLITE_CLIENT_H__ @@ -27,13 +27,6 @@ enum ipcmem_host_type { IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */ }; -/** - * IPCLite return codes - */ -#define IPCLITE_SUCCESS 0 /*< Success > */ -#define IPCLITE_FAILURE 1 /*< Failure > */ -#define IPCLITE_EINCHAN 9 /*< Inactive Channel */ - struct global_region_info { void *virt_base; uint32_t size; From d87e10c6947cd9bd77aba5e782a1af11146c2214 Mon Sep 17 00:00:00 2001 From: Urvesh Rathod Date: Tue, 18 Apr 2023 14:51:32 +0530 Subject: [PATCH 27/42] msm: synx: async_wait timeout changes Adding timeout parameter for async wait so the callback will be invoked on timer expiry if not signalled. Change-Id: Ia31f59021f00befed5317fdac262d823c659c6bf Signed-off-by: Ram Nagesh Signed-off-by: Urvesh Rathod --- msm/synx/synx.c | 70 ++++++++++++++++++++++++++++++++++++++--- msm/synx/synx_global.h | 1 + msm/synx/synx_private.h | 3 ++ msm/synx/synx_util.c | 12 +++++++ 4 files changed, 82 insertions(+), 4 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 2fde5d1c07..ab51ea9b6a 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -882,6 +882,52 @@ static int synx_match_payload(struct synx_kernel_payload *cb_payload, return rc; } +/* Timer Callback function. 
This will be called when timer expires */ +void synx_timer_cb(struct timer_list *data) +{ + struct synx_client *client; + struct synx_handle_coredata *synx_data; + struct synx_coredata *synx_obj; + struct synx_cb_data *synx_cb = container_of(data, struct synx_cb_data, synx_timer); + + client = synx_get_client(synx_cb->session); + if (IS_ERR_OR_NULL(client)) { + dprintk(SYNX_ERR, + "invalid session data 0x%x in cb payload\n", + synx_cb->session); + return; + } + synx_data = synx_util_acquire_handle(client, synx_cb->h_synx); + synx_obj = synx_util_obtain_object(synx_data); + if (IS_ERR_OR_NULL(synx_obj)) { + dprintk(SYNX_ERR, + "[sess :0x%llx] invalid handle access 0x%x\n", + synx_cb->session, synx_cb->h_synx); + return; + } + dprintk(SYNX_VERB, + "Timer expired for synx_cb 0x%x timeout 0x%llx. Deleting the timer.\n", + synx_cb, synx_cb->timeout); + + synx_cb->status = SYNX_STATE_TIMEOUT; + del_timer(&synx_cb->synx_timer); + list_del_init(&synx_cb->node); + queue_work(synx_dev->wq_cb, &synx_cb->cb_dispatch); +} + +static int synx_start_timer(struct synx_cb_data *synx_cb) +{ + int rc = 0; + + timer_setup(&synx_cb->synx_timer, synx_timer_cb, 0); + rc = mod_timer(&synx_cb->synx_timer, jiffies + msecs_to_jiffies(synx_cb->timeout)); + dprintk(SYNX_VERB, + "Timer started for synx_cb 0x%x timeout 0x%llx\n", + synx_cb, synx_cb->timeout); + return rc; +} + + int synx_async_wait(struct synx_session *session, struct synx_callback_params *params) { @@ -897,9 +943,6 @@ int synx_async_wait(struct synx_session *session, if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params)) return -SYNX_INVALID; - if (params->timeout_ms != SYNX_NO_TIMEOUT) - return -SYNX_NOSUPPORT; - client = synx_get_client(session); if (IS_ERR_OR_NULL(client)) return -SYNX_INVALID; @@ -952,6 +995,8 @@ int synx_async_wait(struct synx_session *session, synx_cb->session = session; synx_cb->idx = idx; + synx_cb->h_synx = params->h_synx; + INIT_WORK(&synx_cb->cb_dispatch, synx_util_cb_dispatch); /* add callback if object still ACTIVE, dispatch if SIGNALED */ @@ -959,6 +1004,17 @@ int synx_async_wait(struct synx_session *session, dprintk(SYNX_VERB, "[sess :%llu] callback added for handle %u\n", client->id, params->h_synx); + synx_cb->timeout = params->timeout_ms; + if (params->timeout_ms != SYNX_NO_TIMEOUT) { + rc = synx_start_timer(synx_cb); + if (rc != SYNX_SUCCESS) { + dprintk(SYNX_ERR, + "[sess :%llu] timer start failed - synx_cb: 0x%x, params->timeout_ms: 0x%llx, handle: 0x%x, ret : %d\n", + client->id, synx_cb, params->timeout_ms, + params->h_synx, rc); + goto release; + } + } list_add(&synx_cb->node, &synx_obj->reg_cbs_list); } else { synx_cb->status = status; @@ -1024,7 +1080,7 @@ int synx_cancel_async_wait( status = synx_util_get_object_status(synx_obj); if (status != SYNX_STATE_ACTIVE) { dprintk(SYNX_ERR, - "handle %u already signaled cannot cancel\n", + "handle %u already signaled or timed out, cannot cancel\n", params->h_synx); rc = -SYNX_INVALID; goto release; @@ -1052,6 +1108,12 @@ int synx_cancel_async_wait( cb_payload = &client->cb_table[synx_cb->idx]; ret = synx_match_payload(&cb_payload->kernel_cb, &payload); + if (synx_cb->timeout != SYNX_NO_TIMEOUT) { + dprintk(SYNX_VERB, + "Deleting timer synx_cb 0x%x, timeout 0x%llx\n", + synx_cb, synx_cb->timeout); + del_timer(&synx_cb->synx_timer); + } switch (ret) { case 1: /* queue the cancel cb work */ diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 074c2b6a79..9e22120204 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -53,6 +53,7 @@ enum 
synx_core_id { #define SYNX_STATE_SIGNALED_ERROR 3 #define SYNX_STATE_SIGNALED_EXTERNAL 5 #define SYNX_STATE_SIGNALED_SSR 6 +#define SYNX_STATE_TIMEOUT 7 /* dma fence states */ #define SYNX_DMA_FENCE_STATE_MAX 4096 diff --git a/msm/synx/synx_private.h b/msm/synx/synx_private.h index f9ef273aa7..25328fa39b 100644 --- a/msm/synx/synx_private.h +++ b/msm/synx/synx_private.h @@ -101,7 +101,10 @@ struct synx_kernel_payload { struct synx_cb_data { struct synx_session *session; u32 idx; + u32 h_synx; u32 status; + struct timer_list synx_timer; + u64 timeout; struct work_struct cb_dispatch; struct list_head node; }; diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 94dc12c6cc..86d3e593da 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -301,6 +301,12 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) "dipatching un-released callbacks of session %pK\n", synx_cb->session); synx_cb->status = SYNX_STATE_SIGNALED_CANCEL; + if (synx_cb->timeout != SYNX_NO_TIMEOUT) { + dprintk(SYNX_VERB, + "Deleting timer synx_cb 0x%x, timeout 0x%llx\n", + synx_cb, synx_cb->timeout); + del_timer(&synx_cb->synx_timer); + } list_del_init(&synx_cb->node); queue_work(synx_dev->wq_cb, &synx_cb->cb_dispatch); @@ -1175,6 +1181,12 @@ void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status) list_for_each_entry_safe(synx_cb, synx_cb_temp, &synx_obj->reg_cbs_list, node) { synx_cb->status = status; + if (synx_cb->timeout != SYNX_NO_TIMEOUT) { + dprintk(SYNX_VERB, + "Deleting timer synx_cb 0x%x, timeout 0x%llx\n", + synx_cb, synx_cb->timeout); + del_timer(&synx_cb->synx_timer); + } list_del_init(&synx_cb->node); queue_work(synx_dev->wq_cb, &synx_cb->cb_dispatch); From d0224b1e5f02b23272642e7e9b38d9a03ce5e381 Mon Sep 17 00:00:00 2001 From: Amir Suhail Date: Wed, 10 May 2023 12:23:43 +0530 Subject: [PATCH 28/42] msm: synx: Updating Header File Updating comments for structures and functions to reflect the latest changes supported by framework. Change-Id: Ie990df2790f5c19f1addd569f2f4a8c6a74d468e Signed-off-by: Amir Suhail --- msm/synx/synx_api.h | 194 ++++++++++++++++++++++++----------------- msm/synx/synx_global.h | 8 -- 2 files changed, 115 insertions(+), 87 deletions(-) diff --git a/msm/synx/synx_api.h b/msm/synx/synx_api.h index 09a36d0003..b94482bcc5 100644 --- a/msm/synx/synx_api.h +++ b/msm/synx/synx_api.h @@ -20,17 +20,27 @@ */ #define SYNX_INVALID_HANDLE 0 +/* synx object states */ +#define SYNX_STATE_INVALID 0 // Invalid synx object +#define SYNX_STATE_ACTIVE 1 // Synx object has not been signaled +#define SYNX_STATE_SIGNALED_ERROR 3 // Synx object signaled with error +#define SYNX_STATE_SIGNALED_EXTERNAL 5 // Synx object was signaled by external dma client. +#define SYNX_STATE_SIGNALED_SSR 6 // Synx object signaled with SSR +#define SYNX_STATE_TIMEOUT 7 // Callback status for synx object in case of timeout + /** - * enum synx_create_flags - Flags passed during synx_create call + * enum synx_create_flags - Flags passed during synx_create call. * - * SYNX_CREATE_LOCAL_FENCE : Instructs the framework to create local synx object + * SYNX_CREATE_LOCAL_FENCE : Instructs the framework to create local synx object, + * for local synchronization i.e. within same core. * SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object + * for global synchronization i.e. across supported core. * SYNX_CREATE_DMA_FENCE : Create a synx object by wrapping the provided dma fence. * Need to pass the dma_fence ptr through fence variable - * if this flag is set. 
+ * if this flag is set. (NOT SUPPORTED) * SYNX_CREATE_CSL_FENCE : Create a synx object with provided csl fence. * Establishes interop with the csl fence through - * bind operations. + * bind operations. (NOT SUPPORTED) */ enum synx_create_flags { SYNX_CREATE_LOCAL_FENCE = 0x01, @@ -42,24 +52,41 @@ enum synx_create_flags { /** * enum synx_init_flags - Session initialization flag + * SYNX_INIT_DEFAULT : Initialization flag to be passed + * when initializing session + * SYNX_INIT_MAX : Used for internal checks */ enum synx_init_flags { - SYNX_INIT_MAX = 0x01, + SYNX_INIT_DEFAULT = 0x00, + SYNX_INIT_MAX = 0x01, }; /** * enum synx_import_flags - Import flags * - * SYNX_IMPORT_LOCAL_FENCE : Instructs the framework to create local synx object - * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object - * SYNX_IMPORT_SYNX_FENCE : Import native Synx handle for synchronization + * SYNX_IMPORT_LOCAL_FENCE : Instructs the framework to create local synx object, + * for local synchronization i.e. within same core. + * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object, + * for global synchronization i.e. across supported core. + * SYNX_IMPORT_SYNX_FENCE : Import native Synx handle for synchronization. * Need to pass the Synx handle ptr through fence variable - * if this flag is set. - * SYNX_IMPORT_DMA_FENCE : Import dma fence.and crate Synx handle for interop + * if this flag is set. Client must pass: + * a. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_LOCAL_FENCE + * to import a synx handle as local synx handle. + * b. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_GLOBAL_FENCE + * to import a synx handle as global synx handle. + * SYNX_IMPORT_DMA_FENCE : Import dma fence and create Synx handle for interop. * Need to pass the dma_fence ptr through fence variable - * if this flag is set. + * if this flag is set. Client must pass: + * a. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_LOCAL_FENCE + * to import a dma fence and create local synx handle + * for interop. + * b. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_GLOBAL_FENCE + * to import a dma fence and create global synx handle + * for interop. * SYNX_IMPORT_EX_RELEASE : Flag to inform relaxed invocation where release call * need not be called by client on this handle after import. + * (NOT SUPPORTED) */ enum synx_import_flags { SYNX_IMPORT_LOCAL_FENCE = 0x01, @@ -95,7 +122,7 @@ typedef void (*synx_callback)(s32 sync_obj, int status, void *data); * synx_user_callback - Callback function registered by clients * * User callback registered for non-blocking wait. Dispatched when - * synx object is signaled or timeout has expired. + * synx object is signaled or timed-out with status of synx object. 
*/ typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data); @@ -119,9 +146,10 @@ struct bind_operations { }; /** - * synx_bind_client_type : External fence supported for bind + * synx_bind_client_type : External fence supported for bind (NOT SUPPORTED) * * SYNX_TYPE_CSL : Camera CSL fence + * SYNX_MAX_BIND_TYPES : Used for internal checks */ enum synx_bind_client_type { SYNX_TYPE_CSL = 0, @@ -129,7 +157,7 @@ enum synx_bind_client_type { }; /** - * struct synx_register_params - External registration parameters + * struct synx_register_params - External registration parameters (NOT SUPPORTED) * * @ops : Bind operations struct * @name : External client name @@ -144,8 +172,10 @@ struct synx_register_params { /** * struct synx_queue_desc - Memory descriptor of the queue allocated by - * the fence driver for each client during - * register. + * the fence driver for each client during + * register. (Clients need not pass any pointer + * in synx_initialize_params. It is for future + * use). * * @vaddr : CPU virtual address of the queue. * @dev_addr : Physical address of the memory object. @@ -196,8 +226,10 @@ enum synx_client_id { /** * struct synx_session - Client session identifier * - * @type : Session type + * @type : Session type. + * Internal Member. (Do not access/modify) * @client : Pointer to client session + * Internal Member. (Do not access/modify) */ struct synx_session { u32 type; @@ -209,7 +241,8 @@ struct synx_session { * * @name : Client session name * Only first 64 bytes are accepted, rest will be ignored - * @ptr : Pointer to queue descriptor (filled by function) + * @ptr : Memory descriptor of queue allocated by fence during + * device register. (filled by function) * @id : Client identifier * @flags : Synx initialization flags */ @@ -228,16 +261,8 @@ struct synx_initialization_params { * Only first 64 bytes are accepted, * rest will be ignored * @h_synx : Pointer to synx object handle (filled by function) - * @fence : Pointer to external fence - * @flags : Synx flags for customization (mentioned below) - * - * SYNX_CREATE_GLOBAL_FENCE - Hints the framework to create global synx object - * If flag not set, hints framework to create a local synx object. - * SYNX_CREATE_DMA_FENCE - Wrap synx object with dma fence. - * Need to pass the dma_fence ptr through 'fence' variable if this flag is set. - * SYNX_CREATE_BIND_FENCE - Create a synx object with provided external fence. - * Establishes interop with supported external fence through bind operations. - * Need to fill synx_external_desc structure if this flag is set. + * @fence : Pointer to external dma fence or csl fence. (NOT SUPPORTED) + * @flags : Synx flags for customization */ struct synx_create_params { @@ -250,10 +275,19 @@ struct synx_create_params { /** * enum synx_merge_flags - Handle merge flags * - * SYNX_MERGE_LOCAL_FENCE : Create local composite object. - * SYNX_MERGE_GLOBAL_FENCE : Create global composite object. - * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects - * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object + * SYNX_MERGE_LOCAL_FENCE : Create local composite synx object. To be passed along + * with SYNX_MERGE_NOTIFY_ON_ALL. + * SYNX_MERGE_GLOBAL_FENCE : Create global composite synx object. To be passed along + * with SYNX_MERGE_NOTIFY_ON_ALL. + * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects. + * Clients must pass: + * a. 
SYNX_MERGE_LOCAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL + * to create local composite synx object and notify + * it when all child synx objects are signaled. + * b. SYNX_MERGE_GLOBAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL + * to create global composite synx object and notify + * it when all child synx objects are signaled. + * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object. (NOT SUPPORTED) */ enum synx_merge_flags { SYNX_MERGE_LOCAL_FENCE = 0x01, @@ -267,8 +301,8 @@ enum synx_merge_flags { * * @h_synxs : Pointer to a array of synx handles to be merged * @flags : Merge flags - * @num_objs : Number of synx objs in the block - * @h_merged_obj : Merged synx object handle (filled by function) + * @num_objs : Number of synx handles to be merged (in array h_synxs). + * @h_merged_obj : Merged synx handle (filled by function) */ struct synx_merge_params { u32 *h_synxs; @@ -296,8 +330,8 @@ enum synx_import_type { * The new handle/s should be used by importing * process for all synx api operations and * for sharing with FW cores. - * @flags : Synx flags - * @fence : Pointer to external fence + * @flags : Synx import flags + * @fence : Pointer to DMA fence fd or synx handle. */ struct synx_import_indv_params { u32 *new_h_synx; @@ -308,8 +342,8 @@ struct synx_import_indv_params { /** * struct synx_import_arr_params - Synx import arr parameters * - * @list : Array of synx_import_indv_params pointers - * @num_fences : No of fences passed to framework + * @list : List of synx_import_indv_params + * @num_fences : Number of fences or synx handles to be imported */ struct synx_import_arr_params { struct synx_import_indv_params *list; @@ -320,8 +354,8 @@ struct synx_import_arr_params { * struct synx_import_params - Synx import parameters * * @type : Import params type filled by client - * @indv : Params to import an individual handle/fence - * @arr : Params to import an array of handles/fences + * @indv : Params to import an individual handle or fence + * @arr : Params to import an array of handles or fences */ struct synx_import_params { enum synx_import_type type; @@ -335,9 +369,9 @@ struct synx_import_params { * struct synx_callback_params - Synx callback parameters * * @h_synx : Synx object handle - * @cb_func : Pointer to callback func to be invoked - * @userdata : Opaque pointer passed back with callback - * @cancel_cb_func : Pointer to callback to ack cancellation (optional) + * @cb_func : Pointer to callback func to be invoked. + * @userdata : Opaque pointer passed back with callback as data + * @cancel_cb_func : Pointer to callback to ack cancellation * @timeout_ms : Timeout in ms. SYNX_NO_TIMEOUT if no timeout. */ struct synx_callback_params { @@ -350,7 +384,7 @@ struct synx_callback_params { /* Kernel APIs */ -/* synx_register_ops - Register operations for external synchronization +/* synx_register_ops - Register operations for external synchronization (NOT SUPPORTED) * * Register with synx for enabling external synchronization through bind * @@ -365,7 +399,7 @@ struct synx_callback_params { int synx_register_ops(const struct synx_register_params *params); /** - * synx_deregister_ops - De-register external synchronization operations + * synx_deregister_ops - De-register external synchronization operations (NOT SUPPORTED) * * @param params : Pointer to register params * @@ -388,47 +422,48 @@ struct synx_session *synx_initialize(struct synx_initialization_params *params); * * @param session : Session ptr (returned from synx_initialize) * - * @return Status of operation. SYNX_SUCCESS in case of success. 
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise. */ int synx_uninitialize(struct synx_session *session); /** * synx_create - Creates a synx object * - * Creates a new synx obj and returns the handle to client. + * Creates a new synx obj and returns the handle to client. There can be + * maximum of 4095 global synx handles or local synx handles across + * sessions. * * @param session : Session ptr (returned from synx_initialize) * @param params : Pointer to create params * - * @return Status of operation. SYNX_SUCCESS in case of success. - * -SYNX_INVALID will be returned if params were invalid. - * -SYNX_NOMEM will be returned if the kernel can't allocate space for - * synx object. + * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise. */ int synx_create(struct synx_session *session, struct synx_create_params *params); /** * synx_async_wait - Registers a callback with a synx object * - * @param session : Session ptr (returned from synx_initialize) - * @param params : Callback params + * Clients can register maximum of 64 callbacks functions per + * synx session. Clients should register callback functions with minimal computation. * - * @return Status of operation. SYNX_SUCCESS in case of success. - * -SYNX_INVALID will be returned if userdata is invalid. - * -SYNX_NOMEM will be returned if cb_func is invalid. + * @param session : Session ptr (returned from synx_initialize) + * @param params : Callback params. + * cancel_cb_func in callback params is optional with this API. + * + * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise. */ int synx_async_wait(struct synx_session *session, struct synx_callback_params *params); /** * synx_cancel_async_wait - De-registers a callback with a synx object * + * This API will cancel one instance of callback function (mapped + * with userdata and h_synx) provided in cb_func of callback params. + * * @param session : Session ptr (returned from synx_initialize) * @param params : Callback params * - * @return Status of operation. SYNX_SUCCESS in case of success. - * -SYNX_ALREADY if object has already been signaled, and cannot be cancelled. - * -SYNX_INVALID will be returned if userdata is invalid. - * -SYNX_NOMEM will be returned if cb_func is invalid. + * @return Status of operation.Negative in case of error, SYNX_SUCCESS otherwise. */ int synx_cancel_async_wait(struct synx_session *session, struct synx_callback_params *params); @@ -470,59 +505,57 @@ int synx_merge(struct synx_session *session, struct synx_merge_params *params); * Does a wait on the synx object identified by h_synx for a maximum * of timeout_ms milliseconds. Must not be called from interrupt context as * this API can sleep. - * Will return status if handle was signaled. Status can be from pre-defined - * states (enum synx_signal_status) or custom status sent by producer. * * @param session : Session ptr (returned from synx_initialize) * @param h_synx : Synx object handle to be waited upon * @param timeout_ms : Timeout in ms * - * @return Signal status. -SYNX_INVAL if synx object is in bad state or arguments - * are invalid, -SYNX_TIMEOUT if wait times out. + * @return Status of synx object if handle is signaled. -SYNX_INVAL if synx object + * is in bad state or arguments are invalid, -SYNX_TIMEOUT if wait times out. 
*/ int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms); /** - * synx_get_status - Returns the status of the synx object + * synx_get_status - Returns the status of the synx object. + * + * This API should not be used in polling mode to know if the handle + * is signaled or not. + * Clients need to explicitly wait using synx_wait() or synx_async_wait() * * @param session : Session ptr (returned from synx_initialize) * @param h_synx : Synx object handle * - * @return Status of the synx object. + * @return Status of the synx object */ int synx_get_status(struct synx_session *session, u32 h_synx); /** - * synx_import - Imports (looks up) synx object from given handle/fence - * - * Import subscribes the client session for notification on signal - * of handles/fences. - * + * synx_import - Imports (looks up) synx object from given handle or fence + * * * @param session : Session ptr (returned from synx_initialize) * @param params : Pointer to import params * - * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state + * @return Status of operation. Negative in case of failure, SYNX_SUCCESS otherwise. */ int synx_import(struct synx_session *session, struct synx_import_params *params); /** * synx_get_fence - Get the native fence backing the synx object * - * Function returns the native fence. Clients need to - * acquire & release additional reference explicitly. + * Synx framework will take additional reference on dma fence and returns the native + * fence. Clients need to release additional reference explicitly by calling kref_put. * * @param session : Session ptr (returned from synx_initialize) * @param h_synx : Synx object handle * - * @return Fence pointer upon success, NULL or error in case of failure. + * @return Fence pointer in case of success and NULL in case of failure. */ void *synx_get_fence(struct synx_session *session, u32 h_synx); /** - * synx_release - Release the synx object + * synx_release - Release the synx object. * - * Decrements refcount of a synx object by 1, and destroys it - * if becomes 0. + * Every created, imported or merged synx object should be released. * * @param session : Session ptr (returned from synx_initialize) * @param h_synx : Synx object handle to be destroyed @@ -536,7 +569,7 @@ int synx_release(struct synx_session *session, u32 h_synx); * * Function should be called on HW hang/reset to * recover the Synx handles shared. This cleans up - * Synx handles held by the rest HW, and avoids + * synx handles owned by subsystem under hang/reset, and avoids * potential resource leaks. * * Function does not destroy the session, but only @@ -545,6 +578,9 @@ int synx_release(struct synx_session *session, u32 h_synx); * need to destroy the session explicitly through * synx_uninitialize API. * + * All the unsignaled handles owned/imported by the core at the time of reset + * will be signaled by synx framework on behalf of hung core with SYNX_STATE_SIGNALED_SSR. + * * @param id : Client ID of core to recover * * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. 
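
For illustration of the timed async-wait flow added in PATCH 27 and documented in the synx_api.h comments above, a minimal client sketch follows. It is not part of the patch series; the include paths, the SYNX_CLIENT_NATIVE client id, the callback body and the 100 ms timeout are assumptions made only for this example.

#include <linux/err.h>
#include <linux/printk.h>
#include "synx_api.h"	/* illustrative include path */

static void example_timeout_cb(u32 h_synx, int status, void *data)
{
	/* With the async_wait timeout change, status is SYNX_STATE_TIMEOUT
	 * when the timer expires before the handle is signaled; otherwise it
	 * carries the signal status of the handle.
	 */
	pr_info("synx handle %u completed with status %d\n", h_synx, status);
}

static int example_async_wait(void)
{
	struct synx_initialization_params init_params = {
		.id    = SYNX_CLIENT_NATIVE,	/* assumed client id value */
		.flags = SYNX_INIT_DEFAULT,
	};
	struct synx_create_params create_params = {};
	struct synx_callback_params cb_params = {};
	struct synx_session *session;
	u32 h_synx = SYNX_INVALID_HANDLE;
	int rc;

	session = synx_initialize(&init_params);
	if (IS_ERR_OR_NULL(session))
		return -SYNX_INVALID;

	create_params.h_synx = &h_synx;
	create_params.flags = SYNX_CREATE_GLOBAL_FENCE;
	rc = synx_create(session, &create_params);
	if (rc != SYNX_SUCCESS)
		goto out_session;

	cb_params.h_synx = h_synx;
	cb_params.cb_func = example_timeout_cb;
	cb_params.userdata = NULL;
	cb_params.timeout_ms = 100;	/* finite timeout; SYNX_NO_TIMEOUT skips the timer */
	rc = synx_async_wait(session, &cb_params);
	if (rc != SYNX_SUCCESS)
		goto out_handle;

	return SYNX_SUCCESS;

out_handle:
	synx_release(session, h_synx);
out_session:
	synx_uninitialize(session);
	return rc;
}

If the handle is signaled before the timeout expires, synx_util_callback_dispatch() deletes the pending timer and the callback is dispatched with the signal status instead, per the synx_util.c changes in PATCH 27 above.
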
diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 9e22120204..4f1b7edc2a 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -47,14 +47,6 @@ enum synx_core_id { #define SYNX_HWSPIN_TIMEOUT 500 #define SYNX_HWSPIN_ID 10 -/* internal signal states */ -#define SYNX_STATE_INVALID 0 -#define SYNX_STATE_ACTIVE 1 -#define SYNX_STATE_SIGNALED_ERROR 3 -#define SYNX_STATE_SIGNALED_EXTERNAL 5 -#define SYNX_STATE_SIGNALED_SSR 6 -#define SYNX_STATE_TIMEOUT 7 - /* dma fence states */ #define SYNX_DMA_FENCE_STATE_MAX 4096 From df0f5418d0a767c6225df1c5e23a74d8d89cf1a5 Mon Sep 17 00:00:00 2001 From: Kuldeep Singh Date: Wed, 28 Jun 2023 15:26:28 +0530 Subject: [PATCH 29/42] msm: synx: Added check while signaling merged handle Added a condition while dispatching callbacks so that in case of merged handles callback is dispatched only when all handles are signaled. Change-Id: Ida6cf2c8bab6f7d66d625f2b14e49418dd794d44 Signed-off-by: Kuldeep Singh --- msm/synx/synx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index ab51ea9b6a..3d425f3b75 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -694,7 +694,8 @@ void synx_signal_handler(struct work_struct *cb_dispatch) goto fail; } - if (rc == SYNX_SUCCESS) + if (rc == SYNX_SUCCESS && synx_util_get_object_status(synx_obj) + != SYNX_STATE_ACTIVE) rc = synx_native_signal_core(synx_obj, status, (signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ? true : false, signal_cb->ext_sync_id); From bfffddb83e58b5752e16bcb5d1cb704f59983bad Mon Sep 17 00:00:00 2001 From: Chelliah Vinu R Date: Thu, 11 May 2023 23:45:41 +0530 Subject: [PATCH 30/42] msm: synx: ipclite: Export HW Mutex APIs Cleaned up the HW Mutex functions to export APIs (acquire and release) for cross-core testing. Change-Id: I2ddda8b5fc67c80a8a372a2c4124b887f8130e14 Signed-off-by: Chelliah Vinu R --- msm/synx/ipclite.c | 136 +++++++++++++------------------------- msm/synx/ipclite.h | 19 ++---- msm/synx/ipclite_client.h | 26 ++++++-- 3 files changed, 72 insertions(+), 109 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index ce0a6b734d..741d36f81a 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -17,7 +17,6 @@ #include #include -#include #include @@ -32,15 +31,13 @@ static struct ipclite_info *ipclite; static struct ipclite_client synx_client; static struct ipclite_client test_client; -static struct ipclite_hw_mutex_ops *ipclite_hw_mutex; static struct ipclite_debug_info *ipclite_dbg_info; static struct ipclite_debug_struct *ipclite_dbg_struct; static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem; static struct mutex ssr_mutex; static struct kobject *sysfs_kobj; -static uint32_t enabled_hosts; -static uint32_t partitions; +static uint32_t enabled_hosts, partitions; static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO; static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump; @@ -55,7 +52,7 @@ static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1) return (h0 == h1 && h0 != IPCMEM_APPS); } -static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...) +static void ipclite_inmem_log(const char *psztStr, ...) 
{ uint32_t local_index = 0; va_list pArgs; @@ -183,38 +180,41 @@ static void ipclite_dump_inmem_logs(void) return; } -static void ipclite_hw_mutex_acquire(void) +int ipclite_hw_mutex_acquire(void) { - int32_t ret; + int ret; - if (ipclite != NULL) { - if (!global_atomic_support) { - ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, - HWSPINLOCK_TIMEOUT, - &ipclite->ipclite_hw_mutex->flags); - if (ret) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed\n"); - return; - } - - ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS; - - IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n"); - } + if (unlikely(!ipclite)) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized"); + return -ENOMEM; } + ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, + HWSPINLOCK_TIMEOUT, &ipclite->hw_mutex_flags); + if (ret) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed"); + return ret; + } + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS; + IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired"); + return ret; } +EXPORT_SYMBOL(ipclite_hw_mutex_acquire); -static void ipclite_hw_mutex_release(void) +int ipclite_hw_mutex_release(void) { - if (ipclite != NULL) { - if (!global_atomic_support) { - ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; - hwspin_unlock_irqrestore(ipclite->hwlock, - &ipclite->ipclite_hw_mutex->flags); - IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n"); - } + if (unlikely(!ipclite)) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized"); + return -ENOMEM; } + if (ipclite->ipcmem.toc_data.host_info->hwlock_owner != IPCMEM_APPS) + return -EINVAL; + + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; + hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->hw_mutex_flags); + IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock released"); + return 0; } +EXPORT_SYMBOL(ipclite_hw_mutex_release); void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data) { @@ -230,25 +230,17 @@ EXPORT_SYMBOL(ipclite_atomic_init_i32); void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data) { - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; atomic_set(addr, data); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; } EXPORT_SYMBOL(ipclite_global_atomic_store_u32); void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data) { - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; atomic_set(addr, data); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; } EXPORT_SYMBOL(ipclite_global_atomic_store_i32); @@ -256,13 +248,9 @@ uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr) { uint32_t ret; - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_read(addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -272,13 +260,9 @@ int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr) { int32_t ret; - /* callback to acquire hw mutex lock if atomic support is not 
enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_read(addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -289,13 +273,9 @@ uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *a uint32_t ret; uint32_t mask = (1 << nr); - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_fetch_or(mask, addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -306,13 +286,9 @@ uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t uint32_t ret; uint32_t mask = (1 << nr); - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_fetch_and(~mask, addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -322,13 +298,9 @@ int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr) { int32_t ret = 0; - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_fetch_add(1, addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -338,13 +310,9 @@ int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr) { int32_t ret = 0; - /* callback to acquire hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->acquire(); - + ATOMIC_HW_MUTEX_ACQUIRE; ret = atomic_fetch_sub(1, addr); - - /* callback to release hw mutex lock if atomic support is not enabled */ - ipclite->ipclite_hw_mutex->release(); + ATOMIC_HW_MUTEX_RELEASE; return ret; } @@ -1158,7 +1126,6 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem) { struct ipcmem_global_partition *global_partition; - /* Check added to verify ipclite is initialized */ if (!ipclite) { IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n"); return -ENOMEM; @@ -1250,12 +1217,6 @@ static int ipclite_channel_init(struct device *parent, } IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid); - ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL); - if (!ipclite_hw_mutex) { - ret = -ENOMEM; - goto err_put_dev; - } - ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic); if (ret) { dev_err(dev, "failed to parse global_atomic\n"); @@ -1627,13 +1588,6 @@ static int ipclite_probe(struct platform_device *pdev) /* Should be called after all Global TOC related init is done */ insert_magic_number(); - /* hw mutex callbacks */ - ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire; - ipclite_hw_mutex->release = ipclite_hw_mutex_release; - - /* store to ipclite structure */ - ipclite->ipclite_hw_mutex = ipclite_hw_mutex; - /* Update the Global Debug variable for FW cores */ ipclite_dbg_info->debug_level = ipclite_debug_level; ipclite_dbg_info->debug_control = ipclite_debug_control; diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index 543f8e0dc1..f89d35b41b 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -54,17 
+54,18 @@ #define IPCLITE_OS_LOG(__level, __fmt, arg...) \ do { \ if (ipclite_debug_level & __level) { \ - if (ipclite_debug_control & IPCLITE_DMESG_LOG) { \ + if (ipclite_debug_control & IPCLITE_DMESG_LOG) \ pr_info(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \ ipclite_dbg_label[__level], ## arg); \ - } \ - if (ipclite_debug_control & IPCLITE_INMEM_LOG) { \ - IPCLITE_OS_INMEM_LOG(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \ + if (ipclite_debug_control & IPCLITE_INMEM_LOG) \ + ipclite_inmem_log(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \ ipclite_dbg_label[__level], ## arg); \ - } \ } \ } while (0) +#define ATOMIC_HW_MUTEX_ACQUIRE (global_atomic_support ?: ipclite_hw_mutex_acquire()) +#define ATOMIC_HW_MUTEX_RELEASE (global_atomic_support ?: ipclite_hw_mutex_release()) + /** * enum ipclite_channel_status - channel status * @@ -302,12 +303,6 @@ struct ipclite_fifo { void (*reset)(struct ipclite_fifo *fifo); }; -struct ipclite_hw_mutex_ops { - unsigned long flags; - void (*acquire)(void); - void (*release)(void); -}; - struct ipclite_irq_info { struct mbox_client mbox_client; struct mbox_chan *mbox_chan; @@ -346,7 +341,7 @@ struct ipclite_info { struct ipclite_channel channel[IPCMEM_NUM_HOSTS]; struct ipclite_mem ipcmem; struct hwspinlock *hwlock; - struct ipclite_hw_mutex_ops *ipclite_hw_mutex; + unsigned long hw_mutex_flags; }; /*Default partition parameters*/ diff --git a/msm/synx/ipclite_client.h b/msm/synx/ipclite_client.h index ff5948fc62..366ef74322 100644 --- a/msm/synx/ipclite_client.h +++ b/msm/synx/ipclite_client.h @@ -32,7 +32,7 @@ struct global_region_info { uint32_t size; }; -typedef int32_t (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv); +typedef int (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv); /** * ipclite_msg_send() - Sends message to remote client. @@ -42,7 +42,7 @@ typedef int32_t (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv); * * @return Zero on successful registration, negative on failure. */ -int32_t ipclite_msg_send(int32_t proc_id, uint64_t data); +int ipclite_msg_send(int32_t proc_id, uint64_t data); /** * ipclite_register_client() - Registers client callback with framework. @@ -52,7 +52,7 @@ int32_t ipclite_msg_send(int32_t proc_id, uint64_t data); * * @return Zero on successful registration, negative on failure. */ -int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv); +int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv); /** * ipclite_test_msg_send() - Sends message to remote client. @@ -62,7 +62,7 @@ int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv); * * @return Zero on successful registration, negative on failure. */ -int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data); +int ipclite_test_msg_send(int32_t proc_id, uint64_t data); /** * ipclite_register_test_client() - Registers client callback with framework. @@ -72,7 +72,7 @@ int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data); * * @return Zero on successful registration, negative on failure. */ -int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv); +int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv); /** * get_global_partition_info() - Gets info about IPCMEM's global partitions. @@ -81,7 +81,7 @@ int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv); * * @return Zero on successful registration, negative on failure. 
*/ -int32_t get_global_partition_info(struct global_region_info *global_ipcmem); +int get_global_partition_info(struct global_region_info *global_ipcmem); /** * ipclite_recover() - Recovers the ipclite if any core goes for SSR @@ -92,6 +92,20 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem); */ void ipclite_recover(enum ipcmem_host_type core_id); +/** + * ipclite_hw_mutex_acquire() - Locks the hw mutex reserved for ipclite. + * + * @return Zero on successful acquire, negative on failure. + */ +int ipclite_hw_mutex_acquire(void); + +/** + * ipclite_hw_mutex_release() - Unlocks the hw mutex reserved for ipclite. + * + * @return Zero on successful release, negative on failure. + */ +int ipclite_hw_mutex_release(void); + /** * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value. * From b0fd26d5a8a98e878a4652bce06203be7dbd19a9 Mon Sep 17 00:00:00 2001 From: Nagendra Jamadagni Date: Wed, 31 May 2023 01:15:19 +0530 Subject: [PATCH 31/42] msm: synx: ipclite: Testing for IPCLite 1. Enables user to test all features of IPCLite. 2. Sends testing pings and receives replies to verify proper functionality. 3. Allows end user to configure test with various testing parameters. Change-Id: Id22ee40990cfe750301d1c03d6215f49a03bae47 Signed-off-by: Nagendra Jamadagni --- Android.mk | 12 +- msm/Kbuild | 1 + msm/synx/test/ipclite_test.c | 1455 ++++++++++++++++++++++++++++++++++ msm/synx/test/ipclite_test.h | 118 +++ pineapple.bzl | 1 + synx_kernel_board.mk | 1 + synx_modules.bzl | 7 + 7 files changed, 1594 insertions(+), 1 deletion(-) create mode 100644 msm/synx/test/ipclite_test.c create mode 100644 msm/synx/test/ipclite_test.h diff --git a/Android.mk b/Android.mk index 4f86e48296..222403af9d 100644 --- a/Android.mk +++ b/Android.mk @@ -22,7 +22,7 @@ DLKM_DIR := $(TOP)/device/qcom/common/dlkm LOCAL_PATH := $(call my-dir) LOCAL_MODULE_DDK_BUILD := true -LOCAL_MODULE_KO_DIRS := msm/synx/synx-driver.ko msm/synx/ipclite.ko +LOCAL_MODULE_KO_DIRS := msm/synx/synx-driver.ko msm/synx/ipclite.ko msm/synx/test/ipclite_test.ko include $(CLEAR_VARS) # For incremental compilation @@ -53,6 +53,16 @@ LOCAL_MODULE := ipclite.ko LOCAL_MODULE_KBUILD_NAME := msm/synx/ipclite.ko LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) #BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) +include $(DLKM_DIR)/Build_external_kernelmodule.mk + +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES)) +LOCAL_MODULE := ipclite_test.ko +LOCAL_MODULE_KBUILD_NAME := msm/synx/test/ipclite_test.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) # print out variables $(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) diff --git a/msm/Kbuild b/msm/Kbuild index 3e03ecf95f..7b41034fd7 100644 --- a/msm/Kbuild +++ b/msm/Kbuild @@ -34,4 +34,5 @@ endif obj-m += synx-driver.o obj-m += synx/ipclite.o +obj-m += synx/test/ipclite_test.o synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o diff --git a/msm/synx/test/ipclite_test.c b/msm/synx/test/ipclite_test.c new file mode 100644 index 0000000000..b9e6a5821c --- /dev/null +++ b/msm/synx/test/ipclite_test.c @@ -0,0 +1,1455 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include "ipclite_test.h" + +struct kobject *sysfs_dir; + +static int threads_started, threads_completed, cores_completed; +static bool ssr_complete; +/* data_lock spinlock is used to increment ping counters in thread safe manner. + * core_wq to ensure all the cores have completed the test before next step. + * ssr_wq to wait during ssr operation. + * reply_wq to wait on replies to ping sent. + * thread_wq to wait on all threads local to APPS to complete + * test_done is a completion barrier which ensures test case is completed + * crash_done is a completion barrier which ensures ssr crash is completed + */ +DEFINE_SPINLOCK(data_lock); +DECLARE_WAIT_QUEUE_HEAD(core_wq); +DECLARE_WAIT_QUEUE_HEAD(ssr_wq); +DECLARE_WAIT_QUEUE_HEAD(reply_wq); +DECLARE_WAIT_QUEUE_HEAD(thread_wq); +DECLARE_COMPLETION(test_done); +DECLARE_COMPLETION(crash_done); + +static struct ipclite_thread_data wakeup_check, bg_pings; +static struct ipclite_thread_data thread_data; + +struct handle_t *handle_ptr; +static int handle_data[512]; +static struct ipclite_test_data *data; + +static void init_test_params(void) +{ + data->test_params.wait = 1; + data->test_params.num_pings = 1000; + data->test_params.num_itr = 1; + data->test_params.selected_senders = 1; + data->test_params.selected_receivers = 1; + data->test_params.enabled_cores = IPCLITE_TEST_ALL_CORES; + data->test_params.selected_test_case = 0; + data->test_params.num_thread = 1; + data->test_params.num_senders = 1; + data->test_params.num_receivers = 1; +} +/* Function to pack the different fields into one 64 bit message value + * 1 byte header of constant patter 01010101 + * 1 byte to store the parameter type + * 1 byte to store the test case id + * 3 bytes to store the value of parameter in payload + * 1 byte to store test start/stop information + * 1 byte to store test pass/fail information + */ +static uint64_t get_param_macro(uint64_t parameter_info, uint64_t test_info, + uint64_t payload_info, uint64_t start_stop_info, + uint64_t pass_fail_info) +{ + uint64_t param_macro = 0; + + parameter_info &= GENMASK_ULL(7, 0); + test_info &= GENMASK_ULL(7, 0); + payload_info &= GENMASK_ULL(23, 0); + start_stop_info &= GENMASK_ULL(7, 0); + pass_fail_info &= GENMASK_ULL(7, 0); + + param_macro = ((uint64_t)IPCLITE_TEST_HEADER) << 56; + param_macro |= parameter_info << 48; + param_macro |= test_info << 40; + param_macro |= payload_info << 16; + param_macro |= start_stop_info << 8; + param_macro |= pass_fail_info; + + return param_macro; +} + +static inline bool is_enabled_core(int core_id) +{ + return (data->test_params.enabled_cores & BIT(core_id)) ? true : false; +} + +static inline bool is_selected_receiver(int core_id) +{ + return (data->test_params.selected_receivers & BIT(core_id)) ? true : false; +} + +static inline bool is_selected_sender(int core_id) +{ + return (data->test_params.selected_senders & BIT(core_id)) ? 
true : false; +} + +static void ping_receive(struct ipclite_test_data *data) +{ + pr_debug("Successfully received a ping\n"); + data->pings_received[data->client_id]++; + wake_up_interruptible(&reply_wq); +} + +static int check_pings(struct ipclite_test_data *data) +{ + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (!is_selected_receiver(i)) + continue; + if (data->pings_sent[i] != data->pings_received[i]) + return -IPCLITE_TEST_FAIL; + } + return 0; +} + +static void ping_all_enabled_cores(u64 msg) +{ + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (i == IPCMEM_APPS || !is_enabled_core(i)) + continue; + ipclite_test_msg_send(i, msg); + } +} + +static void ping_sel_senders(uint64_t msg) +{ + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (i == IPCMEM_APPS || !(data->test_params.selected_senders & BIT(i))) + continue; + ipclite_test_msg_send(i, msg); + } +} + +static int thread_init(struct ipclite_thread_data *th_data, void *data_ptr, void *fptr) +{ + th_data->data = data_ptr; + th_data->run = false; + init_waitqueue_head(&th_data->wq); + th_data->thread = kthread_run(fptr, th_data, "test thread"); + if (IS_ERR(th_data->thread)) { + pr_err("Thread creation failed\n"); + return -EINVAL; + } + return 0; +} + +static int ping_selected_receivers(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + int ret = 0; + uint64_t macro_to_ping = get_param_macro(TEST_CASE, + data->test_params.selected_test_case, + PING_SEND, 0, 0); + bool fail = false; + + while (!kthread_should_stop()) { + + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + for (int i = 0; i < data->test_params.num_pings/data->test_params.num_thread; ++i) { + for (int j = 0; j < IPCMEM_NUM_HOSTS; ++j) { + if (!is_selected_receiver(j)) + continue; + ret = ipclite_test_msg_send(j, macro_to_ping); + if (ret == 0) { + spin_lock(&data_lock); + data->pings_sent[j]++; + spin_unlock(&data_lock); + } else + fail = true; + /* If wait is enabled and number of pings to wait on is sent, + * Wait for replies or timeout + */ + if (data->test_params.wait != 0 && + (i+1) % data->test_params.wait == 0) { + ret = wait_event_interruptible_timeout(reply_wq, + check_pings(data) == 0, + msecs_to_jiffies(1000)); + if (ret < 1) + pr_err("Timeout occurred\n"); + } + } + } + pr_debug("Completed iteration. Marking thread as completed\n"); + spin_lock(&data_lock); + threads_completed++; + wake_up_interruptible(&thread_wq); + spin_unlock(&data_lock); + } + + return fail ? 
-IPCLITE_TEST_FAIL : 0; +} + +static int negative_tests(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + int ret = 0, fail = 0; + uint64_t param; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + pr_info("Test 1: Sending messages to disabled cores\n"); + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (!is_selected_receiver(i)) + continue; + param = get_param_macro(TEST_CASE, NEGATIVE, + PING_SEND, 0, 0); + ret = ipclite_test_msg_send(i, param); + if (ret == 0) { + pr_err("TEST FAILED\n"); + fail++; + } + } + if (!fail) + pr_info("TEST PASSED\n"); + + pr_info("Test 2: Passing NULL to get_global_parition_info\n"); + ret = get_global_partition_info(NULL); + if (ret == 0) { + pr_err("TEST FAILED\n"); + fail++; + } else + pr_info("TEST PASSED\n"); + + if (fail != 0) + pr_err("Negative TEST FAILED\n"); + else + pr_info("Negative TEST PASSED\n"); + + param = get_param_macro(TEST_CASE, NEGATIVE, 0, + IPCLITE_TEST_STOP, 0); + ipclite_test_msg_send(IPCMEM_APPS, param); + wait_event_interruptible_timeout(core_wq, + cores_completed == data->test_params.num_senders, + msecs_to_jiffies(1000)); + complete(&test_done); + } + return fail == 0 ? 0 : -IPCLITE_TEST_FAIL; +} + +static int hw_unlock_test(void *hw_mutex_byte) +{ + int ret = 0; + uint64_t param; + + if (!hw_mutex_byte) { + pr_err("Byte for hardware mutex testing is not initialized.\n"); + return -EFAULT; + } + + pr_info("Testing HW Mutex Lock Acquire Functionality\n"); + *((int *)(hw_mutex_byte)) = -1; + pr_debug("The initial value of the byte is %d\n", *((int *)(hw_mutex_byte))); + pr_debug("Locking the mutex from APPS Side\n"); + + ret = ipclite_hw_mutex_acquire(); + if (ret != 0) { + pr_err("Could not acquire hw mutex from APPS side\n"); + return ret; + } + + pr_debug("Setting the value of the byte to %d\n", IPCMEM_APPS); + *((int *)(hw_mutex_byte)) = IPCMEM_APPS; + pr_debug("The new value of the byte is %d\n", *((int *)(hw_mutex_byte))); + + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (i == IPCMEM_APPS || !is_selected_receiver(i)) + continue; + pr_debug("Pinging %s to try and release the locked mutex\n", + core_name[i]); + param = get_param_macro(TEST_CASE, HW_MUTEX, + HW_MUTEX_RELEASE, + IPCLITE_TEST_START, 0); + ipclite_test_msg_send(i, param); + // Wait for timeout here + udelay(1000); + } + + if (*((int *)(hw_mutex_byte)) != IPCMEM_APPS) + return -IPCLITE_TEST_FAIL; + + ret = ipclite_hw_mutex_release(); + if (ret != 0) + pr_err("Could not release mutex lock successfully\n"); + return ret; +} + +static int hw_mutex_test(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + int ret = 0; + void *addr = data->global_memory->virt_base; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + ret = hw_unlock_test(addr); + + if (ret == 0) + pr_info("HW Unlock Test Passed.\n"); + else + pr_info("HW Unlock Test Failed.\n"); + + complete(&test_done); + } + return ret; +} +/* Ping cores which are not selected for ssr in the background */ +static int send_bg_pings(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + int ret; + uint64_t param; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + 
while (!ssr_complete && !kthread_should_stop()) { + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (i == data->ssr_client || !is_selected_receiver(i)) + continue; + param = get_param_macro(TEST_CASE, + SSR, + PING_SEND, 0, 0); + ret = ipclite_test_msg_send(i, param); + if (ret != 0) + pr_err("Unable to ping core %d\n", i); + } + wait_event_interruptible_timeout(ssr_wq, + ssr_complete, + msecs_to_jiffies(1000)); + } + pr_debug("SSR recovery of core %d completed. Exiting thread\n", + data->ssr_client); + } + return 0; +} +/* Wait for 30s and then send pings one to by one to see if core wakeup + * is completed + */ +static int ssr_wakeup_check(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + int count = 0, ret = 0; + uint64_t param; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + ssr_complete = false; + msleep_interruptible(30000); + while (count < 10) { + pr_debug("Sent ping number %d to check if wakeup is completed\n", + count); + param = get_param_macro(TEST_CASE, SSR, + SSR_WAKEUP, + IPCLITE_TEST_START, 0); + ret = ipclite_test_msg_send(data->ssr_client, param); + ++count; + wait_event_interruptible_timeout(ssr_wq, + ssr_complete, + msecs_to_jiffies(1000)); + } + if (count == 10 && !ssr_complete) { + pr_info("FW Core wakeup failed.\n"); + return -IPCLITE_TEST_FAIL; + } + pr_info("FW Core wakeup completed successfully.\n"); + pr_info("Going for non crashing testing.\n"); + param = get_param_macro(TEST_CASE, PING, 0, + IPCLITE_TEST_START, 0); + ipclite_test_msg_send(data->ssr_client, param); + complete(&crash_done); + } + return 0; +} + +static int ssr_test(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + uint64_t param = 0; + int ret = 0; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + ssr_complete = false; + ret = thread_init(&wakeup_check, data, ssr_wakeup_check); + + if (ret != 0) { + pr_err("Thread creation failed\n"); + return -EINVAL; + } + + ret = thread_init(&bg_pings, data, send_bg_pings); + if (ret != 0) { + pr_err("Thread creation failed\n"); + kthread_stop(wakeup_check.thread); + return -EINVAL; + } + pr_info("Starting on SSR test for core %d\n", data->ssr_client); + memset(data->pings_sent, 0, sizeof(data->pings_sent)); + memset(data->pings_received, 0, sizeof(data->pings_received)); + param = get_param_macro(TEST_CASE, SSR, + SSR_CRASHING, IPCLITE_TEST_START, 0); + ipclite_test_msg_send(data->ssr_client, param); + wait_for_completion(&crash_done); + kthread_stop(wakeup_check.thread); + kthread_stop(bg_pings.thread); + complete(&test_done); + } + return 0; +} + +static int inc_byte(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + ipclite_atomic_uint32_t *addr = t_data->data; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + for (int i = 0; i < data->test_params.num_itr; ++i) + ipclite_global_atomic_inc(addr); + threads_completed++; + wake_up_interruptible(&thread_wq); + } + return 0; +} + +static int dec_byte(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + ipclite_atomic_uint32_t *addr = t_data->data; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, 
t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + for (int i = 0; i < data->test_params.num_itr; ++i) + ipclite_global_atomic_dec(addr); + threads_completed++; + wake_up_interruptible(&thread_wq); + } + return 0; +} + +static int global_atomics_test(void *byte, int test_number) +{ + int ret = 0; + int total_increment = 0; + uint64_t param; + bool fail = false; + struct ipclite_thread_data ga_t1, ga_t2; + + + if (!byte) { + pr_err("Byte not initialized. Test Failed\n"); + return -EFAULT; + } + pr_debug("The initial value of the byte is %x\n", *((int *)byte)); + + threads_completed = 0; + threads_started = 0; + + switch (test_number) { + case GLOBAL_ATOMICS_INC: + ret = thread_init(&ga_t1, byte, inc_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + return -EINVAL; + } + ret = thread_init(&ga_t2, byte, inc_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + kthread_stop(ga_t1.thread); + return -EINVAL; + } + break; + case GLOBAL_ATOMICS_DEC: + ret = thread_init(&ga_t1, byte, dec_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + return -EINVAL; + } + ret = thread_init(&ga_t2, byte, dec_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + kthread_stop(ga_t1.thread); + return -EINVAL; + } + break; + case GLOBAL_ATOMICS_INC_DEC: + ret = thread_init(&ga_t1, byte, inc_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + return -EINVAL; + } + ret = thread_init(&ga_t2, byte, dec_byte); + if (ret != 0) { + pr_err("Thread creation failed\n"); + kthread_stop(ga_t1.thread); + return -EINVAL; + } + break; + default: + pr_err("Wrong input provided\n"); + return -EINVAL; + } + param = get_param_macro(TEST_CASE, + GLOBAL_ATOMIC, + test_number, + IPCLITE_TEST_START, 0); + + for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) { + if (i == IPCMEM_APPS || !is_selected_receiver(i)) + continue; + ret = ipclite_test_msg_send(i, param); + if (ret == 0) + threads_started += 2; + } + if (is_selected_receiver(IPCMEM_APPS)) { + ga_t1.run = true; + wake_up_interruptible(&ga_t1.wq); + ga_t2.run = true; + wake_up_interruptible(&ga_t2.wq); + threads_started += 2; + } + /* Wait for all threads to complete or timeout */ + ret = wait_event_interruptible_timeout(thread_wq, + threads_started == 2 * data->test_params.num_receivers && + threads_completed == 2 * data->test_params.num_receivers, + msecs_to_jiffies(1000)); + if (ret < 1) + pr_err("Threads could not complete successfully\n"); + + pr_debug("The value of the byte is %x\n", *((int *)byte)); + /* Stopping threads if they have not already completed before evaluation */ + kthread_stop(ga_t1.thread); + kthread_stop(ga_t2.thread); + + total_increment = 2 * data->test_params.num_receivers * data->test_params.num_itr; + + switch (test_number) { + case GLOBAL_ATOMICS_INC: + if (*((int *)byte) == total_increment) + pr_info("Increment Successful.\n"); + else { + pr_err("Increment Failed.\n"); + fail = true; + } + break; + case GLOBAL_ATOMICS_DEC: + if (*((int *)byte) == 0) + pr_info("Decrement Successful\n"); + else { + pr_err("Decrement Failed\n"); + fail = true; + } + break; + case GLOBAL_ATOMICS_INC_DEC: + if (*((int *)byte) == 0) + pr_info("Increment and Decrement Successful\n"); + else { + pr_err("Increment and Decrement Failed\n"); + fail = true; + } + break; + default: + pr_err("Wrong input provided\n"); + return -EINVAL; + } + + return fail ? 
-IPCLITE_TEST_FAIL : 0; +} + +static inline uint32_t bitops_count_trailing_one(uint32_t x) +{ + uint32_t mask = 0; + + for (int i = 0; i < BITS(ipclite_atomic_uint32_t); i++) { + mask = 1 << i; + if (!(x & mask)) + return i; + } + return BITS(ipclite_atomic_uint32_t); +} + +/** + * @brief Finds the first zero in the bitmap + * + * @param bmap_addr pointer to bitmap + * @param size the size of the bitmap indicated in number of bits + * @return uint32_t index of the first zero + */ +static uint32_t bitops_util_find_first_zero(uint32_t *bmap_addr, uint32_t size) +{ + uint32_t res = 0; + + for (int i = 0; i * BITS(ipclite_atomic_uint32_t) < size; i++) { + if (bmap_addr[i] != ~(uint32_t)0) { + res = i * BITS(ipclite_atomic_uint32_t) + + bitops_count_trailing_one(bmap_addr[i]); + return res < size ? res : size; + } + } + return size; +} + +static int alloc_index(int *bitmap_base) +{ + uint32_t prev = 0, index = 0; + + do { + index = bitops_util_find_first_zero((unsigned int *) bitmap_base, + NUM_HANDLES); + if (index > NUM_HANDLES) { + pr_err("No Memory Error. Exiting\n"); + break; + } + prev = ipclite_global_test_and_set_bit(index % 32, + (ipclite_atomic_uint32_t *)(bitmap_base + index/32)); + if ((prev & (1UL << (index % 32))) == 0) + break; + } while (true); + return index; +} + +void clear_index(int *bitmap_base, uint32_t index) +{ + uint32_t addr_idx = index/32, ii = index % 32; + + if (bitmap_base == NULL) { + pr_err("Invalid pointer passed\n"); + return; + } + ipclite_global_test_and_clear_bit(ii, (ipclite_atomic_uint32_t *)(bitmap_base + addr_idx)); +} + +static int global_atomics_test_set_clear(struct ipclite_test_data *data) +{ + int index = 0, ret = 0; + bool fail = false; + uint64_t param; + + handle_ptr = data->global_memory->virt_base; + pr_info("Starting global atomics Test 4. Starting allocation of index\n"); + pr_debug("The total number of handles is %d\n", NUM_HANDLES); + pr_debug("Global Base : %p\n", handle_ptr); + for (int itr = 0; itr < data->test_params.num_itr; itr++) { + threads_started = 0; + threads_completed = 0; + for (int j = 0; j < IPCMEM_NUM_HOSTS; ++j) { + if (j == IPCMEM_APPS || !is_selected_receiver(j)) + continue; + param = get_param_macro(TEST_CASE, + GLOBAL_ATOMIC, + GLOBAL_ATOMICS_SET_CLR, + IPCLITE_TEST_START, 0); + ret = ipclite_test_msg_send(j, param); + if (ret == 0) + threads_started++; + } + if (is_selected_receiver(IPCMEM_APPS)) { + threads_started++; + for (int i = 0; i < 512; ++i) { + index = alloc_index((int *)handle_ptr); + handle_data[i] = index; + handle_ptr->handle_data[index] = IPCMEM_APPS; + } + + for (int i = 0; i < 512; ++i) { + index = handle_data[i]; + if (handle_ptr->handle_data[index] != IPCMEM_APPS) { + pr_err("Handle data has been overwritten.\n"); + pr_err("This is a bug : Core : %d Index : %d\n", + handle_ptr->handle_data[index], index); + fail = true; + } + } + + for (int i = 0; i < 512; ++i) { + index = handle_data[i]; + clear_index((int *)handle_ptr, index); + } + threads_completed++; + if (fail) + break; + } + wait_event_interruptible_timeout(thread_wq, + threads_started == data->test_params.num_receivers && + threads_completed == data->test_params.num_receivers, + msecs_to_jiffies(1000)); + } + if (!fail) + pr_info("Global Atomics Set and Clear test passed successfully\n"); + return fail ? 
-IPCLITE_TEST_FAIL : 0; +} + +static int global_atomics_test_wrapper(void *data_ptr) +{ + int result = 0, ret = 0; + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + void *addr = data->global_memory->virt_base; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + *((int *)addr) = 0; + result = global_atomics_test(addr, GLOBAL_ATOMICS_INC); + result &= global_atomics_test(addr, GLOBAL_ATOMICS_DEC); + result &= global_atomics_test(addr, GLOBAL_ATOMICS_INC_DEC); + result &= global_atomics_test_set_clear(data); + if (result != 0) { + pr_err("Global Atomics TEST FAILED\n"); + ret = -IPCLITE_TEST_FAIL; + } else { + pr_info("Global Atomics TEST PASSED\n"); + ret = 0; + } + complete(&test_done); + } + return ret; +} + +static int ping_test(void *data_ptr) +{ + int ret = 0; + uint64_t param_macro; + struct ipclite_test_data *data = data_ptr; + struct ipclite_thread_data th_arr[IPCLITE_TEST_MAX_THREADS]; + int count; + + memset(data->pings_sent, 0, sizeof(data->pings_sent)); + memset(data->pings_received, 0, sizeof(data->pings_received)); + threads_completed = 0; + param_macro = 0; + for (count = 0; count < data->test_params.num_thread; ++count) { + ret = thread_init(&th_arr[count], data, ping_selected_receivers); + if (ret != 0) + break; + } + if (count != data->test_params.num_thread) + while (count > 0) { + kthread_stop(th_arr[count-1].thread); + --count; + } + if (ret != 0) { + pr_err("Threads could not be initialized. Ping Test Failed\n"); + return ret; + } + for (threads_started = 0; threads_started < data->test_params.num_thread; + ++threads_started) { + th_arr[threads_started].run = true; + wake_up_interruptible(&th_arr[threads_started].wq); + } + ret = wait_event_interruptible_timeout(thread_wq, + threads_started == data->test_params.num_thread && + threads_completed == data->test_params.num_thread, + msecs_to_jiffies(1000) * data->test_params.num_thread); + if (ret < 1) { + pr_err("Threads not completed successfully. Only completed %d threads\n", + threads_completed); + return ret; + + } + pr_info("All threads completed successfully.\n"); + pr_debug("Going for checking\n"); + /*Wait for the queue to get processed before checking if all replies are received*/ + if (!data->test_params.wait) + msleep_interruptible(1000); + ret = check_pings(data); + + if (ret == 0) + pr_debug("All replies received successfully.\n"); + else + pr_debug("All replies not received successfully.\n"); + + while (count > 0) { + kthread_stop(th_arr[count-1].thread); + --count; + } + param_macro = get_param_macro(TEST_CASE, PING, 0, + IPCLITE_TEST_STOP, 0); + ipclite_test_msg_send(IPCMEM_APPS, param_macro); + return ret; +} + +static int wrapper_ping_test(void *data_ptr) +{ + int ret = 0; + uint64_t param_macro; + struct ipclite_thread_data *t_data = data_ptr; + struct ipclite_test_data *data = t_data->data; + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + + for (int i = 0; i < data->test_params.num_itr; ++i) { + cores_completed = 0; + param_macro = get_param_macro(TEST_CASE, + PING, + 0, IPCLITE_TEST_START, 0); + /* Ping all senders to start sending messages. 
+ * If APPS is one of the senders start sending + */ + ping_sel_senders(param_macro); + if (is_selected_sender(IPCMEM_APPS)) + ping_test(data); + wait_event_interruptible_timeout(core_wq, + cores_completed == data->test_params.num_senders, + msecs_to_jiffies(1000)); + ret = check_pings(data); + if (ret != 0) + pr_info("Iteration %d of ping test failed\n", i+1); + else + pr_info("Iteration %d of ping test passed\n", i+1); + } + if (is_selected_sender(IPCMEM_APPS)) + complete(&test_done); + } + return 0; +} + +static int debug_tests(void *data_ptr) +{ + struct ipclite_thread_data *t_data = data_ptr; + uint64_t param; + int disabled_core = ffz(data->test_params.enabled_cores); + + while (!kthread_should_stop()) { + wait_event_interruptible(t_data->wq, t_data->run); + if (kthread_should_stop()) + break; + t_data->run = false; + param = get_param_macro(TEST_CASE, DEBUG, + PING_SEND, 0, 0); + if (disabled_core == IPCMEM_NUM_HOSTS) + pr_err("All cores are enabled. No Disabled cores\n"); + /* Pinging one enabled and disabled cores to get the error and dbg prints */ + if (disabled_core < IPCMEM_NUM_HOSTS) + ipclite_test_msg_send(disabled_core, param); + + param = get_param_macro(TEST_CASE, PING, 0, + IPCLITE_TEST_STOP, 0); + ipclite_test_msg_send(IPCMEM_APPS, param); + wait_event_interruptible_timeout(core_wq, + cores_completed == data->test_params.num_senders, + msecs_to_jiffies(1000)); + complete(&test_done); + } + return 0; +} + +static void ipclite_test_set_enabled_cores(void) +{ + if (data->test_params.enabled_cores < 0 || + data->test_params.enabled_cores > IPCLITE_TEST_ALL_CORES) { + pr_err("Invalid parameter value given to enabled cores\n"); + data->test_params.enabled_cores = IPCLITE_TEST_ALL_CORES; + return; + } + pr_info("Enabled cores set to %d\n", data->test_params.enabled_cores); +} + +static void ipclite_test_set_wait(void) +{ + uint64_t param; + + if (data->test_params.wait < 0) { + pr_err("Invalid parameter value given to wait\n"); + data->test_params.wait = 1; + return; + } + + pr_info("wait set to %d\n", data->test_params.wait); + + param = get_param_macro(WAIT, 0, data->test_params.wait, 0, 0); + ping_all_enabled_cores(param); +} + +static void ipclite_test_set_num_pings(void) +{ + uint64_t param; + + pr_info("num_pings set to %d\n", data->test_params.num_pings); + + param = get_param_macro(NUM_PINGS, 0, + data->test_params.num_pings, 0, 0); + ping_all_enabled_cores(param); +} + +static void ipclite_test_set_num_itr(void) +{ + uint64_t param; + + pr_info("num_itr set to %d\n", data->test_params.num_itr); + + param = get_param_macro(NUM_ITR, 1, + data->test_params.num_itr, 0, 0); + ping_all_enabled_cores(param); +} + +static void ipclite_test_set_receivers(void) +{ + uint64_t param; + + if (data->test_params.selected_receivers < 0 || + data->test_params.selected_receivers > IPCLITE_TEST_ALL_CORES) { + pr_err("Invalid parameter value given to selected_receivers\n"); + data->test_params.selected_receivers = 1; + data->test_params.num_receivers = 1; + return; + } + /* Check number of 1s using hamming weight function. 
+ * Number of 1s is number of receivers + */ + data->test_params.num_receivers = hweight_long(data->test_params.selected_receivers); + + pr_info("selected_receivers set to %d\n", data->test_params.selected_receivers); + + param = get_param_macro(RECEIVER_LIST, 0, + data->test_params.selected_receivers, 0, 0); + ping_all_enabled_cores(param); +} + +static void ipclite_test_set_senders(void) +{ + if (data->test_params.selected_senders < 0 || + data->test_params.selected_senders > IPCLITE_TEST_ALL_CORES) { + pr_err("Invalid parameter value given to selected_senders\n"); + data->test_params.selected_senders = 1; + data->test_params.num_senders = 1; + return; + } + + /* Check number of 1s using hamming weight function. */ + data->test_params.num_senders = hweight_long(data->test_params.selected_senders); + + pr_info("selected_senders set to %d\n", data->test_params.selected_senders); +} + +static void ipclite_test_set_num_threads(void) +{ + uint64_t param; + + if (data->test_params.num_thread < 0 || + data->test_params.num_thread > IPCLITE_TEST_MAX_THREADS) { + pr_err("Invalid parameter value given to num_thread\n"); + data->test_params.num_thread = 1; + return; + } + + pr_info("num_thread set to %d\n", data->test_params.num_thread); + + param = get_param_macro(NUM_THREADS, 0, + data->test_params.num_thread, 0, 0); + ping_all_enabled_cores(param); +} + +static void ipclite_test_set_test(void) +{ + uint64_t param; + int ret = 0; + + if (data->test_params.selected_test_case < 0 || data->test_params.selected_test_case > 8) { + pr_err("Invalid parameter value given to test_case\n"); + data->test_params.selected_test_case = 0; + return; + } + + pr_info("selected_test_case set to %d\n", data->test_params.selected_test_case); + param = get_param_macro(TEST_CASE, + data->test_params.selected_test_case, 0, + IPCLITE_TEST_START, 0); + + switch (data->test_params.selected_test_case) { + case PING: + ret = thread_init(&thread_data, data, wrapper_ping_test); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + break; + case NEGATIVE: + ping_sel_senders(param); + if (is_selected_sender(IPCMEM_APPS)) { + pr_info("Starting test %d for core %s\n", + NEGATIVE, core_name[IPCMEM_APPS]); + ret = thread_init(&thread_data, data, negative_tests); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + } + break; + case GLOBAL_ATOMIC: + ret = thread_init(&thread_data, data, global_atomics_test_wrapper); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + break; + case DEBUG: + ping_sel_senders(param); + if (is_selected_sender(IPCMEM_APPS)) { + ret = thread_init(&thread_data, data, debug_tests); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + } + break; + case SSR: + if (data->test_params.num_senders != 1) { + pr_err("SSR Testing requires only 1 core to be selected\n"); + return; + } + /* Find first set (ffs) to get the bit position/index of sender */ + data->ssr_client = ffs(data->test_params.selected_senders) - 1; + if (data->ssr_client == 0 || !is_enabled_core(data->ssr_client)) { + pr_err("Invalid core selected for SSR Testing\n"); + return; + } + pr_info("Starting test %d for core %s\n", + SSR, 
core_name[data->ssr_client]); + ret = thread_init(&thread_data, data, ssr_test); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + break; + case HW_MUTEX: + if (data->test_params.num_senders != 1) { + pr_err("HW Mutex Testing requires only 1 core to be selected\n"); + return; + } + + if (is_selected_sender(IPCMEM_APPS)) { + pr_info("Starting test %d for core %s\n", + HW_MUTEX, core_name[IPCMEM_APPS]); + ret = thread_init(&thread_data, data, hw_mutex_test); + if (ret != 0) { + pr_err("Could not create thread for testing\n"); + return; + } + thread_data.run = true; + wake_up_interruptible(&thread_data.wq); + } else + ping_sel_senders(param); + break; + default: + pr_err("Wrong input provided\n"); + return; + } + wait_for_completion(&test_done); + if (thread_data.thread != NULL) + ret = kthread_stop(thread_data.thread); + if (ret != 0) + pr_err("Test did not complete successfully\n"); + else + pr_info("Test completed successfully\n"); +} + +static int parse_param(char **temp_buf, int *addr) +{ + char *token; + int ret; + + token = strsep(temp_buf, " "); + if (!token) { + pr_err("Token value is NULL in parse param\n"); + return -EINVAL; + } + ret = kstrtoint(token, 0, addr); + if (ret < 0) { + pr_err("Parameter value not read correctly\n"); + return ret; + } + return 0; +} + +static ssize_t ipclite_test_params_write(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + char *temp_buf = kmalloc(strlen(buf)+1, GFP_KERNEL); + char *temp_ptr = temp_buf; + int ret, param = 0; + + if (!temp_buf) { + pr_err("Memory not allocated\n"); + return -EINVAL; + } + + ret = strscpy(temp_buf, buf, strlen(buf)+1); + + if (ret < 0) { + pr_err("User input is too large\n"); + goto exit; + } + + ret = parse_param(&temp_buf, ¶m); + if (ret != 0) + goto exit; + + if (param == ENABLED_CORES) { + ret = parse_param(&temp_buf, &data->test_params.enabled_cores); + if (ret == 0) + ipclite_test_set_enabled_cores(); + goto exit; + } else + data->test_params.selected_test_case = param; + + switch (data->test_params.selected_test_case) { + case PING: + ret = parse_param(&temp_buf, &data->test_params.selected_senders); + if (ret != 0) + break; + ipclite_test_set_senders(); + ret = parse_param(&temp_buf, &data->test_params.selected_receivers); + if (ret != 0) + break; + ipclite_test_set_receivers(); + ret = parse_param(&temp_buf, &data->test_params.num_pings); + if (ret != 0) + break; + ipclite_test_set_num_pings(); + ret = parse_param(&temp_buf, &data->test_params.wait); + if (ret != 0) + break; + ipclite_test_set_wait(); + ret = parse_param(&temp_buf, &data->test_params.num_itr); + if (ret != 0) + break; + ipclite_test_set_num_itr(); + ret = parse_param(&temp_buf, &data->test_params.num_thread); + if (ret != 0) + break; + ipclite_test_set_num_threads(); + break; + case NEGATIVE: + ret = parse_param(&temp_buf, &data->test_params.selected_senders); + if (ret != 0) + break; + ipclite_test_set_senders(); + ret = parse_param(&temp_buf, &data->test_params.selected_receivers); + if (ret != 0) + break; + ipclite_test_set_receivers(); + break; + case GLOBAL_ATOMIC: + ret = parse_param(&temp_buf, &data->test_params.selected_receivers); + if (ret != 0) + break; + ipclite_test_set_receivers(); + ret = parse_param(&temp_buf, &data->test_params.num_itr); + if (ret != 0) + break; + ipclite_test_set_num_itr(); + break; + case DEBUG: + ret = parse_param(&temp_buf, &data->test_params.selected_senders); + if 
(ret != 0) + break; + ipclite_test_set_senders(); + break; + case SSR: + ret = parse_param(&temp_buf, &data->test_params.selected_senders); + if (ret != 0) + break; + ipclite_test_set_senders(); + ret = parse_param(&temp_buf, &data->test_params.selected_receivers); + if (ret != 0) + break; + ipclite_test_set_receivers(); + ret = parse_param(&temp_buf, &data->test_params.num_pings); + if (ret != 0) + break; + ipclite_test_set_num_pings(); + break; + case HW_MUTEX: + ret = parse_param(&temp_buf, &data->test_params.selected_senders); + if (ret != 0) + break; + ipclite_test_set_senders(); + ret = parse_param(&temp_buf, &data->test_params.selected_receivers); + if (ret != 0) + break; + ipclite_test_set_receivers(); + break; + default: + pr_err("Wrong input provided\n"); + goto exit; + } + if (ret == 0) + ipclite_test_set_test(); +exit: + kfree(temp_ptr); + return count; +} + + + +static int ipclite_test_callback_fn(unsigned int client_id, long long msg, + void *data_ptr) +{ + struct ipclite_test_data *data = data_ptr; + uint64_t header, parameter_info, test_info, payload_info, + start_stop_info, pass_fail_info; + uint64_t reply_macro; + int ret = 0; + + /* Unpack the different bit fields from message value */ + header = (msg & GENMASK(63, 56))>>56; + parameter_info = (msg & GENMASK(55, 48))>>48; + test_info = (msg & GENMASK(47, 40))>>40; + payload_info = (msg & GENMASK(39, 16))>>16; + start_stop_info = (msg & GENMASK(15, 8))>>8; + pass_fail_info = (msg & GENMASK(7, 0)); + + if (!data) { + pr_err("Callback data pointer not loaded successfully\n"); + return -EFAULT; + } + + data->client_id = client_id; + + if (header != IPCLITE_TEST_HEADER) { + pr_err("Corrupted message packed received\n"); + return -EINVAL; + } + + pr_debug("The message received is %lx\n", msg); + + switch (test_info) { + case PING: + case NEGATIVE: + case DEBUG: + if (payload_info == PING_SEND) { + reply_macro = get_param_macro(TEST_CASE, + test_info, + PING_REPLY, + 0, 0); + ipclite_test_msg_send(client_id, reply_macro); + break; + } + if (payload_info == PING_REPLY) { + ping_receive(data); + break; + } + if (pass_fail_info == IPCLITE_TEST_PASS) + pr_info("Test passed on core %s\n", core_name[client_id]); + else if (pass_fail_info == IPCLITE_TEST_FAIL) + pr_info("Test failed on core %s\n", core_name[client_id]); + if (start_stop_info == IPCLITE_TEST_STOP) { + ++cores_completed; + if (cores_completed == data->test_params.num_senders) + pr_info("Test completed on all cores\n"); + if (is_selected_sender(IPCMEM_APPS)) + wake_up_interruptible(&core_wq); + else + complete(&test_done); + } + break; + case HW_MUTEX: + if (start_stop_info == IPCLITE_TEST_START) { + ret = ipclite_hw_mutex_release(); + if (ret == 0) + *((int *)data->global_memory->virt_base) = IPCMEM_APPS; + reply_macro = get_param_macro(TEST_CASE, + test_info, + HW_MUTEX_RELEASE, + IPCLITE_TEST_STOP, 0); + ipclite_test_msg_send(client_id, reply_macro); + + } + if (pass_fail_info == IPCLITE_TEST_PASS) + pr_info("HW Unlock Test passed on core %s\n", + core_name[client_id]); + else if (pass_fail_info == IPCLITE_TEST_FAIL) + pr_info("HW Unlock Test failed on core %s\n", + core_name[client_id]); + if (start_stop_info == IPCLITE_TEST_STOP) + complete(&test_done); + break; + case SSR: + if (payload_info == PING_SEND) { + reply_macro = get_param_macro(TEST_CASE, + test_info, + PING_REPLY, + 0, 0); + data->pings_received[client_id]++; + ipclite_test_msg_send(client_id, reply_macro); + if (data->pings_received[client_id] == data->test_params.num_pings) { + pr_info("Waking up 
ssr_wakeup_check_thread.\n"); + pr_info("Signaling other cores to make sure there is no other crash\n"); + wakeup_check.run = true; + wake_up_interruptible(&wakeup_check.wq); + bg_pings.run = true; + wake_up_interruptible(&bg_pings.wq); + } + } + if (payload_info == SSR_WAKEUP) { + if (start_stop_info == IPCLITE_TEST_STOP) { + ssr_complete = true; + pr_info("%s wakeup completed\n", + core_name[client_id]); + wake_up_interruptible(&ssr_wq); + } + } + if (pass_fail_info == IPCLITE_TEST_PASS) + pr_info("Test %d passed on core %s\n", + test_info, core_name[client_id]); + else if (pass_fail_info == IPCLITE_TEST_FAIL) + pr_info("Test %d failed on core %s\n", + test_info, core_name[client_id]); + break; + case GLOBAL_ATOMIC: + if (start_stop_info == IPCLITE_TEST_STOP) { + pr_debug("%s completed Global Atomics Test.\n", + core_name[client_id]); + if (payload_info == GLOBAL_ATOMICS_SET_CLR) + threads_completed++; + else + threads_completed += 2; + wake_up_interruptible(&thread_wq); + } + break; + default: + pr_info("Wrong input given\n"); + } + return 0; +} + +struct kobj_attribute ipclite_test_params = __ATTR(ipclite_test_params, + 0660, + NULL, + ipclite_test_params_write); + +static int ipclite_test_sysfs_node_setup(void) +{ + int ret = 0; + + sysfs_dir = kobject_create_and_add("ipclite_test", kernel_kobj); + if (sysfs_dir == NULL) { + pr_err("Cannot create sysfs directory\n"); + return -ENOENT; + } + + ret = sysfs_create_file(sysfs_dir, &ipclite_test_params.attr); + if (ret) { + pr_err("Cannot create sysfs file for ipclite test module. Error - %d\n", + ret); + return -ENOENT; + } + return 0; +} + +static int __init ipclite_test_init(void) +{ + int ret = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->global_memory = kzalloc(sizeof(*(data->global_memory)), + GFP_KERNEL); + if (!data->global_memory) { + kfree(data); + data = NULL; + return -ENOMEM; + } + ret = get_global_partition_info(data->global_memory); + if (ret != 0) { + pr_err("Unable to load global partition information\n"); + goto bail; + } + + ret = ipclite_register_test_client(ipclite_test_callback_fn, data); + if (ret != 0) { + pr_err("Could not register client\n"); + goto bail; + } + + ret = ipclite_test_sysfs_node_setup(); + if (ret != 0) { + pr_err("Failed to create sysfs interface\n"); + goto bail; + } + + init_test_params(); + return 0; +bail: + kfree(data->global_memory); + kfree(data); + data = NULL; + return ret; +} + +static void __exit ipclite_test_exit(void) +{ + pr_info("Removing IPCLite Test Module\n"); + sysfs_remove_file(sysfs_dir, &ipclite_test_params.attr); + kobject_put(sysfs_dir); + kfree(data->global_memory); + kfree(data); + data = NULL; +} + +module_init(ipclite_test_init); +module_exit(ipclite_test_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/msm/synx/test/ipclite_test.h b/msm/synx/test/ipclite_test.h new file mode 100644 index 0000000000..f7ce3ba744 --- /dev/null +++ b/msm/synx/test/ipclite_test.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include "../ipclite_client.h" +#include "../ipclite.h" + +/* General testing related configurations */ +#define IPCLITE_TEST_MAX_THREADS 5 +#define IPCLITE_TEST_HEADER 0xaa +#define IPCLITE_TEST_ALL_CORES GENMASK(IPCMEM_NUM_HOSTS - 1, 0) + +/* Synx Usecase related definitions */ +#define NUM_HANDLES 4096 +#define BITMAP_SIZE (NUM_HANDLES/32) +#define BITS(x) (sizeof(x)*8) + +struct handle_t { + int handle_bitmap[BITMAP_SIZE]; + int handle_data[NUM_HANDLES]; +}; + +/* Flags for Pass, Fail, Start, and Stop */ +#define IPCLITE_TEST_PASS 2 +#define IPCLITE_TEST_FAIL 1 + +#define IPCLITE_TEST_START 2 +#define IPCLITE_TEST_STOP 1 + +/* List of Cases Available for Testing */ +enum ipclite_test_type { + PING = 1, + NEGATIVE = 2, + GLOBAL_ATOMIC = 3, + DEBUG = 4, + SSR = 5, + HW_MUTEX = 6, +}; + +/* List of sysfs parameters */ +enum ipclite_test_param { + TEST_CASE = 1, + SENDER_LIST = 2, + RECEIVER_LIST = 3, + NUM_PINGS = 4, + WAIT = 5, + NUM_ITR = 6, + NUM_THREADS = 7, + ENABLED_CORES = 8, +}; + +/* List of subtests for HW Mutex Test */ +enum ipclite_test_hw_mutex_subtest { + HW_MUTEX_RELEASE = 1, +}; + +/* List of messages for SSR Testing */ +enum ipclite_test_ssr_subtest { + SSR_CRASHING = 1, + SSR_WAKEUP = 2, +}; + +/* List of subtest for Global Atomics Testing */ +enum ipclite_test_global_atomics_subtest { + GLOBAL_ATOMICS_INC = 1, + GLOBAL_ATOMICS_DEC = 2, + GLOBAL_ATOMICS_INC_DEC = 3, + GLOBAL_ATOMICS_SET_CLR = 4, +}; + +/* Types of pings and replies to be sent and received */ +enum ipclite_test_ping { + PING_SEND = 10, + PING_REPLY = 11, +}; + +static char core_name[IPCMEM_NUM_HOSTS][13] = { + "IPCMEM_APPS", + "IPCMEM_MODEM", + "IPCMEM_LPASS", + "IPCMEM_SLPI", + "IPCMEM_GPU", + "IPCMEM_CDSP", + "IPCMEM_CVP", + "IPCMEM_CAM", + "IPCMEM_VPU" +}; + +struct ipclite_test_params { + int wait; + int num_pings; + int num_itr; + int selected_senders; + int selected_receivers; + int selected_test_case; + int enabled_cores; + int num_thread; + int num_senders; + int num_receivers; +}; + +struct ipclite_test_data { + int pings_sent[IPCMEM_NUM_HOSTS]; + int pings_received[IPCMEM_NUM_HOSTS]; + int client_id; + struct global_region_info *global_memory; + struct ipclite_test_params test_params; + int ssr_client; +}; + +struct ipclite_thread_data { + struct task_struct *thread; + void *data; + wait_queue_head_t wq; + bool run; +}; + +static int ipclite_test_callback_fn(unsigned int client_id, long long msg, void *d); diff --git a/pineapple.bzl b/pineapple.bzl index 5baba7f38e..f6ee82f88d 100644 --- a/pineapple.bzl +++ b/pineapple.bzl @@ -8,6 +8,7 @@ def define_pineapple(): modules = [ "synx-driver", "ipclite", + "ipclite_test", ], config_options = [ "TARGET_SYNX_ENABLE", diff --git a/synx_kernel_board.mk b/synx_kernel_board.mk index b4fd6f17c4..8d324311ae 100644 --- a/synx_kernel_board.mk +++ b/synx_kernel_board.mk @@ -13,6 +13,7 @@ ifeq ($(TARGET_SYNX_ENABLE), true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite_test.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko #BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko diff --git a/synx_modules.bzl b/synx_modules.bzl index 7cbee89a75..d6ae761a25 100644 --- a/synx_modules.bzl +++ b/synx_modules.bzl @@ 
-23,3 +23,10 @@ register_synx_module( "synx/ipclite.c", ], ) +register_synx_module( + name = "ipclite_test", + path = "msm", + srcs = [ + "synx/test/ipclite_test.c", + ], +) From 88d2c55b0c3107307f5fe38ed9ea9ddb76aed5cf Mon Sep 17 00:00:00 2001 From: Viraj Mandlekar Date: Mon, 17 Apr 2023 16:55:30 +0530 Subject: [PATCH 32/42] msm: synx: ipclite: Versioning 1. IPCLite now support downgrading of APIs depending on the version. 2. Version can be configured from Device Tree 3. Features can be configured from Device Tree 4. FW would downgrade to compatible versions. Change-Id: Ie6a4d94f63a5ed4bea6327ced218cfb75fb9b8e6 Signed-off-by: Viraj Mandlekar --- msm/synx/ipclite.c | 834 +++++++++++++++++++++++++++++---------------- msm/synx/ipclite.h | 42 ++- 2 files changed, 581 insertions(+), 295 deletions(-) diff --git a/msm/synx/ipclite.c b/msm/synx/ipclite.c index 741d36f81a..8ce478017f 100644 --- a/msm/synx/ipclite.c +++ b/msm/synx/ipclite.c @@ -23,11 +23,6 @@ #include "ipclite_client.h" #include "ipclite.h" -#define GLOBAL_ATOMICS_ENABLED 1 -#define GLOBAL_ATOMICS_DISABLED 0 -#define FIFO_FULL_RESERVE 8 -#define FIFO_ALIGNMENT 8 - static struct ipclite_info *ipclite; static struct ipclite_client synx_client; static struct ipclite_client test_client; @@ -37,10 +32,10 @@ static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem; static struct mutex ssr_mutex; static struct kobject *sysfs_kobj; -static uint32_t enabled_hosts, partitions; -static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED; static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO; static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump; +static uint32_t enabled_hosts, partitions, major_ver, minor_ver; +static uint64_t feature_mask; static inline bool is_host_enabled(uint32_t host) { @@ -72,7 +67,7 @@ static void ipclite_inmem_log(const char *psztStr, ...) 
static void ipclite_dump_debug_struct(void) { - int i, host; + int i = 0, host = 0; struct ipclite_debug_struct *temp_dbg_struct; /* Check if debug structures are initialized */ @@ -82,7 +77,7 @@ static void ipclite_dump_debug_struct(void) } /* Check if debug structures are enabled before printing */ - if (!(ipclite_debug_control & IPCLITE_DBG_STRUCT)) { + if (!(IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT))) { pr_err("Debug Structures not enabled\n"); return; } @@ -143,7 +138,7 @@ static void ipclite_dump_debug_struct(void) static void ipclite_dump_inmem_logs(void) { - int i; + int i = 0; uint32_t local_index = 0; /* Check if debug and inmem structures are initialized */ @@ -153,7 +148,7 @@ static void ipclite_dump_inmem_logs(void) } /* Check if debug structures are enabled before printing */ - if (!(ipclite_debug_control & IPCLITE_INMEM_LOG)) { + if (!(IS_DEBUG_CONFIG(IPCLITE_INMEM_LOG))) { pr_err("In-Memory Logs not enabled\n"); return; } @@ -185,17 +180,16 @@ int ipclite_hw_mutex_acquire(void) int ret; if (unlikely(!ipclite)) { - IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized"); + pr_err("IPCLite not initialized\n"); return -ENOMEM; } ret = hwspin_lock_timeout_irqsave(ipclite->hwlock, HWSPINLOCK_TIMEOUT, &ipclite->hw_mutex_flags); if (ret) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed"); + pr_err("Hw mutex lock acquire failed\n"); return ret; } ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS; - IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired"); return ret; } EXPORT_SYMBOL(ipclite_hw_mutex_acquire); @@ -203,7 +197,7 @@ EXPORT_SYMBOL(ipclite_hw_mutex_acquire); int ipclite_hw_mutex_release(void) { if (unlikely(!ipclite)) { - IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized"); + pr_err("IPCLite not initialized\n"); return -ENOMEM; } if (ipclite->ipcmem.toc_data.host_info->hwlock_owner != IPCMEM_APPS) @@ -211,45 +205,61 @@ int ipclite_hw_mutex_release(void) ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->hw_mutex_flags); - IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock released"); return 0; } EXPORT_SYMBOL(ipclite_hw_mutex_release); +/* Atomic Functions Start */ void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data) { + BUG_ON(addr == NULL); + atomic_set(addr, data); } EXPORT_SYMBOL(ipclite_atomic_init_u32); void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data) { + BUG_ON(addr == NULL); + atomic_set(addr, data); } EXPORT_SYMBOL(ipclite_atomic_init_i32); void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data) { + BUG_ON(addr == NULL); + ATOMIC_HW_MUTEX_ACQUIRE; + atomic_set(addr, data); + ATOMIC_HW_MUTEX_RELEASE; } EXPORT_SYMBOL(ipclite_global_atomic_store_u32); void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data) { + BUG_ON(addr == NULL); + ATOMIC_HW_MUTEX_ACQUIRE; + atomic_set(addr, data); + ATOMIC_HW_MUTEX_RELEASE; } EXPORT_SYMBOL(ipclite_global_atomic_store_i32); uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr) { - uint32_t ret; + uint32_t ret = 0; + + BUG_ON(addr == NULL); ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_read(addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; @@ -258,10 +268,14 @@ EXPORT_SYMBOL(ipclite_global_atomic_load_u32); int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr) { - int32_t ret; + int32_t ret = 0; + + BUG_ON(addr == NULL); ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_read(addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; @@ -270,11 +284,14 
@@ EXPORT_SYMBOL(ipclite_global_atomic_load_i32); uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr) { - uint32_t ret; - uint32_t mask = (1 << nr); + uint32_t ret = 0, mask = (1 << nr); + + BUG_ON(addr == NULL); ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_fetch_or(mask, addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; @@ -283,11 +300,14 @@ EXPORT_SYMBOL(ipclite_global_test_and_set_bit); uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr) { - uint32_t ret; - uint32_t mask = (1 << nr); + uint32_t ret = 0, mask = (1 << nr); + + BUG_ON(addr == NULL); ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_fetch_and(~mask, addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; @@ -298,8 +318,12 @@ int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr) { int32_t ret = 0; + BUG_ON(addr == NULL); + ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_fetch_add(1, addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; @@ -310,19 +334,23 @@ int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr) { int32_t ret = 0; + BUG_ON(addr == NULL); + ATOMIC_HW_MUTEX_ACQUIRE; + ret = atomic_fetch_sub(1, addr); + ATOMIC_HW_MUTEX_RELEASE; return ret; } EXPORT_SYMBOL(ipclite_global_atomic_dec); +/* Atomic Functions End */ static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo) { - size_t len; - u32 head; - u32 tail; + size_t len = 0; + u32 head = 0, tail = 0; head = le32_to_cpu(*rx_fifo->head); tail = le32_to_cpu(*rx_fifo->tail); @@ -345,8 +373,8 @@ static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo) static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo, void *data, size_t count) { - size_t len; - u32 tail; + size_t len = 0; + u32 tail = 0; tail = le32_to_cpu(*rx_fifo->tail); @@ -367,7 +395,7 @@ static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo, static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, size_t count, uint32_t core_id) { - u32 tail; + u32 tail = 0; tail = le32_to_cpu(*rx_fifo->tail); @@ -378,7 +406,7 @@ static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, *rx_fifo->tail = cpu_to_le32(tail); /* Storing the debug data in debug structures */ - if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) { ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[1] = ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0]; ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0] = @@ -398,9 +426,7 @@ static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo, static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo) { - u32 head; - u32 tail; - u32 avail; + u32 head = 0, tail = 0, avail = 0; head = le32_to_cpu(*tx_fifo->head); tail = le32_to_cpu(*tx_fifo->tail); @@ -425,7 +451,7 @@ static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo, unsigned int head, const void *data, size_t count) { - size_t len; + size_t len = 0; if (WARN_ON_ONCE(head > tx_fifo->length)) return head; @@ -447,7 +473,7 @@ static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo, static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo, const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id) { - unsigned int head; + unsigned int head = 0; head = le32_to_cpu(*tx_fifo->head); head = ipcmem_tx_write_one(tx_fifo, head, data, dlen); @@ -465,7 +491,7 @@ static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo, *tx_fifo->head, core_id, signal_id); /* Storing the debug data in debug structures */ - if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) { 
ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[1] = ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0]; ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0] = @@ -516,8 +542,8 @@ static void ipclite_tx_write(struct ipclite_channel *channel, static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail) { - uint64_t data; int ret = 0; + uint64_t data = 0; if (avail < sizeof(data)) { IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n", @@ -538,8 +564,8 @@ static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail) static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail) { - uint64_t data; int ret = 0; + uint64_t data = 0; if (avail < sizeof(data)) { IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n", @@ -560,11 +586,11 @@ static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail) static irqreturn_t ipclite_intr(int irq, void *data) { + int ret = 0; + unsigned int avail = 0; + uint64_t msg = 0; struct ipclite_channel *channel; struct ipclite_irq_info *irq_info; - unsigned int avail = 0; - int ret = 0; - uint64_t msg; irq_info = (struct ipclite_irq_info *)data; channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]); @@ -573,7 +599,7 @@ static irqreturn_t ipclite_intr(int irq, void *data) channel->remote_pid, irq_info->signal_id); /* Storing the debug data in debug structures */ - if (ipclite_debug_control & IPCLITE_DBG_STRUCT) { + if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) { ipclite_dbg_struct->dbg_info_host[channel->remote_pid].num_intr++; ipclite_dbg_struct->dbg_info_overall.last_recv_host_id = channel->remote_pid; ipclite_dbg_struct->dbg_info_overall.last_sigid_recv = irq_info->signal_id; @@ -589,7 +615,7 @@ static irqreturn_t ipclite_intr(int irq, void *data) } IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n"); } else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) { - IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is currently not enabled\n"); + IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is not enabled using IPCC signals\n"); } else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) { for (;;) { avail = ipclite_rx_avail(channel); @@ -610,8 +636,8 @@ static irqreturn_t ipclite_intr(int irq, void *data) static int ipclite_tx(struct ipclite_channel *channel, uint64_t data, size_t dlen, uint32_t ipclite_signal) { - unsigned long flags; int ret = 0; + unsigned long flags; if (channel->status != ACTIVE) { if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) { @@ -639,7 +665,7 @@ static int ipclite_tx(struct ipclite_channel *channel, return ret; } -static int ipclite_send_debug_info(int32_t proc_id) +static int ipclite_notify_core(int32_t proc_id, int32_t signal_id) { int ret = 0; struct ipclite_channel *channel; @@ -659,190 +685,26 @@ static int ipclite_send_debug_info(int32_t proc_id) } } - ret = mbox_send_message(channel->irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, NULL); + ret = mbox_send_message(channel->irq_info[signal_id].mbox_chan, NULL); if (ret < 0) { IPCLITE_OS_LOG(IPCLITE_ERR, - "Debug Signal sending failed to Core : %d Signal : %d ret : %d\n", - proc_id, IPCLITE_DEBUG_SIGNAL, ret); + "Signal sending failed to Core : %d Signal : %d ret : %d\n", + proc_id, signal_id, ret); return ret; } IPCLITE_OS_LOG(IPCLITE_DBG, - "Debug Signal send completed to core : %d signal : %d ret : %d\n", - proc_id, IPCLITE_DEBUG_SIGNAL, ret); + "Signal send completed to core : %d signal : %d ret : 
%d\n", + proc_id, signal_id, ret); return 0; } -int ipclite_ssr_update(int32_t proc_id) -{ - int ret = 0; - struct ipclite_channel *channel; - - if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); - return -EINVAL; - } - channel = &ipclite->channel[proc_id]; - - if (channel->status != ACTIVE) { - if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) { - channel->status = ACTIVE; - } else { - IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id); - return -EOPNOTSUPP; - } - } - - ret = mbox_send_message(channel->irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL); - if (ret < 0) { - IPCLITE_OS_LOG(IPCLITE_ERR, - "SSR Signal sending failed to Core : %d Signal : %d ret : %d\n", - proc_id, IPCLITE_SSR_SIGNAL, ret); - return ret; - } - - IPCLITE_OS_LOG(IPCLITE_DBG, - "SSR Signal send completed to core : %d signal : %d ret : %d\n", - proc_id, IPCLITE_SSR_SIGNAL, ret); - return 0; -} - -void ipclite_recover(enum ipcmem_host_type core_id) -{ - int ret, host, host0, host1; - uint32_t p; - - IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id); - - /* verify and reset the hw mutex lock */ - if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) { - ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; - hwspin_unlock_raw(ipclite->hwlock); - IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n"); - } - - mutex_lock(&ssr_mutex); - /* Set the Global Channel Status to 0 to avoid Race condition */ - for (p = 0; p < partitions; p++) { - host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0; - host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1; - if (host0 != core_id && host1 != core_id) - continue; - - ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) - (&(ipclite->ipcmem.partition[p]->hdr.status)), 0); - - IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", - host0, host1, ipclite->ipcmem.partition[p]->hdr.status); - } - - /* Resets the TX/RX queue */ - *(ipclite->channel[core_id].tx_fifo->head) = 0; - *(ipclite->channel[core_id].rx_fifo->tail) = 0; - - IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n", - *(ipclite->channel[core_id].tx_fifo->head)); - IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n", - *(ipclite->channel[core_id].rx_fifo->tail)); - - /* Increment the Global Channel Status for APPS and crashed core*/ - ipclite_global_atomic_inc((ipclite_atomic_int32_t *) - ipclite->channel[core_id].gstatus_ptr); - - ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr; - - /* Update other cores about SSR */ - for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { - if (!is_host_enabled(host) || host == core_id) - continue; - ret = ipclite_ssr_update(host); - if (ret < 0) - IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host); - else - IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host); - } - mutex_unlock(&ssr_mutex); - - /* Dump the debug information */ - if (ipclite_debug_dump & IPCLITE_DUMP_SSR) { - ipclite_dump_debug_struct(); - ipclite_dump_inmem_logs(); - } - - return; -} -EXPORT_SYMBOL(ipclite_recover); - -int ipclite_msg_send(int32_t proc_id, uint64_t data) -{ - int ret = 0; - - if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); - return -EINVAL; - } - - ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), - IPCLITE_MSG_SIGNAL); - - IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core 
: %d signal : %d ret : %d\n", - proc_id, IPCLITE_MSG_SIGNAL, ret); - return ret; -} -EXPORT_SYMBOL(ipclite_msg_send); - -int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv) -{ - if (!cb_func_ptr) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); - return -EINVAL; - } - synx_client.callback = cb_func_ptr; - synx_client.priv_data = priv; - synx_client.reg_complete = 1; - IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n"); - return 0; -} -EXPORT_SYMBOL(ipclite_register_client); - -int ipclite_test_msg_send(int32_t proc_id, uint64_t data) -{ - int ret = 0; - - if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); - return -EINVAL; - } - - ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), - IPCLITE_TEST_SIGNAL); - - IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n", - proc_id, IPCLITE_TEST_SIGNAL, ret); - return ret; -} -EXPORT_SYMBOL(ipclite_test_msg_send); - -int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv) -{ - if (!cb_func_ptr) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); - return -EINVAL; - } - test_client.callback = cb_func_ptr; - test_client.priv_data = priv; - test_client.reg_complete = 1; - IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n"); - return 0; -} -EXPORT_SYMBOL(ipclite_register_test_client); - static int map_ipcmem(struct ipclite_info *ipclite, const char *name) { + int ret = 0; struct device *dev; struct device_node *np; struct resource r; - int ret = 0; dev = ipclite->dev; @@ -1004,15 +866,14 @@ static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset /*Set up info to parse partition entries*/ ipcmem->toc_data.partition_info->num_entries = partitions = num_entry; ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry); + return 0; } static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn) { - int ret; - uint32_t remote_pid; - uint32_t host_count = 0; - uint32_t gmem_offset = 0; + int ret = 0; + uint32_t remote_pid = 0, host_count = 0, gmem_offset = 0; struct device_node *cn; for_each_available_child_of_node(pn, cn) { @@ -1053,6 +914,7 @@ static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn) ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED; IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n"); + return 0; } @@ -1060,7 +922,7 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n struct ipclite_channel *channel) { int ret = 0; - u32 index; + u32 index = 0; struct ipclite_irq_info *irq_info; struct device *dev; char strs[MAX_CHANNEL_SIGNALS][IPCLITE_SIGNAL_LABEL_SIZE] = { @@ -1081,8 +943,7 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n return ret; } - ret = of_property_read_u32(dev->of_node, "index", - &index); + ret = of_property_read_u32(dev->of_node, "index", &index); if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse index\n"); goto err_dev; @@ -1114,7 +975,8 @@ static int ipclite_channel_irq_init(struct device *parent, struct device_node *n goto err_dev; } IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt init completed, ret = %d\n", ret); - return 0; + + return ret; err_dev: device_unregister(dev); @@ -1122,34 +984,10 @@ err_dev: return ret; } -int32_t get_global_partition_info(struct global_region_info *global_ipcmem) -{ - struct ipcmem_global_partition *global_partition; - - if (!ipclite) { - 
IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n"); - return -ENOMEM; - } - - if (!global_ipcmem) - return -EINVAL; - - global_partition = ipclite->ipcmem.global_partition; - global_ipcmem->virt_base = (void *)((char *)global_partition + - global_partition->hdr.region_offset); - global_ipcmem->size = (size_t)(global_partition->hdr.region_size); - - IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base, - global_ipcmem->size); - return 0; -} -EXPORT_SYMBOL(get_global_partition_info); - static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid, int remote_pid) { - uint32_t p; - uint32_t found = -1; + uint32_t p = 0, found = -1; for (p = 0; p < partitions; p++) { if (ipcmem.toc_data.partition_entry[p].host0 == local_pid @@ -1178,16 +1016,13 @@ static void ipclite_channel_release(struct device *dev) static int ipclite_channel_init(struct device *parent, struct device_node *node) { + int ret = 0; + u32 local_pid = 0, remote_pid = 0; + u32 *descs = NULL; struct ipclite_fifo *rx_fifo; struct ipclite_fifo *tx_fifo; - struct device *dev; - u32 local_pid, remote_pid, global_atomic; - u32 *descs; - int ret = 0; - struct device_node *child; - struct ipcmem_partition_header *partition_hdr; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1217,14 +1052,6 @@ static int ipclite_channel_init(struct device *parent, } IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid); - ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic); - if (ret) { - dev_err(dev, "failed to parse global_atomic\n"); - goto err_put_dev; - } - if (global_atomic == 0) - global_atomic_support = GLOBAL_ATOMICS_DISABLED; - rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL); tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL); if (!rx_fifo || !tx_fifo) { @@ -1327,6 +1154,7 @@ static void probe_subsystem(struct device *dev, struct device_node *np) IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Channel init failed\n"); } +/* IPCLite Debug related functions start */ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { @@ -1355,7 +1183,7 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj, for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { if (!is_host_enabled(host)) continue; - ret = ipclite_send_debug_info(host); + ret = ipclite_notify_core(host, IPCLITE_DEBUG_SIGNAL); if (ret < 0) IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host); else @@ -1393,7 +1221,7 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj, for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { if (!is_host_enabled(host)) continue; - ret = ipclite_send_debug_info(host); + ret = ipclite_notify_core(host, IPCLITE_DEBUG_SIGNAL); if (ret < 0) IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host); else @@ -1471,7 +1299,7 @@ static int ipclite_debug_sysfs_setup(void) return ret; } -static int ipclite_debug_info_setup(void) +static int ipclite_debug_mem_setup(void) { /* Setting up the Debug Structures */ ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base + @@ -1500,23 +1328,89 @@ static int ipclite_debug_info_setup(void) return 0; } -static int ipclite_probe(struct platform_device *pdev) +static int ipclite_debug_setup(void) { int ret = 0; - int hwlock_id; + + /* Set up sysfs for debug */ + ret = ipclite_debug_sysfs_setup(); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite 
Debug Sysfs\n"); + return ret; + } + + /* Mapping Debug Memory */ + ret = ipclite_debug_mem_setup(); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n"); + return ret; + } + + /* Update the Global Debug variable for FW cores */ + ipclite_dbg_info->debug_level = ipclite_debug_level; + ipclite_dbg_info->debug_control = ipclite_debug_control; + + return ret; +} +/* IPCLite Debug related functions end */ + +/* IPCLite Features setup related functions start */ +static int ipclite_feature_setup(struct device_node *pn) +{ + int ret = 0; + uint32_t feature_mask_l = 0, feature_mask_h = 0; + + /* Parse the feature related DT entries and store the values locally */ + ret = of_property_read_u32(pn, "feature_mask_low", &feature_mask_l); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse feature_mask_low\n"); + return ret; + } + + ret = of_property_read_u32(pn, "feature_mask_high", &feature_mask_h); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse feature_mask_high\n"); + return ret; + } + + /* Combine feature_mask_low and feature_mask_high into 64-bit feature_mask */ + feature_mask = (uint64_t) feature_mask_h << 32 | feature_mask_l; + + /* Update the feature mask to TOC for FW */ + ipclite->ipcmem.toc->hdr.feature_mask = feature_mask; + + /* Set up Global Atomics Feature*/ + if (!(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC))) + IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Global Atomic Support Disabled\n"); + + /* Set up Test Suite Feature*/ + if (!(IS_FEATURE_CONFIG(IPCLITE_TEST_SUITE))) + IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Test Suite Disabled\n"); + + return ret; +} +/* IPCLite Features setup related functions end */ + +/* API Definition Start - Minor Version 0*/ +static int ipclite_init_v0(struct platform_device *pdev) +{ + int ret = 0, hwlock_id = 0; struct ipcmem_region *mem; struct device_node *cn; struct device_node *pn = pdev->dev.of_node; struct ipclite_channel broadcast; + /* Allocate memory for IPCLite */ ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL); if (!ipclite) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Memory Allocation Failed\n"); ret = -ENOMEM; goto error; } ipclite->dev = &pdev->dev; + /* Parse HW Lock from DT */ hwlock_id = of_hwspin_lock_get_id(pn, 0); if (hwlock_id < 0) { if (hwlock_id != -EPROBE_DEFER) @@ -1526,6 +1420,7 @@ static int ipclite_probe(struct platform_device *pdev) } IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id); + /* Reserve a HWSpinLock for later use */ ipclite->hwlock = hwspin_lock_request_specific(hwlock_id); if (!ipclite->hwlock) { IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n"); @@ -1538,6 +1433,7 @@ static int ipclite_probe(struct platform_device *pdev) /* Initializing Local Mutex Lock for SSR functionality */ mutex_init(&ssr_mutex); + /* Map to IPCLite Memory */ ret = map_ipcmem(ipclite, "memory-region"); if (ret) { IPCLITE_OS_LOG(IPCLITE_ERR, "failed to map ipcmem\n"); @@ -1552,47 +1448,44 @@ static int ipclite_probe(struct platform_device *pdev) goto release; } - /* Set up sysfs for debug */ - ret = ipclite_debug_sysfs_setup(); - if (ret) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n"); - goto release; - } - - /* Mapping Debug Memory */ - ret = ipclite_debug_info_setup(); - if (ret) { - IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n"); - goto release; - } - /* Setup Channel for each Remote Subsystem */ for_each_available_child_of_node(pn, cn) probe_subsystem(&pdev->dev, cn); - /* Broadcast 
init_done signal to all subsystems once mbox channels - * are set up - */ + + /* Broadcast init_done signal to all subsystems once mbox channels are set up */ broadcast = ipclite->channel[IPCMEM_APPS]; - ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, - NULL); + ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, NULL); if (ret < 0) goto mem_release; mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0); - if (global_atomic_support) { - ipclite->ipcmem.toc->hdr.feature_mask |= GLOBAL_ATOMIC_SUPPORT_BMSK; + /* Debug Setup */ + ret = ipclite_debug_setup(); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Debug Setup Failed\n"); + goto release; } - IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", global_atomic_support); + + /* Features Setup */ + ret = ipclite_feature_setup(pn); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Features Setup Failed\n"); + goto release; + } + + /* Update TOC with version entries for FW */ + ipclite->ipcmem.toc->hdr.major_version = major_ver; + ipclite->ipcmem.toc->hdr.minor_version = minor_ver; /* Should be called after all Global TOC related init is done */ insert_magic_number(); - /* Update the Global Debug variable for FW cores */ - ipclite_dbg_info->debug_level = ipclite_debug_level; - ipclite_dbg_info->debug_control = ipclite_debug_control; + IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Version : %d.%d Feature Mask : 0x%llx\n", + major_ver, minor_ver, feature_mask); + + IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Probe Completed Successfully\n"); - IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite probe completed successfully\n"); return ret; mem_release: @@ -1605,6 +1498,363 @@ mem_release: release: kfree(ipclite); ipclite = NULL; +error: + return ret; +} + +static int ipclite_register_client_v0(IPCLite_Client cb_func_ptr, void *priv) +{ + if (!cb_func_ptr) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); + return -EINVAL; + } + + synx_client.callback = cb_func_ptr; + synx_client.priv_data = priv; + synx_client.reg_complete = 1; + + IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n"); + + return 0; +} + +static int ipclite_register_test_client_v0(IPCLite_Client cb_func_ptr, void *priv) +{ + if (!cb_func_ptr) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n"); + return -EINVAL; + } + + test_client.callback = cb_func_ptr; + test_client.priv_data = priv; + test_client.reg_complete = 1; + + IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n"); + + return 0; +} + +static int ipclite_msg_send_v0(int32_t proc_id, uint64_t data) +{ + int ret = 0; + + /* Check for valid core id */ + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); + return -EINVAL; + } + + /* Send the data to the core */ + ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_MSG_SIGNAL); + if (ret < 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Message send failed to core : %d signal:%d ret:%d\n", + proc_id, IPCLITE_MSG_SIGNAL, ret); + return ret; + } + + IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_MSG_SIGNAL, ret); + return ret; +} + +static int ipclite_test_msg_send_v0(int32_t proc_id, uint64_t data) +{ + int ret = 0; + + /* Check for valid core id */ + if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id); + return -EINVAL; + } + + /* Send the data to the core */ 
+ ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_TEST_SIGNAL); + if (ret < 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Message send failed to core : %d signal:%d ret:%d\n", + proc_id, IPCLITE_TEST_SIGNAL, ret); + return ret; + } + + IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n", + proc_id, IPCLITE_TEST_SIGNAL, ret); + return ret; +} + +static int32_t get_global_partition_info_v0(struct global_region_info *global_ipcmem) +{ + struct ipcmem_global_partition *global_partition; + + if (!ipclite) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n"); + return -ENOMEM; + } + + if (!global_ipcmem) + return -EINVAL; + + global_partition = ipclite->ipcmem.global_partition; + global_ipcmem->virt_base = (void *)((char *)global_partition + + global_partition->hdr.region_offset); + global_ipcmem->size = (size_t)(global_partition->hdr.region_size); + + IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base, + global_ipcmem->size); + return 0; +} + +static void ipclite_recover_v0(enum ipcmem_host_type core_id) +{ + int ret = 0, host = 0, host0 = 0, host1 = 0; + uint32_t p = 0; + + IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id); + + /* verify and reset the hw mutex lock */ + if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) { + ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST; + hwspin_unlock_raw(ipclite->hwlock); + IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n"); + } + + mutex_lock(&ssr_mutex); + + /* Set the Global Channel Status to 0 to avoid Race condition */ + for (p = 0; p < partitions; p++) { + host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0; + host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1; + if (host0 != core_id && host1 != core_id) + continue; + + ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *) + (&(ipclite->ipcmem.partition[p]->hdr.status)), 0); + + IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", + host0, host1, ipclite->ipcmem.partition[p]->hdr.status); + } + + /* Resets the TX/RX queue */ + *(ipclite->channel[core_id].tx_fifo->head) = 0; + *(ipclite->channel[core_id].rx_fifo->tail) = 0; + + IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n", + *(ipclite->channel[core_id].tx_fifo->head)); + IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n", + *(ipclite->channel[core_id].rx_fifo->tail)); + + /* Increment the Global Channel Status for APPS and crashed core*/ + ipclite_global_atomic_inc((ipclite_atomic_int32_t *) + ipclite->channel[core_id].gstatus_ptr); + + ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr; + + /* Update other cores about SSR */ + for (host = 1; host < IPCMEM_NUM_HOSTS; host++) { + if (!is_host_enabled(host) || host == core_id) + continue; + ret = ipclite_notify_core(host, IPCLITE_SSR_SIGNAL); + if (ret < 0) + IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host); + else + IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host); + } + + mutex_unlock(&ssr_mutex); + + /* Dump the debug information */ + if (ipclite_debug_dump & IPCLITE_DUMP_SSR) { + ipclite_dump_debug_struct(); + ipclite_dump_inmem_logs(); + } +} +/* API Definition End - Minor Version 0*/ + +/* Versioned Functions Start */ +int ipclite_init(struct platform_device *pdev) +{ + if (api_list_t.init == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.init(pdev); +} + +int 
ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv) +{ + if (api_list_t.register_client == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.register_client(cb_func_ptr, priv); +} +EXPORT_SYMBOL(ipclite_register_client); + +int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv) +{ + if (api_list_t.register_test_client == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.register_test_client(cb_func_ptr, priv); +} +EXPORT_SYMBOL(ipclite_register_test_client); + +int ipclite_msg_send(int32_t proc_id, uint64_t data) +{ + if (api_list_t.msg_send == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.msg_send(proc_id, data); +} +EXPORT_SYMBOL(ipclite_msg_send); + +int ipclite_test_msg_send(int32_t proc_id, uint64_t data) +{ + if (api_list_t.test_msg_send == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.test_msg_send(proc_id, data); +} +EXPORT_SYMBOL(ipclite_test_msg_send); + +void ipclite_recover(enum ipcmem_host_type core_id) +{ + if (api_list_t.recover == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return; + } + + api_list_t.recover(core_id); +} +EXPORT_SYMBOL(ipclite_recover); + +int32_t get_global_partition_info(struct global_region_info *global_ipcmem) +{ + if (api_list_t.partition_info == NULL) { + IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__); + return -EINVAL; + } + + return api_list_t.partition_info(global_ipcmem); +} +EXPORT_SYMBOL(get_global_partition_info); +/* Versioned Functions End */ + +/* List of APIs based on the version */ +struct ipclite_api_list api_list_version[] = { + /* Minor Version 0 */ + { + .init = ipclite_init_v0, + .register_client = ipclite_register_client_v0, + .register_test_client = ipclite_register_test_client_v0, + .msg_send = ipclite_msg_send_v0, + .test_msg_send = ipclite_test_msg_send_v0, + .partition_info = get_global_partition_info_v0, + .recover = ipclite_recover_v0, + }, +}; + +/* IPCLite Version setup related functions start */ +static int ipclite_update_version_api(struct ipclite_api_list *res_str, + struct ipclite_api_list *ver_str) +{ + if (res_str == NULL || ver_str == NULL) + return -EINVAL; + + /* Register APIs based on the version */ + res_str->init = (ver_str->init != NULL) ? + ver_str->init : res_str->init; + + res_str->register_client = (ver_str->register_client != NULL) ? + ver_str->register_client : res_str->register_client; + res_str->register_test_client = (ver_str->register_test_client != NULL) ? + ver_str->register_test_client : res_str->register_test_client; + + res_str->msg_send = (ver_str->msg_send != NULL) ? + ver_str->msg_send : res_str->msg_send; + res_str->test_msg_send = (ver_str->test_msg_send != NULL) ? + ver_str->test_msg_send : res_str->test_msg_send; + + res_str->partition_info = (ver_str->partition_info != NULL) ? + ver_str->partition_info : res_str->partition_info; + res_str->recover = (ver_str->recover != NULL) ? 
+ ver_str->recover : res_str->recover; + + return 0; +} + +static int ipclite_register_api(void) +{ + int ret = 0, ver_itr = 0; + + /* Register APIs based on the version */ + for (ver_itr = 0; ver_itr <= minor_ver; ver_itr++) { + ret = ipclite_update_version_api(&api_list_t, &api_list_version[ver_itr]); + if (ret != 0) + return ret; + } + + return ret; +} + +static int ipclite_version_setup(struct device_node *pn) +{ + int ret = 0; + + /* Parse the version related DT entries and store the values locally */ + ret = of_property_read_u32(pn, "major_version", &major_ver); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse major_vesion\n"); + return ret; + } + + ret = of_property_read_u32(pn, "minor_version", &minor_ver); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse minor_vesion\n"); + return ret; + } + + /* Verify IPCLite Version - if version does not match crash the system */ + BUG_ON(major_ver != MAJOR_VERSION || minor_ver > MINOR_VERSION); + + return ret; +} +/* IPCLite Version setup related functions end */ + +/* Start of IPCLite Init*/ +static int ipclite_probe(struct platform_device *pdev) +{ + int ret = 0; + + /* Version Setup */ + ret = ipclite_version_setup(pdev->dev.of_node); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Version Setup Failed\n"); + goto error; + } + + /* Register API Setup */ + ret = ipclite_register_api(); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite API Registration Failed\n"); + goto error; + } + + /* IPCLite Init */ + ret = ipclite_init(pdev); + if (ret != 0) { + IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Init Failed\n"); + goto error; + } + + return ret; + error: IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n"); return ret; diff --git a/msm/synx/ipclite.h b/msm/synx/ipclite.h index f89d35b41b..39a064489e 100644 --- a/msm/synx/ipclite.h +++ b/msm/synx/ipclite.h @@ -10,21 +10,26 @@ #include #include "ipclite_client.h" +/* version related entries */ +#define MAJOR_VERSION 1 +#define MINOR_VERSION 0 + #define IPCMEM_INIT_COMPLETED 0x1 #define ACTIVE_CHANNEL 0x1 #define IPCMEM_TOC_SIZE (4*1024) #define IPCMEM_TOC_VAR_OFFSET 0x100 -#define MAX_CHANNEL_SIGNALS 6 #define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL +/* IPCC signal info */ #define IPCLITE_MSG_SIGNAL 0 #define IPCLITE_MEM_INIT_SIGNAL 1 #define IPCLITE_VERSION_SIGNAL 2 #define IPCLITE_TEST_SIGNAL 3 #define IPCLITE_SSR_SIGNAL 4 #define IPCLITE_DEBUG_SIGNAL 5 +#define MAX_CHANNEL_SIGNALS 6 /** Flag definitions for the entries */ #define IPCMEM_FLAGS_ENABLE_READ_PROTECTION (0x01) @@ -41,6 +46,11 @@ /* Timeout (ms) for the trylock of remote spinlocks */ #define HWSPINLOCK_TIMEOUT 1000 +/* queue related entries */ +#define FIFO_FULL_RESERVE 8 +#define FIFO_ALIGNMENT 8 + +/* debug related entries */ #define IPCLITE_DEBUG_INFO_SIZE 256 #define IPCLITE_CORE_DBG_LABEL "APSS:" #define IPCLITE_LOG_MSG_SIZE 100 @@ -51,6 +61,7 @@ #define ADD_OFFSET(x, y) ((void *)((size_t)x + y)) +/* IPCLite Logging Mechanism */ #define IPCLITE_OS_LOG(__level, __fmt, arg...) 
\ do { \ if (ipclite_debug_level & __level) { \ @@ -63,8 +74,28 @@ } \ } while (0) -#define ATOMIC_HW_MUTEX_ACQUIRE (global_atomic_support ?: ipclite_hw_mutex_acquire()) -#define ATOMIC_HW_MUTEX_RELEASE (global_atomic_support ?: ipclite_hw_mutex_release()) +/* IPCLite Debug enable status */ +#define IS_DEBUG_CONFIG(ipclite_debug) (ipclite_debug_control & ipclite_debug) + +/* IPCLite Feature enable status */ +#define IS_FEATURE_CONFIG(ipclite_feature) (feature_mask & ipclite_feature) + +/* Global Atomic status */ +#define ATOMIC_HW_MUTEX_ACQUIRE \ +(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_acquire()) +#define ATOMIC_HW_MUTEX_RELEASE \ +(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_release()) + +/* API Structure */ +struct ipclite_api_list { + int (*init)(struct platform_device *pdev); + int32_t (*register_client)(IPCLite_Client cb_func_ptr, void *priv); + int32_t (*register_test_client)(IPCLite_Client cb_func_ptr, void *priv); + int32_t (*msg_send)(int32_t proc_id, uint64_t data); + int32_t (*test_msg_send)(int32_t proc_id, uint64_t data); + int32_t (*partition_info)(struct global_region_info *global_ipcmem); + void (*recover)(enum ipcmem_host_type core_id); +} api_list_t; /** * enum ipclite_channel_status - channel status @@ -79,6 +110,11 @@ enum ipclite_channel_status { ACTIVE = 2, }; +enum ipclite_feature_mask { + IPCLITE_GLOBAL_ATOMIC = 0x0001ULL, + IPCLITE_TEST_SUITE = 0x0002ULL, +}; + enum ipclite_debug_level { IPCLITE_ERR = 0x0001, IPCLITE_WARN = 0x0002, From bde84ad2fb13a2893bfc3ca976b00b14c5ba6cb1 Mon Sep 17 00:00:00 2001 From: Kuldeep Singh Date: Thu, 27 Jul 2023 01:30:51 +0530 Subject: [PATCH 33/42] msm: synx: Fix to block wait till all handles are signaled Fixed a race condition in case of global and local merge where wait was unblocked without waiting for signal on local handles. Change-Id: Ib1a87dd4b766c5d45114daae7539dc044488b724 Signed-off-by: Kuldeep Singh --- msm/synx/synx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 3d425f3b75..2c6b627f7e 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -991,8 +991,8 @@ int synx_async_wait(struct synx_session *session, synx_native_signal_fence(synx_obj, status); } } - else - status = synx_util_get_object_status(synx_obj); + + status = synx_util_get_object_status(synx_obj); synx_cb->session = session; synx_cb->idx = idx; @@ -1375,7 +1375,6 @@ int synx_wait(struct synx_session *session, else synx_native_signal_fence(synx_obj, rc); mutex_unlock(&synx_obj->obj_lock); - goto status; } } @@ -1389,7 +1388,6 @@ int synx_wait(struct synx_session *session, goto fail; } -status: mutex_lock(&synx_obj->obj_lock); rc = synx_util_get_object_status(synx_obj); mutex_unlock(&synx_obj->obj_lock); From 2993c7c4d60d781d5d7fcd4725e8bcf94853cb15 Mon Sep 17 00:00:00 2001 From: Priyanko Sarkar Date: Mon, 10 Apr 2023 18:22:25 +0530 Subject: [PATCH 34/42] msm: debugfs v2 changes 1. Prints Global shared memory & hashtables on console. 2. Provides flexibility to select tables and columns. 3. Provides option to specify handle range. 4. Option to print details of specific handle. 
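For illustration only (these lines are not part of the patch itself), the table/column selection in points 2-4 amounts to OR-ing the synx_columns_level flags introduced below and writing the result to the column_level debugfs node, then optionally writing a handle range to synx_table. A minimal sketch, assuming the flag values added by this patch:

	/* hypothetical selection: global hashtable with STATUS, HANDLE and
	 * REF CNT columns -> 0x20000 | 0x8 | 0x2 | 0x200 == 0x2020a
	 */
	long columns = GLOBAL_HASHTABLE | STATUS_COLUMN | ID_COLUMN | REF_CNT_COLUMN;

	/* userspace equivalent, mirroring the help text added further below:
	 *   echo 0x2020a > column_level
	 *   echo 1048577-1048580 > synx_table
	 *   cat synx_table
	 */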
Change-Id: I974cb16004328aa81020c008b486b4a78167f7be Signed-off-by: Priyanko Sarkar --- msm/Makefile | 3 + msm/synx/synx_debugfs.c | 148 +++++++---- msm/synx/synx_debugfs.h | 68 ++++- msm/synx/synx_debugfs_util.c | 497 +++++++++++++++++++++++++++++++++++ msm/synx/synx_debugfs_util.h | 39 +++ msm/synx/synx_global.c | 31 ++- msm/synx/synx_global.h | 5 +- msm/synx/synx_util.c | 14 +- msm/synx/synx_util.h | 10 +- synx_modules.bzl | 1 + 10 files changed, 748 insertions(+), 68 deletions(-) create mode 100644 msm/synx/synx_debugfs_util.c create mode 100644 msm/synx/synx_debugfs_util.h diff --git a/msm/Makefile b/msm/Makefile index aff0d93920..241f5e757b 100644 --- a/msm/Makefile +++ b/msm/Makefile @@ -1,5 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only +CONFIG_DEBUGFS_SYNX = false + obj-m += synx/ipclite.o obj-m += synx-driver.o synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o +synx-driver-$(CONFIG_DEBUGFS_SYNX) += synx/synx_debugfs_util.o diff --git a/msm/synx/synx_debugfs.c b/msm/synx/synx_debugfs.c index 711fa31424..afc7a97c2f 100644 --- a/msm/synx/synx_debugfs.c +++ b/msm/synx/synx_debugfs.c @@ -9,17 +9,24 @@ #include #include #include +#include #include "synx_api.h" #include "synx_debugfs.h" #include "synx_util.h" +#include "synx_global.h" +#include "synx_debugfs_util.h" -#define MAX_DBG_BUF_SIZE (36 * SYNX_MAX_OBJS) +#define MAX_DBG_BUF_SIZE (64 * SYNX_MAX_OBJS) +#ifdef ENABLE_DEBUGFS +#define MAX_HELP_BUF_SIZE (4096) +#define BUF_SIZE 16 +#endif struct dentry *my_direc; - -int synx_columns = NAME_COLUMN | ID_COLUMN | - STATE_COLUMN | GLOBAL_COLUMN; +u32 lower_handle_id = GLOBAL_HANDLE_STARTING_ID, upper_handle_id = GLOBAL_HANDLE_STARTING_ID; +long synx_columns = STATUS_COLUMN | ID_COLUMN | REF_CNT_COLUMN | + NUM_CHILD_COLUMN | SUBSCRIBERS_COLUMN | WAITERS_COLUMN | PARENTS_COLUMN | GLOBAL_SHARED_MEM; EXPORT_SYMBOL(synx_columns); int synx_debug = SYNX_ERR | SYNX_WARN | @@ -32,10 +39,10 @@ void populate_bound_rows( int j; for (j = 0; j < row->num_bound_synxs; j++) - cur += scnprintf(cur, end - cur, - "\n\tID: %d", - row->bound_synxs[j].external_desc.id); + SYNX_CONSOLE_LOG(cur, end, "\n\tID: %d", + row->bound_synxs[j].external_desc.id); } + static ssize_t synx_table_read(struct file *file, char *buf, size_t count, @@ -44,98 +51,149 @@ static ssize_t synx_table_read(struct file *file, struct synx_device *dev = file->private_data; struct error_node *err_node, *err_node_tmp; char *dbuf, *cur, *end; - int rc = SYNX_SUCCESS; ssize_t len = 0; dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL); if (!dbuf) return -ENOMEM; - /* dump client details */ cur = dbuf; end = cur + MAX_DBG_BUF_SIZE; - if (synx_columns & NAME_COLUMN) - cur += scnprintf(cur, end - cur, "| Name |"); - if (synx_columns & ID_COLUMN) - cur += scnprintf(cur, end - cur, "| ID |"); - if (synx_columns & STATE_COLUMN) - cur += scnprintf(cur, end - cur, "| Status |"); - if (synx_columns & FENCE_COLUMN) - cur += scnprintf(cur, end - cur, "| Fence |"); - if (synx_columns & COREDATA_COLUMN) - cur += scnprintf(cur, end - cur, "| Coredata |"); - if (synx_columns & GLOBAL_COLUMN) - cur += scnprintf(cur, end - cur, "| Coredata |"); - if (synx_columns & BOUND_COLUMN) - cur += scnprintf(cur, end - cur, "| Bound |"); - cur += scnprintf(cur, end - cur, "\n"); +#ifdef ENABLE_DEBUGFS + SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID start value : %d", lower_handle_id); + SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID end value : %d\n", upper_handle_id); - rc = synx_global_dump_shared_memory(); - if (rc) { - cur += scnprintf(cur, 
end - cur, - "Err %d: Failed to dump global shared mem\n", rc); - } - - if (synx_columns & ERROR_CODES && !list_empty( - &dev->error_list)) { - cur += scnprintf( - cur, end - cur, "\nError(s): "); + if (synx_columns & GLOBAL_HASHTABLE) + synx_debugfs_util_print_hash_table(&cur, &end, true); + if (synx_columns & LOCAL_HASHTABLE) + synx_debugfs_util_print_hash_table(&cur, &end, false); + if (synx_columns & CLIENT_HASHTABLE) + synx_debugfs_util_print_client_table(&cur, &end); + if (synx_columns & GLOBAL_SHARED_MEM) + synx_debugfs_util_print_global_shared_memory(&cur, &end); + if (synx_columns & DMA_FENCE_MAP) + synx_debugfs_util_print_dma_fence(&cur, &end); +#endif + if (synx_columns & ERROR_CODES && !list_empty(&dev->error_list)) { + SYNX_CONSOLE_LOG(cur, end, "\nError(s): "); mutex_lock(&dev->error_lock); list_for_each_entry_safe( err_node, err_node_tmp, &dev->error_list, node) { - cur += scnprintf(cur, end - cur, - "\n\tTime: %s - ID: %d - Code: %d", - err_node->timestamp, - err_node->h_synx, - err_node->error_code); + SYNX_CONSOLE_LOG(cur, end, "\n\tTime: %s - ", err_node->timestamp); + SYNX_CONSOLE_LOG(cur, end, "ID: %d - ", err_node->h_synx); + SYNX_CONSOLE_LOG(cur, end, "Code: %d", err_node->error_code); list_del(&err_node->node); kfree(err_node); } mutex_unlock(&dev->error_lock); } - len = simple_read_from_buffer(buf, count, ppos, dbuf, cur - dbuf); kfree(dbuf); return len; } +#ifdef ENABLE_DEBUGFS static ssize_t synx_table_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - return 0; + u32 stat = -1; + u32 i = 0, base = 10, num = 0; + bool invalid_val = false; + char *kbuffer = kzalloc(BUF_SIZE, GFP_KERNEL); + + if (!kbuffer) + return -ENOMEM; + stat = copy_from_user(kbuffer, buf, BUF_SIZE); + if (stat != 0) { + kfree(kbuffer); + return -EFAULT; + } + if (kbuffer[i] == '0' && (kbuffer[i+1] == 'x' || kbuffer[i+1] == 'X')) { + base = 16; + i += 2; + } + for ( ; (i < BUF_SIZE / 2 && kbuffer[i] != '-' && kbuffer[i] != '\n'); i++) + SYNX_READ_CHAR(kbuffer, num, base, i); + if (!invalid_val) + lower_handle_id = num; + + if (kbuffer[i] == '-') { + num = 0; + i++; + for ( ; i < BUF_SIZE && kbuffer[i] != '\n'; i++) + SYNX_READ_CHAR(kbuffer, num, base, i); + if (!invalid_val) + upper_handle_id = num; + } else if (kbuffer[i] == '\n') + upper_handle_id = lower_handle_id; + kfree(kbuffer); + + return count; } +#endif static const struct file_operations synx_table_fops = { .owner = THIS_MODULE, .read = synx_table_read, +#ifdef ENABLE_DEBUGFS .write = synx_table_write, +#endif .open = simple_open, }; +#ifdef ENABLE_DEBUGFS +static ssize_t synx_help_read(struct file *file, + char *buf, + size_t count, + loff_t *ppos) +{ + char *dbuf, *cur, *end; + ssize_t len = 0; + + dbuf = kzalloc(MAX_HELP_BUF_SIZE, GFP_KERNEL); + if (!dbuf) + return -ENOMEM; + + cur = dbuf; + end = cur + MAX_HELP_BUF_SIZE; + synx_debugfs_util_load_help_content(&cur, &end); + len = simple_read_from_buffer(buf, count, ppos, dbuf, cur - dbuf); + kfree(dbuf); + return len; +} +static const struct file_operations synx_help_fops = { + .owner = THIS_MODULE, + .read = synx_help_read, +}; +#endif struct dentry *synx_init_debugfs_dir(struct synx_device *dev) { struct dentry *dir = NULL; - dir = debugfs_create_dir("synx_debug", NULL); if (!dir) { dprintk(SYNX_ERR, "Failed to create debugfs for synx\n"); return NULL; } - debugfs_create_u32("debug_level", 0644, dir, &synx_debug); - debugfs_create_u32("column_level", 0644, dir, &synx_columns); + debugfs_create_ulong("column_level", 0644, dir, &synx_columns); if 
(!debugfs_create_file("synx_table", 0644, dir, dev, &synx_table_fops)) { dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n"); return NULL; } - +#ifdef ENABLE_DEBUGFS + if (!debugfs_create_file("help", + 0444, dir, dev, &synx_help_fops)) { + dprintk(SYNX_ERR, "Failed to create debugfs help file for synx\n"); + return NULL; + } +#endif return dir; } diff --git a/msm/synx/synx_debugfs.h b/msm/synx/synx_debugfs.h index 0692a89281..d11aa02c7e 100644 --- a/msm/synx/synx_debugfs.h +++ b/msm/synx/synx_debugfs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SYNX_DEBUGFS_H__ @@ -11,6 +11,8 @@ #include #include "synx_private.h" +//#define ENABLE_DEBUGFS +#define STATE_NAME_SPACE (4) enum synx_debug_level { SYNX_ERR = 0x0001, @@ -26,14 +28,30 @@ enum synx_debug_level { }; enum synx_columns_level { - NAME_COLUMN = 0x0001, - ID_COLUMN = 0x0002, - BOUND_COLUMN = 0x0004, - STATE_COLUMN = 0x0008, - FENCE_COLUMN = 0x0010, - COREDATA_COLUMN = 0x0020, - GLOBAL_COLUMN = 0x0040, - ERROR_CODES = 0x8000, + NAME_COLUMN = 0x00000001, + ID_COLUMN = 0x00000002, + BOUND_COLUMN = 0x00000004, + STATUS_COLUMN = 0x00000008, + FENCE_COLUMN = 0x00000010, + COREDATA_COLUMN = 0x00000020, + GLOBAL_IDX_COLUMN = 0x00000040, + REL_CNT_COLUMN = 0x00000080, + MAP_CNT_COLUMN = 0x00000100, + REF_CNT_COLUMN = 0x00000200, + NUM_CHILD_COLUMN = 0x00000400, + SUBSCRIBERS_COLUMN = 0x00000800, + WAITERS_COLUMN = 0x00001000, + PARENTS_COLUMN = 0x00002000, + CLIENT_ID_COLUMN = 0x00004000, + + LOCAL_HASHTABLE = 0x00010000, + GLOBAL_HASHTABLE = 0x00020000, + CLIENT_HASHTABLE = 0x00040000, + GLOBAL_SHARED_MEM = 0x00080000, + DMA_FENCE_MAP = 0x00100000, + CSL_FENCE_MAP = 0x00200000, + + ERROR_CODES = 0x00008000, }; #ifndef SYNX_DBG_LABEL @@ -43,6 +61,8 @@ enum synx_columns_level { #define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: " extern int synx_debug; +extern u32 lower_handle_id, upper_handle_id; +extern long synx_columns; static inline char *synx_debug_str(int level) { @@ -77,6 +97,36 @@ static inline char *synx_debug_str(int level) } \ } while (0) +#define SYNX_CONSOLE_LOG(__cur, __end, \ + __fmt_string, arg...) \ + do { \ + if ((__end - __cur) * (sizeof(char *)) \ + - strlen(__fmt_string) <= STATE_NAME_SPACE) \ + dprintk(SYNX_DBG, __fmt_string, ## arg); \ + else \ + __cur += scnprintf(__cur, __end - __cur, \ + __fmt_string, ## arg); \ + } while (0) + +#define SYNX_READ_CHAR(__buf, __num, \ + __base, __pos) \ + do { \ + if (__buf[__pos] >= '0' && \ + __buf[__pos] <= '9') \ + __num = __num * __base + \ + (__buf[__pos] - '0'); \ + else if (__buf[__pos] >= 'a' && \ + __buf[__pos] <= 'f') \ + __num = __num * __base + \ + (__buf[__pos] - 'a' + 10); \ + else if (__buf[__pos] >= 'A' && \ + __buf[__pos] <= 'F') \ + __num = __num * __base + \ + (__buf[__pos] - 'A' + 10); \ + else \ + invalid_val = true; \ + } while (0) + /** * synx_init_debugfs_dir - Initializes debugfs * diff --git a/msm/synx/synx_debugfs_util.c b/msm/synx/synx_debugfs_util.c new file mode 100644 index 0000000000..9b21d29fc4 --- /dev/null +++ b/msm/synx/synx_debugfs_util.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "synx_debugfs.h" +#include "synx_debugfs_util.h" +#include "synx_util.h" +#include "synx_private.h" +#include "synx_global.h" + +#define MAX_CUSTOM_STATUS ((1UL << 32) - 1) + +char *synx_debugfs_util_get_state_name(u32 status) +{ + char *state; + + if (status == 0) + state = "INV"; + else if (status == 1) + state = "ACT"; + else if (status == 2) + state = "SUC"; + else if (status == 3) + state = "ERR"; + else if (status == 4) + state = "CAN"; + else if (status == 5) + state = "EXT"; + else if (status == 6) + state = "SSR"; + else if (status > 64 && status <= MAX_CUSTOM_STATUS) + state = "CUS"; + else + state = "???"; + + return state; +} + +static int synx_debugfs_util_get_client_data(struct synx_client *client) +{ + if (IS_ERR_OR_NULL(client)) + return -SYNX_NOENT; + kref_get(&client->refcount); + return SYNX_SUCCESS; +} + +static void synx_debugfs_util_put_client_data(struct synx_client *client) +{ + if (!IS_ERR_OR_NULL(client)) + kref_put(&client->refcount, synx_client_destroy); +} + +static int synx_debugfs_util_get_handle(struct synx_handle_coredata *handle_coredata) +{ + if (IS_ERR_OR_NULL(handle_coredata)) + return -SYNX_NOENT; + kref_get(&handle_coredata->refcount); + return SYNX_SUCCESS; +} + +static void synx_debugfs_util_put_handle(struct synx_handle_coredata *handle_coredata) +{ + if (!IS_ERR_OR_NULL(handle_coredata)) + kref_put(&handle_coredata->refcount, synx_util_destroy_handle); +} + +static int synx_debugfs_util_get_CSL_fence_entry(struct synx_entry_64 *entry) +{ + if (IS_ERR_OR_NULL(entry)) + return -SYNX_NOENT; + kref_get(&entry->refcount); + return SYNX_SUCCESS; +} + +static void synx_debugfs_util_put_CSL_fence_entry(struct synx_entry_64 *entry) +{ + if (!IS_ERR_OR_NULL(entry)) + kref_put(&entry->refcount, synx_util_destroy_data); +} + +bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry, + u32 idx) +{ + int i; + + if (!entry || entry->handle != idx) + return false; + if (entry->status || entry->handle || entry->refcount || + entry->num_child || entry->subscribers || entry->waiters) + return true; + + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (entry->parents[i]) + return true; + } + return false; +} + +static bool synx_debugfs_util_is_valid_dma_handle_range(struct synx_fence_entry *fence_entry) +{ + if ((fence_entry->g_handle >= lower_handle_id && + fence_entry->g_handle <= upper_handle_id) || + (fence_entry->l_handle >= lower_handle_id && + fence_entry->l_handle <= upper_handle_id)) + return true; + return false; +} + +static void synx_debugfs_util_print_map_column_values(char **cur, + char **end, + struct synx_map_entry *entry) +{ + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s", synx_debugfs_util_get_state_name + (synx_util_get_object_status(entry->synx_obj))); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t %x", entry->key); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t %d", kref_read(&entry->refcount)); + if (synx_columns & BOUND_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t %d", entry->synx_obj->num_bound_synxs); + if (synx_columns & GLOBAL_IDX_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t %d", entry->synx_obj->global_idx); + if (synx_columns & MAP_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t %d", entry->synx_obj->map_count); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------"); + 
SYNX_CONSOLE_LOG(*cur, *end, "-----------\n"); +} + +void synx_debugfs_util_print_hash_table(char **cur, + char **end, + bool is_global) +{ + struct synx_map_entry *map_entry = NULL; + struct synx_coredata *synx_obj = NULL; + u32 key; + + if (is_global) + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t-------------GLOBAL MAP TABLE------------\n"); + else + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t-------------LOCAL MAP TABLE------------\n"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t\t"); + + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |"); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |"); + if (synx_columns & BOUND_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| NUM BOUND |"); + if (synx_columns & GLOBAL_IDX_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| GLOBAL INDEX |"); + if (synx_columns & MAP_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| MAP CNT |"); + SYNX_CONSOLE_LOG(*cur, *end, "\n"); + + for (key = lower_handle_id; key <= upper_handle_id; key++) { + map_entry = synx_util_get_map_entry(key); + if (IS_ERR_OR_NULL(map_entry) || IS_ERR_OR_NULL(map_entry->synx_obj) || + (is_global ^ synx_util_is_global_handle(key))) { + synx_util_release_map_entry(map_entry); + continue; + } + synx_obj = map_entry->synx_obj; + synx_util_get_object(synx_obj); + mutex_lock(&synx_obj->obj_lock); + synx_debugfs_util_print_map_column_values(cur, end, map_entry); + mutex_unlock(&synx_obj->obj_lock); + synx_util_put_object(synx_obj); + synx_util_release_map_entry(map_entry); + } +} + +void synx_debugfs_util_print_dma_fence(char **cur, + char **end) +{ + struct synx_fence_entry *curr = NULL; + struct hlist_node *tmp; + struct dma_fence *fence_entry = NULL; + u32 map_itr; + + SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------DMA FENCE MAP TABLE------------\n"); + + if (synx_columns & FENCE_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| DMA FENCE |"); + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |"); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |"); + SYNX_CONSOLE_LOG(*cur, *end, "\n"); + + spin_lock_bh(&synx_dev->native->fence_map_lock); + hash_for_each_safe(synx_dev->native->fence_map, map_itr, tmp, curr, node) { + if (IS_ERR_OR_NULL(curr)) + continue; + fence_entry = (struct dma_fence *)curr->key; + dma_fence_get(fence_entry); + if (synx_debugfs_util_is_valid_dma_handle_range(curr)) { + if (synx_columns & FENCE_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t%p", fence_entry); + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s", + synx_debugfs_util_get_state_name + (__fence_state(fence_entry, false))); + if (synx_columns & ID_COLUMN) { + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->g_handle); + SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr->l_handle); + } + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", + kref_read(&(fence_entry)->refcount)); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t-------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, + "-----------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------\n"); + } + dma_fence_put(fence_entry); + } + spin_unlock_bh(&synx_dev->native->fence_map_lock); +} + +void synx_debugfs_util_print_csl_fence(char **cur, + char **end) +{ + u32 itr, rc = SYNX_SUCCESS; + struct synx_entry_64 *curr = NULL; + struct hlist_node *tmp; + struct synx_map_entry 
*map_entry = NULL; + + SYNX_CONSOLE_LOG(*cur, *end, "\n\t------------- CSL FENCE MAP TABLE------------\n"); + + if (synx_columns & FENCE_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| CSL FENCE |"); + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |"); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |"); + SYNX_CONSOLE_LOG(*cur, *end, "\n"); + + spin_lock_bh(&synx_dev->native->csl_map_lock); + hash_for_each_safe(synx_dev->native->csl_fence_map, itr, tmp, curr, node) { + rc = synx_debugfs_util_get_CSL_fence_entry(curr); + if (rc) { + spin_unlock_bh(&synx_dev->native->csl_map_lock); + return; + } + if (curr->data[0] >= lower_handle_id && curr->data[0] <= upper_handle_id) { + if (synx_columns & FENCE_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "%p", curr->key); + if (synx_columns & STATUS_COLUMN) { + map_entry = synx_util_get_map_entry(curr->data[0]); + if (!IS_ERR_OR_NULL(map_entry) && + !IS_ERR_OR_NULL(map_entry->synx_obj)) { + SYNX_CONSOLE_LOG(*cur, *end, "\t%s", + synx_debugfs_util_get_state_name + (synx_util_get_object_status(map_entry->synx_obj))); + synx_util_release_map_entry(map_entry); + } + } //TODO : Update status field of CSL Fence with updated structure + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->data[0]); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t%d", kref_read(&curr->refcount)); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------\n"); + } + synx_debugfs_util_put_CSL_fence_entry(curr); + } + spin_unlock_bh(&synx_dev->native->csl_map_lock); +} + +void synx_debugfs_util_print_global_shared_memory(char **cur, + char **end) +{ + struct synx_global_coredata synx_global_entry; + u32 i, idx; + + /* Column heading set up*/ + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t ------------- GLOBAL SHARED MEMORY ------------\n\t"); + + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| STATUS |"); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| REF CNT |"); + if (synx_columns & NUM_CHILD_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| NUM CHILD |"); + if (synx_columns & SUBSCRIBERS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| SUBSCRIBERS |"); + if (synx_columns & WAITERS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| WAITERS |"); + if (synx_columns & PARENTS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| PARENTS |"); + SYNX_CONSOLE_LOG(*cur, *end, "\n"); + + for (idx = lower_handle_id ; idx <= upper_handle_id ; idx++) { + if (!synx_fetch_global_shared_memory_handle_details(idx, &synx_global_entry) || + !synx_debugfs_util_is_valid_global_shared_memory_entry(&synx_global_entry, idx)) + continue; + if (synx_columns & STATUS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t %s", + synx_debugfs_util_get_state_name(synx_global_entry.status)); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%x", synx_global_entry.handle); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.refcount); + if (synx_columns & NUM_CHILD_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.num_child); + if (synx_columns & SUBSCRIBERS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t%d", synx_global_entry.subscribers); + if (synx_columns & 
WAITERS_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.waiters); + if (synx_columns & PARENTS_COLUMN) { + for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) { + if (synx_global_entry.parents[i]) + SYNX_CONSOLE_LOG(*cur, *end, " %2u", + synx_global_entry.parents[i]); + } + } + SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------\n"); + } +} + +void synx_debugfs_util_print_client_table(char **cur, + char **end) +{ + u32 rc = SYNX_SUCCESS; + struct synx_client *curr; + struct hlist_node *tmp; + struct hlist_node *tmp2; + struct synx_handle_coredata *curr2 = NULL; + u32 client_map_itr, handle_map_itr; + + SYNX_CONSOLE_LOG(*cur, *end, "\n\t ------------- CLIENT MAP TABLE------------\n"); + if (synx_columns & CLIENT_ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| CLIENT ID |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "|CLIENT REF COUNT|"); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE ID |"); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "|REF COUNT|"); + if (synx_columns & REL_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "|REL COUNT|"); + SYNX_CONSOLE_LOG(*cur, *end, "\n"); + spin_lock_bh(&synx_dev->native->metadata_map_lock); + hash_for_each_safe(synx_dev->native->client_metadata_map, + client_map_itr, tmp, curr, node) { + rc = synx_debugfs_util_get_client_data(curr); + if (rc) + goto bail; + spin_lock_bh(&curr->handle_map_lock); + hash_for_each_safe(curr->handle_map, + handle_map_itr, tmp2, curr2, node) { + rc = synx_debugfs_util_get_handle(curr2); + if (rc) { + spin_unlock_bh(&curr->handle_map_lock); + synx_debugfs_util_put_client_data(curr); + goto bail; + } + if (curr2->key >= lower_handle_id && curr2->key <= upper_handle_id) { + if (synx_columns & CLIENT_ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t%u", curr->id); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", + kref_read(&curr->refcount)); + if (synx_columns & ID_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr2->key); + if (synx_columns & REF_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", + kref_read(&curr2->refcount)); + if (synx_columns & REL_CNT_COLUMN) + SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr2->rel_count); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t-------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, + "-----------------------------------------"); + SYNX_CONSOLE_LOG(*cur, *end, "-----------\n"); + } + synx_debugfs_util_put_handle(curr2); + } + spin_unlock_bh(&curr->handle_map_lock); + synx_debugfs_util_put_client_data(curr); + } +bail: + spin_unlock_bh(&synx_dev->native->metadata_map_lock); +} + +void synx_debugfs_util_load_help_content(char **cur, + char **end) +{ + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tSynx tables Supported for debugfs with the column names:"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tGLOBAL/LOCAL MAP COLUMNS : STATUS, ID, REF_CNT, BOUND,"); + SYNX_CONSOLE_LOG(*cur, *end, "\tGLOBAL INDEX, MAP CNT\n"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tGLOBAL SHARED MEMORY COLUMNS : STATUS, ID,"); + SYNX_CONSOLE_LOG(*cur, *end, + "REF_CNT, NUM_CHILD, \tSUBSCRIBERS, WAITERS, PARENTS"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tCLIENT MAP COLUMNS : CLIENT_ID, REF_CNT, REL_CNT, ID"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tDMA FENCE COLUMNS: STATUS, ID, REF_CNT, DMA FENCE"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tINSTRUCTIONS TO BE FOLLOWED:"); + 
SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tTO PRINT CHOOSE THE COLUMNS ACCORDINGLY AND ADD UP THE"); + SYNX_CONSOLE_LOG(*cur, *end, + "\tHEXADECIMAL VALUES & PASS THE ADDED UP VALUES FOR COLUMN ALONG"); + SYNX_CONSOLE_LOG(*cur, *end, "WITH TABLE SELECTION VALUE AS SHOWN BELOW:"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tSet Below Values for Column selection\n"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tNAME_COLUMN = 0x0001"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tID_COLUMN = 0x0002"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tBOUND_COLUMN = 0x0004"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tSTATUS_COLUMN = 0x0008"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tFENCE_COLUMN = 0x0010"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tCOREDATA_COLUMN = 0x0020"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_IDX_COLUMN = 0x0040"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tREL_CNT_COLUMN = 0x0080"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tMAP_CNT_COLUMN = 0x0100"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tREF_CNT_COLUMN = 0x0200"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tNUM_CHILD_COLUMN = 0x0400"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUBSCRIBERS_COLUMN= 0x0800"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tWAITERS_COLUMN = 0x1000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tPARENTS_COLUMN = 0x2000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_ID_COLUMN = 0x4000"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tSet Below Values for Table selection\n"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tLOCAL_HASHTABLE = 0x00010000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_HASHTABLE = 0x00020000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_HASHTABLE = 0x00040000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_SHARED_MEM = 0x00080000"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tDMA_FENCE_MAP = 0x00100000\n"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tExample : To select Global map & all its columns :"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t echo 0x2034E>column_level"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t Last four digits in hexadecimal flag"); + SYNX_CONSOLE_LOG(*cur, *end, " is dedicated for setting columns,"); + SYNX_CONSOLE_LOG(*cur, *end, + "\tuser can even set \"FFFF\" to set all columns"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\t Instead of passing 0x2034E, \tuser can even pass"); + SYNX_CONSOLE_LOG(*cur, *end, " 0x2FFFF to fetch the same"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tUser can set Handle Range with echo command as shown below\n"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 1048577-1048580>synx_table"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tFor single handle : echo \"1048577\">synx_table"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tHandle range can be set in hexadecimal values as shown below:"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 0x100001-10000f>synx_table"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tSingle handle : echo 0x100001>synx_table"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\n\tTo print info on console : cat synx_table"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tHandle states :"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tACT : SYNX_STATE_ACTIVE"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tINV : SYNX_STATE_INVALID"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tERR : SYNX_STATE_SIGNALED_ERROR"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUC : SYNX_STATE_SIGNALED_SUCCESS"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tCAN : SYNX_STATE_SIGNALED_CANCELLED"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tEXT : SYNX_STATE_SIGNALED_EXTERNAL"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tSSR : SYNX_STATE_SIGNALED_SSR\n"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tCUS : CUSTOM SIGNAL"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\t??? 
: UNKNOWN / UNDEFINED"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tAdditional information:"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tNo need to set handle ID range and column or table selection"); + SYNX_CONSOLE_LOG(*cur, *end, "\tvalues again if once it is already set"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tSimply using cat synx_table command user can print the data"); + SYNX_CONSOLE_LOG(*cur, *end, "\tfor same table with same set of columns"); + SYNX_CONSOLE_LOG(*cur, *end, "\n\tTo print all tables and all"); + SYNX_CONSOLE_LOG(*cur, *end, "columns set column level value to 0x1fffff"); + SYNX_CONSOLE_LOG(*cur, *end, + "\n\tCurrently we do not support CSL fence\n\n"); +} diff --git a/msm/synx/synx_debugfs_util.h b/msm/synx/synx_debugfs_util.h new file mode 100644 index 0000000000..4e47504a47 --- /dev/null +++ b/msm/synx/synx_debugfs_util.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SYNX_DEBUGFS_UTIL_H__ +#define __SYNX_DEBUGFS_UTIL_H__ + +#include "synx_api.h" +#include "synx_private.h" + +#define GLOBAL_HANDLE_STARTING_ID (1048577) + +/* DMA FENCE print function */ +void synx_debugfs_util_print_dma_fence(char **cur, char **end); + +/* CSL FENCE print function */ +void synx_debugfs_util_print_csl_fence(char **cur, char **end); + +/* GLOBAL & LOCAL MAP print function */ +void synx_debugfs_util_print_hash_table(char **cur, char **end, bool flag); + +/* GLOBAL SHARED MEMORY print function */ +void synx_debugfs_util_print_global_shared_memory(char **cur, char **end); + +/* CLIENT MAP print function */ +void synx_debugfs_util_print_client_table(char **cur, char **end); + +/* Function to get SYNX State Name */ +char *synx_debugfs_util_get_state_name(u32 status); + +/* Function for loading content of the help option for debugfs v2 */ +void synx_debugfs_util_load_help_content(char **cur, char **end); + +/* Function to check entry of the global shared memory is valid or not */ +bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry, +u32 idx); + +#endif /* __SYNX_DEBUGFS_UTIL_H__ */ diff --git a/msm/synx/synx_global.c b/msm/synx/synx_global.c index 7a10a3d653..4ead16965b 100644 --- a/msm/synx/synx_global.c +++ b/msm/synx/synx_global.c @@ -5,7 +5,6 @@ #include #include - #include "synx_debugfs.h" #include "synx_global.h" @@ -78,6 +77,33 @@ static void synx_global_print_data( func, i, synx_g_obj->parents[i]); } +bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle, + struct synx_global_coredata *synx_global_entry) +{ + int rc = SYNX_SUCCESS; + u32 idx; + unsigned long flags; + struct synx_global_coredata *entry; + + if (!synx_gmem.table) { + dprintk(SYNX_VERB, "synx_gmem is NULL\n"); + return false; + } + idx = synx_handle & SYNX_HANDLE_INDEX_MASK; + if (!synx_is_valid_idx(idx)) + return false; + rc = synx_gmem_lock(idx, &flags); + if (rc) { + dprintk(SYNX_VERB, "Failed to lock entry %d\n", idx); + return false; + } + entry = &synx_gmem.table[idx]; + memcpy(synx_global_entry, entry, sizeof(struct synx_global_coredata)); + synx_gmem_unlock(idx, &flags); + + return true; +} + int synx_global_dump_shared_memory(void) { int rc = SYNX_SUCCESS, idx; @@ -805,14 +831,13 @@ int synx_global_recover(enum synx_core_id core_id) const u32 size = SYNX_GLOBAL_MAX_OBJS; unsigned long flags; struct synx_global_coredata *synx_g_obj; - bool update; int *clear_idx = NULL; + if (!synx_gmem.table) return -SYNX_NOMEM; clear_idx = 
kzalloc(sizeof(int)*SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL); - if (!clear_idx) return -SYNX_NOMEM; diff --git a/msm/synx/synx_global.h b/msm/synx/synx_global.h index 4f1b7edc2a..dcf5d10e9e 100644 --- a/msm/synx/synx_global.h +++ b/msm/synx/synx_global.h @@ -10,7 +10,6 @@ #include "ipclite_client.h" #include - /** * enum synx_core_id - Synx core IDs * @@ -299,4 +298,8 @@ int synx_global_dump_shared_memory(void); */ int synx_global_fetch_handle_details(u32 idx, u32 *h_synx); +/* Function to fetch global shared memory entry */ +bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle, + struct synx_global_coredata *synx_global_entry); + #endif /* __SYNX_SHARED_MEM_H__ */ diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 86d3e593da..65bea3a83c 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -10,9 +10,9 @@ #include "synx_debugfs.h" #include "synx_util.h" +#include "synx_private.h" extern void synx_external_callback(s32 sync_obj, int status, void *data); -static u32 __fence_state(struct dma_fence *fence, bool locked); int synx_util_init_coredata(struct synx_coredata *synx_obj, struct synx_create_params *params, @@ -224,7 +224,7 @@ int synx_util_init_group_coredata(struct synx_coredata *synx_obj, return rc; } -static void synx_util_destroy_coredata(struct kref *kref) +void synx_util_destroy_coredata(struct kref *kref) { int rc; struct synx_coredata *synx_obj = @@ -697,7 +697,7 @@ error: return -SYNX_INVALID; } -static u32 __fence_state(struct dma_fence *fence, bool locked) +u32 __fence_state(struct dma_fence *fence, bool locked) { s32 status; u32 state = SYNX_STATE_INVALID; @@ -1003,7 +1003,7 @@ static void synx_util_destroy_map_entry_worker( kfree(map_entry); } -static void synx_util_destroy_map_entry(struct kref *kref) +void synx_util_destroy_map_entry(struct kref *kref) { struct synx_map_entry *map_entry = container_of(kref, struct synx_map_entry, refcount); @@ -1046,7 +1046,7 @@ static void synx_util_destroy_handle_worker( kfree(synx_data); } -static void synx_util_destroy_handle(struct kref *kref) +void synx_util_destroy_handle(struct kref *kref) { struct synx_handle_coredata *synx_data = container_of(kref, struct synx_handle_coredata, @@ -1442,7 +1442,7 @@ static void synx_client_cleanup(struct work_struct *dispatch) vfree(client); } -static void synx_client_destroy(struct kref *kref) +void synx_client_destroy(struct kref *kref) { struct synx_client *client = container_of(kref, struct synx_client, refcount); @@ -1571,7 +1571,7 @@ struct synx_entry_64 *synx_util_retrieve_data(void *fence, return entry; } -static void synx_util_destroy_data(struct kref *kref) +void synx_util_destroy_data(struct kref *kref) { struct synx_entry_64 *entry = container_of(kref, struct synx_entry_64, refcount); diff --git a/msm/synx/synx_util.h b/msm/synx/synx_util.h index 95f54c4bb2..60d20dae13 100644 --- a/msm/synx/synx_util.h +++ b/msm/synx/synx_util.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
 */

 #ifndef __SYNX_UTIL_H__
@@ -11,7 +11,8 @@
 #include "synx_private.h"
 extern struct synx_device *synx_dev;
-
+u32 __fence_state(struct dma_fence *fence, bool locked);
+void synx_util_destroy_coredata(struct kref *kref);
 extern void synx_fence_callback(struct dma_fence *fence,
 	struct dma_fence_cb *cb);
 extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
@@ -99,7 +100,7 @@ struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
 	u32 h_synx, u32 flags);
 struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
 void synx_util_release_map_entry(struct synx_map_entry *map_entry);
-
+void synx_util_destroy_map_entry(struct kref *kref);
 /* fence map functions */
 int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
 	u32 *h_synx, u32 global);
@@ -157,6 +158,8 @@ struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client
 void synx_util_release_handle(struct synx_handle_coredata *synx_data);
 int synx_util_update_handle(struct synx_client *client, u32 h_synx,
 	u32 sync_id, u32 type, struct synx_handle_coredata **handle);
+void synx_client_destroy(struct kref *kref);
+void synx_util_destroy_handle(struct kref *kref);
 /* client memory handler functions */
 struct synx_client *synx_get_client(struct synx_session *session);
@@ -172,6 +175,7 @@ struct synx_entry_64 *synx_util_retrieve_data(void *fence,
 	u32 type);
 void synx_util_remove_data(void *fence, u32 type);
 /* misc */
+void synx_util_destroy_data(struct kref *kref);
 void synx_util_map_import_params_to_create(
 	struct synx_import_indv_params *params,
 	struct synx_create_params *c_params);
diff --git a/synx_modules.bzl b/synx_modules.bzl
index d6ae761a25..cf945d1afe 100644
--- a/synx_modules.bzl
+++ b/synx_modules.bzl
@@ -13,6 +13,7 @@ register_synx_module(
 		"synx/synx_global.c",
 		"synx/synx_util.c",
 		"synx/synx_debugfs.c",
+		"synx/synx_debugfs_util.c",
 	],
 )

From d5bed2898aa514562a4fb6a5a0ae2732833a54f9 Mon Sep 17 00:00:00 2001
From: Amir Suhail
Date: Wed, 6 Dec 2023 17:49:19 +0530
Subject: [PATCH 35/42] msm: synx: Fixing import of dma-fence checks

1. An invalid value was being passed to the fence variable because of the
improper position of the DMA_FENCE flag check.
2. Added dma_fence_put() for the reference taken by sync_file_get_fence().
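Condensed for clarity (a simplified sketch of the synx_handle_import() change in the diff below, with error handling and the import-array variant omitted):

	/* check the DMA-fence flag first, so sync_file_get_fence() is only
	 * called for DMA-fence imports
	 */
	if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
		params.indv.fence = sync_file_get_fence(import_info.desc.id[0]);
	else if (import_info.flags & SYNX_IMPORT_SYNX_FENCE)
		params.indv.fence = &import_info.synx_obj;

	if (synx_import(session, &params))
		result = -SYNX_INVALID;

	/* drop the sync_file reference whether or not the import succeeded */
	if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
		dma_fence_put(params.indv.fence);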
Change-Id: I6f2a8f2041149bd3553967aee5ee3e1626993adf Signed-off-by: Amir Suhail --- msm/synx/synx.c | 26 +++++++++++++++++++------- msm/synx/synx_util.c | 2 +- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 2c6b627f7e..097a2f44a6 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -2009,6 +2009,7 @@ static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl, { struct synx_import_info import_info; struct synx_import_params params = {0}; + int result = SYNX_SUCCESS; if (k_ioctl->size != sizeof(import_info)) return -SYNX_INVALID; @@ -2018,28 +2019,32 @@ static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl, k_ioctl->size)) return -EFAULT; - if (import_info.flags & SYNX_IMPORT_SYNX_FENCE) - params.indv.fence = &import_info.synx_obj; - else if (import_info.flags & SYNX_IMPORT_DMA_FENCE) + if (import_info.flags & SYNX_IMPORT_DMA_FENCE) params.indv.fence = sync_file_get_fence(import_info.desc.id[0]); + else if (import_info.flags & SYNX_IMPORT_SYNX_FENCE) + params.indv.fence = &import_info.synx_obj; params.type = SYNX_IMPORT_INDV_PARAMS; params.indv.flags = import_info.flags; params.indv.new_h_synx = &import_info.new_synx_obj; if (synx_import(session, ¶ms)) - return -SYNX_INVALID; + result = -SYNX_INVALID; + // Fence needs to be put irresepctive of import status if (import_info.flags & SYNX_IMPORT_DMA_FENCE) dma_fence_put(params.indv.fence); + if (result != SYNX_SUCCESS) + return result; + if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr), &import_info, k_ioctl->size)) return -EFAULT; - return SYNX_SUCCESS; + return result; } static int synx_handle_import_arr( @@ -2082,12 +2087,19 @@ static int synx_handle_import_arr( while (idx < arr_info.num_objs) { params.new_h_synx = &arr[idx].new_synx_obj; params.flags = arr[idx].flags; - if (arr[idx].flags & SYNX_IMPORT_SYNX_FENCE) - params.fence = &arr[idx].synx_obj; + if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE) params.fence = sync_file_get_fence(arr[idx].desc.id[0]); + else if (arr[idx].flags & SYNX_IMPORT_SYNX_FENCE) + params.fence = &arr[idx].synx_obj; + rc = synx_native_import_indv(client, ¶ms); + + // Fence needs to be put irresepctive of import status + if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE) + dma_fence_put(params.fence); + if (rc != SYNX_SUCCESS) break; idx++; diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 65bea3a83c..628dd91db5 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -53,7 +53,7 @@ int synx_util_init_coredata(struct synx_coredata *synx_obj, strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name)); if (params->flags & SYNX_CREATE_DMA_FENCE) { - fence = params->fence; + fence = (struct dma_fence *)params->fence; if (IS_ERR_OR_NULL(fence)) { dprintk(SYNX_ERR, "invalid external fence\n"); goto free; From 8b3cd55815e526f2ddbcc01bd7d05543c94da643 Mon Sep 17 00:00:00 2001 From: Pravin Kumar Ravi Date: Thu, 18 Jan 2024 18:16:54 -0800 Subject: [PATCH 36/42] msm: synx: Fix to prevent stale addresses after synx_init failure Adding cdev_del() to delete the synx cdev which is not deleted during synx_init failure condition. Change-Id: I9990ff36a500453cd972c45f06fb57e8a0143a1d Signed-off-by: Pravin Kumar Ravi --- msm/synx/synx.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index 097a2f44a6..d55a791c46 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. 
All rights reserved. - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -2827,6 +2827,11 @@ static int __init synx_init(void) } synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME); + + if (IS_ERR(synx_dev->class)) { + rc = PTR_ERR(synx_dev->class); + goto err_class_create; + } device_create(synx_dev->class, NULL, synx_dev->dev, NULL, SYNX_DEVICE_NAME); @@ -2877,6 +2882,8 @@ err: fail: device_destroy(synx_dev->class, synx_dev->dev); class_destroy(synx_dev->class); +err_class_create: + cdev_del(&synx_dev->cdev); reg_fail: unregister_chrdev_region(synx_dev->dev, 1); alloc_fail: From a2795fd6f59324a7fbcff455b82b8cdc54d27e1a Mon Sep 17 00:00:00 2001 From: Amir Suhail Date: Tue, 23 Jan 2024 18:05:51 +0530 Subject: [PATCH 37/42] msm: synx: Fixing cleanup of merge synx and dma fences 1. dma fence framework takes care of putting the child dma fence reference when the array fence is being destroyed. Synx shouldn't put the child dma fence reference taken for dma fence array. 2. When destroying the child dma fence we have to signal the fence if synx has taken the last reference. If there are more than 1 reference on dma fence, then check if the other references are from merge fences instead of any client taking direct reference on the dma fence. Change-Id: I8b0879b8c1d2401cdd08f85ae330b74af99a2dad Signed-off-by: Amir Suhail --- msm/synx/synx.c | 49 +++++++++++++++++++++++++++++++------------- msm/synx/synx_util.c | 49 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 75 insertions(+), 23 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index d55a791c46..b32bfd7e66 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -1176,6 +1176,12 @@ int synx_merge(struct synx_session *session, if (IS_ERR_OR_NULL(client)) return -SYNX_INVALID; + synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_obj)) { + rc = -SYNX_NOMEM; + goto fail; + } + rc = synx_util_validate_merge(client, params->h_synxs, params->num_objs, &fences, &count); if (rc < 0) { @@ -1183,12 +1189,8 @@ int synx_merge(struct synx_session *session, "[sess :%llu] merge validation failed\n", client->id); rc = -SYNX_INVALID; - goto fail; - } - synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); - if (IS_ERR_OR_NULL(synx_obj)) { - rc = -SYNX_NOMEM; + kfree(synx_obj); goto fail; } @@ -1205,12 +1207,20 @@ int synx_merge(struct synx_session *session, *params->h_merged_obj, 0); if (IS_ERR_OR_NULL(map_entry)) { rc = PTR_ERR(map_entry); - goto clean_up; + + /* + * dma fence put will take care of removing the references taken + * on child fences + */ + dma_fence_put(synx_obj->fence); + kfree(synx_obj); + goto fail; } rc = synx_util_add_callback(synx_obj, *params->h_merged_obj); + if (rc != SYNX_SUCCESS) - goto clear; + goto clean_up; rc = synx_util_init_handle(client, synx_obj, params->h_merged_obj, map_entry); @@ -1218,8 +1228,7 @@ int synx_merge(struct synx_session *session, dprintk(SYNX_ERR, "[sess :%llu] unable to init merge handle %u\n", client->id, *params->h_merged_obj); - dma_fence_put(synx_obj->fence); - goto clear; + goto clean_up; } h_child_list = kzalloc(count*4, GFP_KERNEL); @@ -1289,13 +1298,25 @@ int synx_merge(struct synx_session *session, synx_put_client(client); return SYNX_SUCCESS; clear: - synx_util_release_map_entry(map_entry); + synx_native_release_core(client, (*params->h_merged_obj)); + synx_put_client(client); + return rc; + clean_up: - kfree(synx_obj); 
+ /* + * if map_entry is not created the cleanup of child fences have to be + * handled manually + */ + if (IS_ERR_OR_NULL(map_entry)) { + kfree(synx_obj); + synx_util_merge_error(client, params->h_synxs, count); + if (params->num_objs && params->num_objs <= count) + kfree(fences); + + } else { + synx_util_release_map_entry(map_entry); + } fail: - synx_util_merge_error(client, params->h_synxs, count); - if (params->num_objs && params->num_objs <= count) - kfree(fences); synx_put_client(client); return rc; } diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 628dd91db5..f842e1dea2 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -182,6 +182,36 @@ int synx_util_add_callback(struct synx_coredata *synx_obj, return SYNX_SUCCESS; } +static int synx_util_count_dma_array_fences(struct dma_fence *fence) +{ + struct dma_fence_cb *cur, *tmp; + int32_t num_dma_array = 0; + struct dma_fence_array_cb *cb_array = NULL; + struct dma_fence_array *array = NULL; + + if (IS_ERR_OR_NULL(fence)) { + dprintk(SYNX_ERR, "invalid fence passed\n"); + return num_dma_array; + } + + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + // count for parent fences + cb_array = container_of(cur, struct dma_fence_array_cb, cb); + if (IS_ERR_OR_NULL(cb_array)) { + dprintk(SYNX_VERB, "cb_array not found in fence %pK\n", fence); + continue; + } + array = cb_array->array; + if (!IS_ERR_OR_NULL(array) && dma_fence_is_array(&(array->base))) + num_dma_array++; + } + + dprintk(SYNX_VERB, "number of fence_array found %d for child fence %pK\n", + num_dma_array, fence); + + return num_dma_array; +} + int synx_util_init_group_coredata(struct synx_coredata *synx_obj, struct dma_fence **fences, struct synx_merge_params *params, @@ -276,7 +306,6 @@ int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status) "signaling child fence %pK failed=%d\n", array->fences[i], rc); } - dma_fence_put(array->fences[i]); } } return rc; @@ -285,6 +314,7 @@ int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status) void synx_util_object_destroy(struct synx_coredata *synx_obj) { int rc; + int num_dma_array = 0; u32 i; s32 sync_id; u32 type; @@ -358,14 +388,15 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj) if (synx_util_is_merged_object(synx_obj) && synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL); - else if (kref_read(&synx_obj->fence->refcount) == 1 && - (synx_util_get_object_status_locked(synx_obj) == - SYNX_STATE_ACTIVE)) { - // set fence error to cancel - dma_fence_set_error(synx_obj->fence, - -SYNX_STATE_SIGNALED_CANCEL); + else if (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) { + num_dma_array = synx_util_count_dma_array_fences(synx_obj->fence); + if (kref_read(&synx_obj->fence->refcount) == 1 + num_dma_array) { + // set fence error to cancel + dma_fence_set_error(synx_obj->fence, + -SYNX_STATE_SIGNALED_CANCEL); - rc = dma_fence_signal_locked(synx_obj->fence); + rc = dma_fence_signal_locked(synx_obj->fence); + } } spin_unlock_irqrestore(synx_obj->fence->lock, flags); if (rc) From 51b2182feaa4fc82e1d3575f48d3414533f8acfa Mon Sep 17 00:00:00 2001 
From: Amir Suhail Date: Tue, 23 Jan 2024 18:05:51 +0530 Subject: [PATCH 38/42] msm: synx: Fixing cleanup of merge synx and dma fences 1. dma fence framework takes care of putting the child dma fence reference when the array fence is being destroyed. Synx shouldn't put the child dma fence reference taken for dma fence array. 2. When destroying the child dma fence we have to signal the fence if synx has taken the last reference. If there are more than 1 reference on dma fence, then check if the other references are from merge fences instead of any client taking direct reference on the dma fence. Change-Id: I8b0879b8c1d2401cdd08f85ae330b74af99a2dad Signed-off-by: Amir Suhail (cherry picked from commit a2795fd6f59324a7fbcff455b82b8cdc54d27e1a) --- msm/synx/synx.c | 49 +++++++++++++++++++++++++++++++------------- msm/synx/synx_util.c | 49 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 75 insertions(+), 23 deletions(-) diff --git a/msm/synx/synx.c b/msm/synx/synx.c index d55a791c46..b32bfd7e66 100644 --- a/msm/synx/synx.c +++ b/msm/synx/synx.c @@ -1176,6 +1176,12 @@ int synx_merge(struct synx_session *session, if (IS_ERR_OR_NULL(client)) return -SYNX_INVALID; + synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); + if (IS_ERR_OR_NULL(synx_obj)) { + rc = -SYNX_NOMEM; + goto fail; + } + rc = synx_util_validate_merge(client, params->h_synxs, params->num_objs, &fences, &count); if (rc < 0) { @@ -1183,12 +1189,8 @@ int synx_merge(struct synx_session *session, "[sess :%llu] merge validation failed\n", client->id); rc = -SYNX_INVALID; - goto fail; - } - synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL); - if (IS_ERR_OR_NULL(synx_obj)) { - rc = -SYNX_NOMEM; + kfree(synx_obj); goto fail; } @@ -1205,12 +1207,20 @@ int synx_merge(struct synx_session *session, *params->h_merged_obj, 0); if (IS_ERR_OR_NULL(map_entry)) { rc = PTR_ERR(map_entry); - goto clean_up; + + /* + * dma fence put will take care of removing the references taken + * on child fences + */ + dma_fence_put(synx_obj->fence); + kfree(synx_obj); + goto fail; } rc = synx_util_add_callback(synx_obj, *params->h_merged_obj); + if (rc != SYNX_SUCCESS) - goto clear; + goto clean_up; rc = synx_util_init_handle(client, synx_obj, params->h_merged_obj, map_entry); @@ -1218,8 +1228,7 @@ int synx_merge(struct synx_session *session, dprintk(SYNX_ERR, "[sess :%llu] unable to init merge handle %u\n", client->id, *params->h_merged_obj); - dma_fence_put(synx_obj->fence); - goto clear; + goto clean_up; } h_child_list = kzalloc(count*4, GFP_KERNEL); @@ -1289,13 +1298,25 @@ int synx_merge(struct synx_session *session, synx_put_client(client); return SYNX_SUCCESS; clear: - synx_util_release_map_entry(map_entry); + synx_native_release_core(client, (*params->h_merged_obj)); + synx_put_client(client); + return rc; + clean_up: - kfree(synx_obj); + /* + * if map_entry is not created the cleanup of child fences have to be + * handled manually + */ + if (IS_ERR_OR_NULL(map_entry)) { + kfree(synx_obj); + synx_util_merge_error(client, params->h_synxs, count); + if (params->num_objs && params->num_objs <= count) + kfree(fences); + + } else { + synx_util_release_map_entry(map_entry); + } fail: - synx_util_merge_error(client, params->h_synxs, count); - if (params->num_objs && params->num_objs <= count) - kfree(fences); synx_put_client(client); return rc; } diff --git a/msm/synx/synx_util.c b/msm/synx/synx_util.c index 628dd91db5..f842e1dea2 100644 --- a/msm/synx/synx_util.c +++ b/msm/synx/synx_util.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * 
From 77dced0308cebb92f2534ead5af739f039314fed Mon Sep 17 00:00:00 2001
From: Priyanko Sarkar
Date: Thu, 21 Dec 2023 13:31:58 +0530
Subject: [PATCH 39/42] msm: Build files updation for disabling synx for volcano and pitti target

Updating synx build files to disable synx for volcano and pitti target

Change-Id: I736b3b62332e9bfd70ded29690f3bcb199152af7
Signed-off-by: Priyanko Sarkar
---
 Android.mk             |  6 ++++++
 synx_kernel_board.mk   | 10 ++++++++--
 synx_kernel_product.mk |  6 ++++++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/Android.mk b/Android.mk
index 222403af9d..cd8a14d3f2 100644
--- a/Android.mk
+++ b/Android.mk
@@ -7,6 +7,12 @@ else
 TARGET_SYNX_ENABLE := true
 endif
 
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
 ifeq ($(TARGET_SYNX_ENABLE),true)
 SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel
 
diff --git a/synx_kernel_board.mk b/synx_kernel_board.mk
index 8d324311ae..58d51bbe55 100644
--- a/synx_kernel_board.mk
+++ b/synx_kernel_board.mk
@@ -8,9 +8,15 @@ ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
 else
 TARGET_SYNX_ENABLE := true
 endif
-#
+
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
 ifeq ($(TARGET_SYNX_ENABLE), true)
-ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+ifneq (,$(call is-board-platform-in-list2,$(TARGET_BOARD_PLATFORM)))
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite_test.ko
diff --git a/synx_kernel_product.mk b/synx_kernel_product.mk
index 70ce198745..85d514e225 100644
--- a/synx_kernel_product.mk
+++ b/synx_kernel_product.mk
@@ -7,6 +7,12 @@ else
 TARGET_SYNX_ENABLE := true
 endif
 
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
 ifeq ($(TARGET_SYNX_ENABLE), true)
 PRODUCT_PACKAGES += synx-driver.ko
 endif
\ No newline at end of file
From 996abee2bd98482aff0e6e268c6a74092e5e7006 Mon Sep 17 00:00:00 2001
From: Pravin Kumar Ravi
Date: Tue, 26 Mar 2024 19:03:51 -0700
Subject: [PATCH 40/42] synx: Copy status only for merged fence

When an individual dma fence is signaled, the status is wrongly
overwritten with ACTIVE. This fix copies status only for merged fences.

Change-Id: I34b1367316c86b76b2ed17434e4d01863ae06b58
Signed-off-by: Pravin Kumar Ravi
---
 msm/synx/synx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/msm/synx/synx.c b/msm/synx/synx.c
index b32bfd7e66..ab46b43a6c 100644
--- a/msm/synx/synx.c
+++ b/msm/synx/synx.c
@@ -605,9 +605,9 @@ u32 synx_custom_get_status(struct synx_coredata *synx_obj, u32 status)
 			synx_obj->status = synx_get_child_status(synx_obj);
 		else
 			synx_obj->status = parent_global_status;
+		custom_status = synx_obj->status;
 	}
 
-	custom_status = synx_obj->status;
 	mutex_unlock(&synx_obj->obj_lock);
 
 bail:

From a32830483ba7b2337aaef9ad26524e5e9b7b8307 Mon Sep 17 00:00:00 2001
From: Priyanko Sarkar
Date: Mon, 22 Apr 2024 11:25:30 +0530
Subject: [PATCH 41/42] msm: synx: Adding memset inside synx_read function

Adding memset to initialize structure members to zero to fix
security concerns.

Change-Id: I4212124aeadb5f5a2c10756d4e210a21e229f323
Signed-off-by: Priyanko Sarkar
---
 msm/synx/synx.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/msm/synx/synx.c b/msm/synx/synx.c
index b32bfd7e66..84a823c522 100644
--- a/msm/synx/synx.c
+++ b/msm/synx/synx.c
@@ -2484,6 +2484,7 @@ static ssize_t synx_read(struct file *filep,
 	list_del_init(&cb->node);
 	mutex_unlock(&client->event_q_lock);
 
+	memset(&data, 0, sizeof(struct synx_userpayload_info_v2));
 	rc = size;
 	data.synx_obj = cb->kernel_cb.h_synx;
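The memset added above follows the standard rule for structures that are about to be copied to user space: clear the whole object first, so compiler-inserted padding and any members the code never assigns cannot leak kernel stack contents through copy_to_user(). A small standalone sketch of the pattern follows; the payload layout and the function name demo_read_event are made up for illustration and are not the synx UAPI.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* illustrative payload, not the real synx_userpayload_info_v2 layout */
struct demo_payload {
	__u64 handle;
	__u32 status;
	__u32 reserved[4];
};

static ssize_t demo_read_event(struct demo_payload __user *ubuf,
			       u64 handle, u32 status)
{
	struct demo_payload data;

	memset(&data, 0, sizeof(data));	/* zero padding and unused fields */
	data.handle = handle;
	data.status = status;

	if (copy_to_user(ubuf, &data, sizeof(data)))
		return -EFAULT;

	return sizeof(data);
}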
From 014456b1edaa81f670b89ca5eb70f13d47a1db36 Mon Sep 17 00:00:00 2001
From: Priyanko Sarkar
Date: Wed, 24 Jul 2024 14:59:22 +0530
Subject: [PATCH 42/42] msm: synx: Fixes null pointer dereference in handle import

Fixes null pointer access during global handle allocation failure
while importing local handle as global.

Change-Id: I4f644af86151d8599e3ad51d82fbf6e7db4aca41
Signed-off-by: Priyanko Sarkar
---
 msm/synx/synx.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/msm/synx/synx.c b/msm/synx/synx.c
index c657587256..c6b7ca9bba 100644
--- a/msm/synx/synx.c
+++ b/msm/synx/synx.c
@@ -1725,8 +1725,8 @@ static int synx_native_import_handle(struct synx_client *client,
 			old_entry);
 	}
 
-	if (rc != SYNX_SUCCESS)
-		return rc;
+	if (IS_ERR_OR_NULL(map_entry))
+		return -SYNX_INVALID;
 
 	*params->new_h_synx = h_synx;