From 6bf912d7a98362bb2969033d8249dbfc8e7ab1cd Mon Sep 17 00:00:00 2001 From: Gerrit SelfHelp Service Account Date: Mon, 15 Nov 2021 11:17:00 -0800 Subject: [PATCH 01/77] Initial empty repository From f29d4d57e11c09e04f76d789e9b2cd78f69ec5d6 Mon Sep 17 00:00:00 2001 From: Shashank Babu Chinta Venkata Date: Fri, 19 Nov 2021 10:03:05 -0800 Subject: [PATCH 02/77] mm-drivers: sync-fence: add sync fence driver snapshot Add snapshot for syncfence driver in mm-drivers repo. Change-Id: I43556e3479b45399b1ac0e8ba7a423f36bb21cf9 Signed-off-by: Shashank Babu Chinta Venkata Signed-off-by: Jeykumar Sankaran --- sync_fence/include/uapi/Kbuild | 6 + .../include/uapi/sync_fence/qcom_sync_file.h | 63 +++ sync_fence/src/qcom_sync_file.c | 466 ++++++++++++++++++ 3 files changed, 535 insertions(+) create mode 100644 sync_fence/include/uapi/Kbuild create mode 100644 sync_fence/include/uapi/sync_fence/qcom_sync_file.h create mode 100644 sync_fence/src/qcom_sync_file.c diff --git a/sync_fence/include/uapi/Kbuild b/sync_fence/include/uapi/Kbuild new file mode 100644 index 0000000000..f662bb6426 --- /dev/null +++ b/sync_fence/include/uapi/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +# Top-level Makefile calls into asm-$(ARCH) +# List only non-arch directories below + +header-y += sync_fence/ diff --git a/sync_fence/include/uapi/sync_fence/qcom_sync_file.h b/sync_fence/include/uapi/sync_fence/qcom_sync_file.h new file mode 100644 index 0000000000..964e0f46f7 --- /dev/null +++ b/sync_fence/include/uapi/sync_fence/qcom_sync_file.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _UAPI_LINUX_SPEC_SYNC_H +#define _UAPI_LINUX_SPEC_SYNC_H + +#include +#include + +#define SPEC_FENCE_SIGNAL_ANY 0x1 +#define SPEC_FENCE_SIGNAL_ALL 0x2 + +/** + * struct fence_bind_data - data passed to bind ioctl + * @out_bind_fd: file descriptor of second fence + * @fds: file descriptor list of child fences + */ +struct fence_bind_data { + __u32 out_bind_fd; + __u64 fds; +}; + +/** + * struct fence_create_data - detailed fence information + * @num_fences: Total fences that array needs to carry. + * @flags: Flags specifying on how to signal the array + * @out_bind_fd: Returns the fence fd. + */ +struct fence_create_data { + __u32 num_fences; + __u32 flags; + __u32 out_bind_fd; +}; + +#define SPEC_SYNC_MAGIC '>' + +/** + * DOC: SPEC_SYNC_IOC_BIND - bind two fences + * + * Takes a struct fence_bind_data. binds the child fds with the fence array + * pointed by fd1. + */ +#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data) + +/** + * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array + * + * Takes a struct fence_create_data. If num_fences is > 0, fence array will be + * created and returns the array fd in fence_create_data.fd1 + */ +#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data) + +/** + * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version + * + * Returns Spec driver version. 
+ */ +#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64) + +#endif /* _UAPI_LINUX_SPEC_SYNC_H */ diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c new file mode 100644 index 0000000000..3cb2178412 --- /dev/null +++ b/sync_fence/src/qcom_sync_file.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLASS_NAME "sync" +#define DRV_NAME "spec_sync" +#define DRV_VERSION 1 +#define NAME_LEN 32 + +#define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10 /* user flags for debug */ +#define FENCE_MIN 1 +#define FENCE_MAX 32 + +struct sync_device { + /* device info */ + struct class *dev_class; + dev_t dev_num; + struct device *dev; + struct cdev *cdev; + struct mutex lock; + + /* device drv data */ + atomic_t device_available; + char name[NAME_LEN]; + uint32_t version; + struct mutex l_lock; + struct list_head fence_array_list; +}; + +struct fence_array_node { + struct dma_fence_array *fence_array; + struct list_head list; +}; + +/* Speculative Sync Device Driver State */ +static struct sync_device sync_dev; + +static bool sanitize_fence_array(struct dma_fence_array *fence) +{ + struct fence_array_node *node; + int ret = false; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry(node, &sync_dev.fence_array_list, list) { + if (node->fence_array == fence) { + ret = true; + break; + } + } + mutex_unlock(&sync_dev.l_lock); + + return ret; +} + +static void clear_fence_array_tracker(bool force_clear) +{ + struct fence_array_node *node, *temp; + struct dma_fence_array *array; + struct dma_fence *fence; + bool is_signaled; + + mutex_lock(&sync_dev.l_lock); + list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) { + array = node->fence_array; + fence = &array->base; + is_signaled = dma_fence_is_signaled(fence); + + if (force_clear && !array->fences) + array->num_fences = 0; + + pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled, + atomic_read(&array->num_pending)); + + if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending)) + dma_fence_signal(fence); + + if (force_clear || is_signaled) { + dma_fence_put(fence); + list_del(&node->list); + kfree(node); + } + } + mutex_unlock(&sync_dev.l_lock); +} + +static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) +{ + if (atomic_read(&obj->device_available)) + return NULL; + + atomic_inc(&obj->device_available); + + memset(obj->name, 0, NAME_LEN); + strlcpy(obj->name, name, sizeof(obj->name)); + + return obj; +} + +static int spec_sync_open(struct inode *inode, struct file *file) +{ + char task_comm[TASK_COMM_LEN]; + struct sync_device *obj = &sync_dev; + int ret = 0; + + if (!inode || !inode->i_cdev || !file) { + pr_err("NULL pointer passed\n"); + return -EINVAL; + } + + mutex_lock(&sync_dev.lock); + + get_task_comm(task_comm, current); + + obj = spec_fence_init_locked(obj, task_comm); + if (!obj) { + pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm); + ret = -EEXIST; + goto end; + } + + file->private_data = obj; + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_release(struct inode *inode, struct file *file) +{ + int ret = 0; 
+ struct sync_device *obj = file->private_data; + + mutex_lock(&sync_dev.lock); + + if (!atomic_read(&obj->device_available)) { + pr_err("sync release failed !!\n"); + ret = -ENODEV; + goto end; + } + + clear_fence_array_tracker(true); + atomic_dec(&obj->device_available); + +end: + mutex_unlock(&sync_dev.lock); + return ret; +} + +static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg) +{ + uint32_t version = obj->version; + + if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t))) + return -EFAULT; + + return 0; +} + +static int spec_sync_create_array(struct fence_create_data *f) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + struct sync_file *sync_file; + struct dma_fence_array *fence_array; + struct fence_array_node *node; + bool signal_any; + int ret = 0; + + if (fd < 0) { + pr_err("failed to get_unused_fd_flags\n"); + return fd; + } + + if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) { + pr_err("invalid arguments num_fences:%d\n", f->num_fences); + ret = -ERANGE; + goto error_args; + } + + signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true; + + fence_array = dma_fence_array_create(f->num_fences, NULL, + dma_fence_context_alloc(1), 0, signal_any); + + /* Set the enable signal such that signalling is not done during wait*/ + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags); + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags); + + sync_file = sync_file_create(&fence_array->base); + if (!sync_file) { + pr_err("sync_file_create fail\n"); + ret = -EINVAL; + goto err; + } + node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL); + if (!node) { + fput(sync_file->file); + ret = -ENOMEM; + goto err; + } + + fd_install(fd, sync_file->file); + node->fence_array = fence_array; + + mutex_lock(&sync_dev.l_lock); + list_add_tail(&node->list, &sync_dev.fence_array_list); + mutex_unlock(&sync_dev.l_lock); + + pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences); + return fd; + +err: + dma_fence_put(&fence_array->base); +error_args: + put_unused_fd(fd); + return ret; +} + +static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_create_data f; + int fd; + + if (copy_from_user(&f, (void __user *)arg, sizeof(f))) + return -EFAULT; + + fd = spec_sync_create_array(&f); + if (fd < 0) + return fd; + + f.out_bind_fd = fd; + + if (copy_to_user((void __user *)arg, &f, sizeof(f))) + return -EFAULT; + + return 0; +} + +static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) +{ + struct dma_fence_array *fence_array; + struct dma_fence *fence = NULL; + struct dma_fence *user_fence = NULL; + struct dma_fence **fence_list; + int *user_fds, ret = 0, i; + u32 num_fences, counter; + + fence = sync_file_get_fence(sync_bind_info->out_bind_fd); + if (!fence) { + pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd); + return -EINVAL; + } + + fence_array = container_of(fence, struct dma_fence_array, base); + if (!sanitize_fence_array(fence_array)) { + pr_err("spec fence not found in the registered list out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + num_fences = fence_array->num_fences; + counter = num_fences; + + user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL); + if (!user_fds) { + ret = -ENOMEM; + goto end; + } + + fence_list = kmalloc_array(num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); + if (!fence_list) { + ret = -ENOMEM; + goto out; + } + + if (copy_from_user(user_fds, (void __user 
*)sync_bind_info->fds, + num_fences * sizeof(int))) { + kfree(fence_list); + ret = -EFAULT; + goto out; + } + + fence_array->fences = fence_list; + for (i = 0; i < num_fences; i++) { + user_fence = sync_file_get_fence(user_fds[i]); + if (!user_fence) { + pr_err("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", + user_fds[i], sync_bind_info->out_bind_fd); + counter = i; + ret = -EINVAL; + goto bind_invalid; + } + fence_array->fences[i] = user_fence; + pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd, + i, user_fds[i], fence_array->fences[i]->error); + } + + clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + dma_fence_enable_sw_signaling(&fence_array->base); + + clear_fence_array_tracker(false); + +bind_invalid: + if (ret) { + for (i = counter - 1; i >= 0; i--) + dma_fence_put(fence_array->fences[i]); + + kfree(fence_list); + fence_array->fences = NULL; + fence_array->num_fences = 0; + dma_fence_set_error(fence, -EINVAL); + dma_fence_signal(fence); + clear_fence_array_tracker(false); + } +out: + kfree(user_fds); +end: + dma_fence_put(fence); + return ret; +} + +static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg) +{ + struct fence_bind_data sync_bind_info; + + if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data))) + return -EFAULT; + + if (sync_bind_info.out_bind_fd < 0) { + pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd); + return -EINVAL; + } + + return spec_sync_bind_array(&sync_bind_info); +} + +static long spec_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_device *obj = file->private_data; + int ret = 0; + + switch (cmd) { + case SPEC_SYNC_IOC_CREATE_FENCE: + ret = spec_sync_ioctl_create_fence(obj, arg); + break; + case SPEC_SYNC_IOC_BIND: + ret = spec_sync_ioctl_bind(obj, arg); + break; + case SPEC_SYNC_IOC_GET_VER: + ret = spec_sync_ioctl_get_ver(obj, arg); + break; + default: + ret = -ENOTTY; + } + + return ret; +} + +const struct file_operations spec_sync_fops = { + .owner = THIS_MODULE, + .open = spec_sync_open, + .release = spec_sync_release, + .unlocked_ioctl = spec_sync_ioctl, +}; + +static int spec_sync_register_device(void) +{ + int ret; + + sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); + if (sync_dev.dev_class == NULL) { + pr_err("%s: class_create fail.\n", __func__); + goto res_err; + } + + ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME); + if (ret) { + pr_err("%s: alloc_chrdev_region fail.\n", __func__); + goto alloc_chrdev_region_err; + } + + sync_dev.dev = device_create(sync_dev.dev_class, NULL, + sync_dev.dev_num, + &sync_dev, DRV_NAME); + if (IS_ERR(sync_dev.dev)) { + pr_err("%s: device_create fail.\n", __func__); + goto device_create_err; + } + + sync_dev.cdev = cdev_alloc(); + if (sync_dev.cdev == NULL) { + pr_err("%s: cdev_alloc fail.\n", __func__); + goto cdev_alloc_err; + } + cdev_init(sync_dev.cdev, &spec_sync_fops); + sync_dev.cdev->owner = THIS_MODULE; + + ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1); + if (ret) { + pr_err("%s: cdev_add fail.\n", __func__); + goto cdev_add_err; + } + + sync_dev.version = DRV_VERSION; + mutex_init(&sync_dev.lock); + mutex_init(&sync_dev.l_lock); + INIT_LIST_HEAD(&sync_dev.fence_array_list); + + return 0; + +cdev_add_err: + cdev_del(sync_dev.cdev); +cdev_alloc_err: + device_destroy(sync_dev.dev_class, sync_dev.dev_num); +device_create_err: + unregister_chrdev_region(sync_dev.dev_num, 1); +alloc_chrdev_region_err: + 
class_destroy(sync_dev.dev_class); +res_err: + return -ENODEV; +} + +static int __init spec_sync_init(void) +{ + int ret = 0; + + ret = spec_sync_register_device(); + if (ret) { + pr_err("%s: speculative sync driver register fail.\n", __func__); + return ret; + } + return ret; +} + +static void __exit spec_sync_deinit(void) +{ + cdev_del(sync_dev.cdev); + device_destroy(sync_dev.dev_class, sync_dev.dev_num); + unregister_chrdev_region(sync_dev.dev_num, 1); + class_destroy(sync_dev.dev_class); +} + +module_init(spec_sync_init); +module_exit(spec_sync_deinit); + +MODULE_DESCRIPTION("QCOM Speculative Sync Driver"); +MODULE_LICENSE("GPL v2"); From 03c0ab32cb33fc66a27aa09277d592b51bcfbd29 Mon Sep 17 00:00:00 2001 From: Shashank Babu Chinta Venkata Date: Fri, 19 Nov 2021 10:29:59 -0800 Subject: [PATCH 03/77] mm-drivers: msm_ext_display: add snapshot Add snapshot of msm_ext_display driver in mm-drivers repo. Change-Id: Iaf70f09d3a95f564e08105d33cdc26cbb4981048 Signed-off-by: Shashank Babu Chinta Venkata Signed-off-by: Jeykumar Sankaran --- msm_ext_display/src/msm_ext_display.c | 702 ++++++++++++++++++++++++++ 1 file changed, 702 insertions(+) create mode 100644 msm_ext_display/src/msm_ext_display.c diff --git a/msm_ext_display/src/msm_ext_display.c b/msm_ext_display/src/msm_ext_display.c new file mode 100644 index 0000000000..57da7fe2ee --- /dev/null +++ b/msm_ext_display/src/msm_ext_display.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct msm_ext_disp_list { + struct msm_ext_disp_init_data *data; + struct list_head list; +}; + +struct msm_ext_disp { + struct msm_ext_disp_data ext_disp_data; + struct platform_device *pdev; + struct msm_ext_disp_codec_id current_codec; + struct msm_ext_disp_audio_codec_ops *ops; + struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS]; + bool audio_session_on; + struct list_head display_list; + struct mutex lock; + bool update_audio; +}; + +static const unsigned int msm_ext_disp_supported_cable[] = { + EXTCON_DISP_DP, + EXTCON_DISP_HDMI, + EXTCON_NONE, +}; + +static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id) +{ + int ret = 0; + + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid params\n"); + return -EINVAL; + } + + ext_disp->audio_sdev[id] = devm_extcon_dev_allocate( + &ext_disp->pdev->dev, + msm_ext_disp_supported_cable); + if (IS_ERR(ext_disp->audio_sdev[id])) + return PTR_ERR(ext_disp->audio_sdev[id]); + + ret = devm_extcon_dev_register(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); + if (ret) { + pr_err("audio registration failed\n"); + return ret; + } + + pr_debug("extcon registration done\n"); + + return ret; +} + +static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp, + int id) +{ + if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid params\n"); + return; + } + + devm_extcon_dev_unregister(&ext_disp->pdev->dev, + ext_disp->audio_sdev[id]); +} + +static const char *msm_ext_disp_name(enum msm_ext_disp_type type) +{ + switch (type) { + case EXT_DISPLAY_TYPE_HDMI: + return "EXT_DISPLAY_TYPE_HDMI"; + case EXT_DISPLAY_TYPE_DP: + return "EXT_DISPLAY_TYPE_DP"; + default: return "???"; + } +} + +static int 
msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->data = data; + + list_add(&node->list, &ext_disp->display_list); + + pr_debug("Added new display (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(data->codec.type), + data->codec.ctrl_id, data->codec.stream_id); + + return 0; +} + +static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_init_data *data) +{ + struct msm_ext_disp_list *node; + struct list_head *pos = NULL; + + if (!ext_disp || !data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + list_for_each(pos, &ext_disp->display_list) { + node = list_entry(pos, struct msm_ext_disp_list, list); + if (node->data == data) { + list_del(pos); + pr_debug("Deleted the intf data\n"); + kfree(node); + return 0; + } + } + + pr_debug("Intf data not present for delete op\n"); + + return 0; +} + +static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, + struct msm_ext_disp_init_data **data) +{ + int ret = 0; + struct msm_ext_disp_list *node; + struct list_head *position = NULL; + + if (!ext_disp || !data || !codec) { + pr_err("Invalid params\n"); + ret = -EINVAL; + goto end; + } + + *data = NULL; + list_for_each(position, &ext_disp->display_list) { + node = list_entry(position, struct msm_ext_disp_list, list); + if (node->data->codec.type == codec->type && + node->data->codec.stream_id == codec->stream_id && + node->data->codec.ctrl_id == codec->ctrl_id) { + *data = node->data; + break; + } + } + + if (!*data) + ret = -ENODEV; +end: + return ret; +} + +static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state new_state) +{ + int ret = 0; + int state; + struct extcon_dev *audio_sdev; + + if (!ext_disp->ops) { + pr_err("codec not registered, skip notification\n"); + ret = -EPERM; + goto end; + } + + audio_sdev = ext_disp->audio_sdev[codec->stream_id]; + + state = extcon_get_state(audio_sdev, codec->type); + if (state == !!new_state) { + ret = -EEXIST; + pr_debug("same state\n"); + goto end; + } + + ret = extcon_set_state_sync(audio_sdev, + codec->type, !!new_state); + if (ret) + pr_err("Failed to set state. 
Error = %d\n", ret); + else + pr_debug("state changed to %d\n", new_state); + +end: + return ret; +} + +static struct msm_ext_disp *msm_ext_disp_validate_and_get( + struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + struct msm_ext_disp_data *ext_disp_data; + struct msm_ext_disp *ext_disp; + + if (!pdev) { + pr_err("invalid platform device\n"); + goto err; + } + + if (!codec || + codec->type >= EXT_DISPLAY_TYPE_MAX || + codec->ctrl_id != 0 || + codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("invalid display codec id\n"); + goto err; + } + + if (state < EXT_DISPLAY_CABLE_DISCONNECT || + state >= EXT_DISPLAY_CABLE_STATE_MAX) { + pr_err("invalid HPD state (%d)\n", state); + goto err; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("invalid drvdata\n"); + goto err; + } + + ext_disp = container_of(ext_disp_data, + struct msm_ext_disp, ext_disp_data); + + return ext_disp; +err: + return ERR_PTR(-EINVAL); +} + +static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + + ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data); + if (ret || !data) { + pr_err("Display not found (%s) ctld (%d) stream (%d)\n", + msm_ext_disp_name(codec->type), + codec->ctrl_id, codec->stream_id); + goto end; + } + + if (ext_disp->ops) { + *ext_disp->ops = data->codec_ops; + ext_disp->current_codec = *codec; + + /* update pdev for interface to use */ + ext_disp->ext_disp_data.intf_pdev = data->pdev; + ext_disp->ext_disp_data.intf_data = data->intf_data; + } + +end: + return ret; +} + +static int msm_ext_disp_audio_config(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + if (state == EXT_DISPLAY_CABLE_CONNECT) { + ret = msm_ext_disp_select_audio_codec(pdev, codec); + } else { + mutex_lock(&ext_disp->lock); + if (ext_disp->ops) + memset(ext_disp->ops, 0, sizeof(*ext_disp->ops)); + + pr_debug("codec ops cleared for %s\n", + msm_ext_disp_name(ext_disp->current_codec.type)); + + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + mutex_unlock(&ext_disp->lock); + } +end: + return ret; +} + +static int msm_ext_disp_audio_notify(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp *ext_disp; + + ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state); + if (IS_ERR(ext_disp)) { + ret = PTR_ERR(ext_disp); + goto end; + } + + mutex_lock(&ext_disp->lock); + ret = msm_ext_disp_process_audio(ext_disp, codec, state); + mutex_unlock(&ext_disp->lock); +end: + return ret; +} + +static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp) +{ + int ret; + struct msm_ext_disp_init_data *data = NULL; + + if (!ext_disp) { + pr_err("invalid input\n"); + return; + } + + ret = msm_ext_disp_get_intf_data(ext_disp, + &ext_disp->current_codec, &data); + if (ret) { + pr_err("%s not found\n", + msm_ext_disp_name(ext_disp->current_codec.type)); + return; + } + + *ext_disp->ops = data->codec_ops; + data->codec_ops.ready(ext_disp->pdev); +} + +int msm_hdmi_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + return 
msm_ext_disp_register_audio_codec(pdev, ops); +} + +/** + * Register audio codec ops to display driver + * for HDMI/Display Port usecase support. + * + * @return 0 on success, negative value on error + * + */ +int msm_ext_disp_register_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_audio_codec_ops *ops) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !ops) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (ext_disp->ops) { + pr_err("Codec already registered\n"); + ret = -EINVAL; + goto end; + } + + ext_disp->ops = ops; + + pr_debug("audio codec registered\n"); + + if (ext_disp->update_audio) { + ext_disp->update_audio = false; + msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec); + msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec, + EXT_DISPLAY_CABLE_CONNECT); + } + +end: + mutex_unlock(&ext_disp->lock); + if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX) + msm_ext_disp_ready_for_display(ext_disp); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_register_audio_codec); + +int msm_ext_disp_select_audio_codec(struct platform_device *pdev, + struct msm_ext_disp_codec_id *codec) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !codec) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + if (!ext_disp->ops) { + pr_warn("Codec is not registered\n"); + ext_disp->update_audio = true; + ext_disp->current_codec = *codec; + ret = -EINVAL; + goto end; + } + + ret = msm_ext_disp_update_audio_ops(ext_disp, codec); + +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_select_audio_codec); + +static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data) +{ + struct msm_ext_disp_audio_codec_ops *ops; + + if (!init_data) { + pr_err("Invalid init_data\n"); + return -EINVAL; + } + + if (!init_data->pdev) { + pr_err("Invalid display intf pdev\n"); + return -EINVAL; + } + + if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX || + init_data->codec.ctrl_id != 0 || + init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) { + pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n", + init_data->codec.type, + init_data->codec.ctrl_id, + init_data->codec.stream_id); + return -EINVAL; + } + + ops = &init_data->codec_ops; + + if (!ops->audio_info_setup || !ops->get_audio_edid_blk || + !ops->cable_status || !ops->get_intf_id || + !ops->teardown_done || !ops->acknowledge || + !ops->ready) { + pr_err("Invalid codec operation pointers\n"); + return -EINVAL; + } + + return 0; +} + +int msm_ext_disp_register_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if 
(!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_validate_intf(init_data); + if (ret) + goto end; + + ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data); + if (!ret) { + pr_err("%s already registered. ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); + goto end; + } + + ret = msm_ext_disp_add_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = msm_ext_disp_audio_config; + init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify; + + pr_debug("%s registered. ctrl(%d) stream(%d)\n", + msm_ext_disp_name(init_data->codec.type), + init_data->codec.ctrl_id, + init_data->codec.stream_id); +end: + mutex_unlock(&ext_disp->lock); + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_register_intf); + +int msm_ext_disp_deregister_intf(struct platform_device *pdev, + struct msm_ext_disp_init_data *init_data) +{ + int ret = 0; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev || !init_data) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("Invalid drvdata\n"); + return -EINVAL; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + mutex_lock(&ext_disp->lock); + + ret = msm_ext_disp_remove_intf_data(ext_disp, init_data); + if (ret) + goto end; + + init_data->intf_ops.audio_config = NULL; + init_data->intf_ops.audio_notify = NULL; + + pr_debug("%s deregistered\n", + msm_ext_disp_name(init_data->codec.type)); +end: + mutex_unlock(&ext_disp->lock); + + return ret; +} +EXPORT_SYMBOL(msm_ext_disp_deregister_intf); + +static int msm_ext_disp_probe(struct platform_device *pdev) +{ + int ret = 0, id; + struct device_node *of_node = NULL; + struct msm_ext_disp *ext_disp = NULL; + + if (!pdev) { + pr_err("No platform device found\n"); + ret = -ENODEV; + goto end; + } + + of_node = pdev->dev.of_node; + if (!of_node) { + pr_err("No device node found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL); + if (!ext_disp) { + ret = -ENOMEM; + goto end; + } + + platform_set_drvdata(pdev, &ext_disp->ext_disp_data); + ext_disp->pdev = pdev; + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) { + ret = msm_ext_disp_extcon_register(ext_disp, id); + if (ret) + goto child_node_failure; + } + + ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev); + if (ret) { + pr_err("Failed to add child devices. 
Error = %d\n", ret); + goto child_node_failure; + } else { + pr_debug("%s: Added child devices.\n", __func__); + } + + mutex_init(&ext_disp->lock); + + INIT_LIST_HEAD(&ext_disp->display_list); + ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX; + ext_disp->update_audio = false; + + return ret; + +child_node_failure: + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + devm_kfree(&ext_disp->pdev->dev, ext_disp); +end: + return ret; +} + +static int msm_ext_disp_remove(struct platform_device *pdev) +{ + int ret = 0, id; + struct msm_ext_disp *ext_disp = NULL; + struct msm_ext_disp_data *ext_disp_data = NULL; + + if (!pdev) { + pr_err("No platform device\n"); + ret = -ENODEV; + goto end; + } + + ext_disp_data = platform_get_drvdata(pdev); + if (!ext_disp_data) { + pr_err("No drvdata found\n"); + ret = -ENODEV; + goto end; + } + + ext_disp = container_of(ext_disp_data, struct msm_ext_disp, + ext_disp_data); + + for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) + msm_ext_disp_extcon_unregister(ext_disp, id); + + mutex_destroy(&ext_disp->lock); + devm_kfree(&ext_disp->pdev->dev, ext_disp); + +end: + return ret; +} + +static const struct of_device_id msm_ext_dt_match[] = { + {.compatible = "qcom,msm-ext-disp",}, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, msm_ext_dt_match); + +static struct platform_driver this_driver = { + .probe = msm_ext_disp_probe, + .remove = msm_ext_disp_remove, + .driver = { + .name = "msm-ext-disp", + .of_match_table = msm_ext_dt_match, + }, +}; + +static int __init msm_ext_disp_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&this_driver); + if (ret) + pr_err("failed, ret = %d\n", ret); + + return ret; +} + +subsys_initcall(msm_ext_disp_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM External Display"); From 559a6bee2c2b577f3790475d8f2a4f43c94a2627 Mon Sep 17 00:00:00 2001 From: Jeykumar Sankaran Date: Wed, 15 Dec 2021 16:29:11 -0800 Subject: [PATCH 04/77] mm-drivers: enable mm-driver modules compilation Enable compilation of mm-driver modules and add scripts to copy the uapi header files. 
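For illustration only (not part of this change), a minimal userspace sketch of how the installed sync_fence UAPI header can be exercised once these copy scripts run; the include path follows the generated header layout above, and the /dev/spec_sync node name is an assumption derived from the driver's DRV_NAME:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sync_fence/qcom_sync_file.h>

int main(void)
{
	/* Node name assumed from DRV_NAME ("spec_sync"); adjust if the platform names it differently. */
	int dev = open("/dev/spec_sync", O_RDWR);
	struct fence_create_data create = { .num_fences = 2, .flags = SPEC_FENCE_SIGNAL_ANY };

	if (dev < 0)
		return 1;

	/* Ask the driver for a speculative fence array backed by two child fences. */
	if (ioctl(dev, SPEC_SYNC_IOC_CREATE_FENCE, &create) == 0)
		printf("speculative fence fd: %u\n", create.out_bind_fd);

	close(dev);
	return 0;
}
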
Change-Id: I0af6581ca96aa630c9707ef05abc4cccbfe92bab Signed-off-by: Jeykumar Sankaran --- Android.bp | 36 ++++++++++++++ Android.mk | 1 + config/kalamammdrivers.conf | 5 ++ config/kalamammdriversconf.h | 7 +++ mm_driver_board.mk | 12 +++++ mm_driver_product.mk | 3 ++ mm_drivers_kernel_headers.py | 95 ++++++++++++++++++++++++++++++++++++ msm_ext_display/Android.mk | 32 ++++++++++++ msm_ext_display/Kbuild | 10 ++++ msm_ext_display/Makefile | 15 ++++++ sync_fence/Android.mk | 32 ++++++++++++ sync_fence/Kbuild | 10 ++++ sync_fence/Makefile | 15 ++++++ 13 files changed, 273 insertions(+) create mode 100644 Android.bp create mode 100644 Android.mk create mode 100644 config/kalamammdrivers.conf create mode 100644 config/kalamammdriversconf.h create mode 100644 mm_driver_board.mk create mode 100644 mm_driver_product.mk create mode 100644 mm_drivers_kernel_headers.py create mode 100644 msm_ext_display/Android.mk create mode 100644 msm_ext_display/Kbuild create mode 100644 msm_ext_display/Makefile create mode 100644 sync_fence/Android.mk create mode 100644 sync_fence/Kbuild create mode 100644 sync_fence/Makefile diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000000..753cce932b --- /dev/null +++ b/Android.bp @@ -0,0 +1,36 @@ +headers_src = [ + "sync_fence/include/uapi/*/**/*.h", +] + +mm_drivers_headers_out = [ + "sync_fence/qcom_sync_file.h", +] + +mm_drivers_kernel_headers_verbose = "--verbose " +genrule { + name: "qti_generate_mm_drivers_kernel_headers", + tools: [ + "headers_install.sh", + "unifdef" + ], + tool_files: [ + "mm_drivers_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 $(location mm_drivers_kernel_headers.py) " + + mm_drivers_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: mm_drivers_headers_out, +} + +cc_library_headers { + name: "qti_mm_drivers_kernel_headers", + generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"], + vendor: true, + recovery_available: true +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..5053e7d643 --- /dev/null +++ b/Android.mk @@ -0,0 +1 @@ +include $(call all-subdir-makefiles) diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf new file mode 100644 index 0000000000..3df22e75d5 --- /dev/null +++ b/config/kalamammdrivers.conf @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2020, The Linux Foundation. All rights reserved. + +export CONFIG_MSM_EXT_DISPLAY=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h new file mode 100644 index 0000000000..26ca25d02f --- /dev/null +++ b/config/kalamammdriversconf.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_MSM_EXT_DISPLAY 1 diff --git a/mm_driver_board.mk b/mm_driver_board.mk new file mode 100644 index 0000000000..4ee3326519 --- /dev/null +++ b/mm_driver_board.mk @@ -0,0 +1,12 @@ +#SPDX-License-Identifier: GPL-2.0-only + +ifneq ($(TARGET_BOARD_AUTO),true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/sync_fence.ko + endif +endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk new file mode 100644 index 0000000000..2f0db285b4 --- /dev/null +++ b/mm_driver_product.mk @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +PRODUCT_PACKAGES += msm_ext_display.ko sync_fence.ko diff --git a/mm_drivers_kernel_headers.py b/mm_drivers_kernel_headers.py new file mode 100644 index 0000000000..67885a9446 --- /dev/null +++ b/mm_drivers_kernel_headers.py @@ -0,0 +1,95 @@ + # Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License version 2 as published by + # the Free Software Foundation. + # + # This program is distributed in the hope that it will be useful, but WITHOUT + # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # + # You should have received a copy of the GNU General Public License along with + # this program. If not, see . + +import argparse +import filecmp +import os +import re +import subprocess +import sys + +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + cmd = ["sh", headers_install, h, out_h] + + if True: + print('run_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi): + error_count = 0 + for h in mm_drivers_include_uapi: + mm_drivers_uapi_include_prefix = os.path.join(h.split('sync_fence/include/uapi')[0], + 'sync_fence', 'include', 'uapi') + os.sep + if not run_headers_install( + verbose, gen_dir, headers_install, unifdef, + mm_drivers_uapi_include_prefix, h): error_count += 1 + return error_count + +def main(): + """Parse command line arguments and perform top level control.""" + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + # Arguments that apply to every invocation of this script. 
+ parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--mm_drivers_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_mm_drivers_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.mm_drivers_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk new file mode 100644 index 0000000000..feff0e4139 --- /dev/null +++ b/msm_ext_display/Android.mk @@ -0,0 +1,32 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR) +KBUILD_OPTIONS += MODNAME=msm_ext_display +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_ext_display.ko +LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/msm_ext_display/Kbuild b/msm_ext_display/Kbuild new file mode 100644 index 0000000000..284134c0af --- /dev/null +++ b/msm_ext_display/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h + +obj-m += msm_ext_display.o + +msm_ext_display-y := src/msm_ext_display.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/msm_ext_display/Makefile b/msm_ext_display/Makefile new file mode 100644 index 0000000000..31a8ce65bd --- /dev/null +++ b/msm_ext_display/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f 
*.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk new file mode 100644 index 0000000000..59ee256f05 --- /dev/null +++ b/sync_fence/Android.mk @@ -0,0 +1,32 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=sync_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync_fence.ko +LOCAL_MODULE_KBUILD_NAME := sync_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild new file mode 100644 index 0000000000..48cb10624b --- /dev/null +++ b/sync_fence/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ + +obj-m += sync_fence.o + +sync_fence-y := src/qcom_sync_file.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/sync_fence/Makefile b/sync_fence/Makefile new file mode 100644 index 0000000000..ecd6ef1771 --- /dev/null +++ b/sync_fence/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions From 40f846646bdf59667eb577a84fde8cd8e3cb9181 Mon Sep 17 00:00:00 2001 From: Sandeep Gangadharaiah Date: Tue, 8 Feb 2022 13:48:19 -0500 Subject: [PATCH 05/77] mm-drivers: msm_ext_display: export msm-ext-display module symbols msm_ext_display module symbols have to be exported before they can be used by external kernel modules. This change updates the makefile for the same. 
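As a hedged illustration (not part of this change), exporting Module.symvers is what lets another DLKM link against the symbols msm_ext_display already exports with EXPORT_SYMBOL, for example:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/msm_ext_display.h>	/* header name is an assumption; use the actual ext-display header */

struct msm_ext_disp_audio_codec_ops example_codec_ops; /* populated by the codec driver */

/* ext_disp_pdev must be the msm-ext-disp platform device that owns the drvdata. */
int example_bind_audio(struct platform_device *ext_disp_pdev)
{
	/* Resolved against msm_ext_display.ko through the exported Module.symvers. */
	return msm_ext_disp_register_audio_codec(ext_disp_pdev, &example_codec_ops);
}
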
Change-Id: I86dbc2d8bbc0a3a0d640172ef0aebc03723eecc8 Signed-off-by: Sandeep Gangadharaiah --- msm_ext_display/Android.mk | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk index feff0e4139..78d659c784 100644 --- a/msm_ext_display/Android.mk +++ b/msm_ext_display/Android.mk @@ -18,6 +18,16 @@ KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR) KBUILD_OPTIONS += MODNAME=msm_ext_display KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +########################################################### +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm-ext-disp-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk ########################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) From 5b5214916c3fe0f104bdf59f5ca61b93e928051f Mon Sep 17 00:00:00 2001 From: Jeykumar Sankaran Date: Fri, 1 Apr 2022 19:07:41 -0700 Subject: [PATCH 06/77] mm-drivers: sync_fence: avoid compiling spec_fence driver for taro sync_fence driver is maintained in kernel SI for all the taro variants. Since the Display SI 3.0 is shared with taro dev SI variant, avoid compiling sync_fence as dlkm for taro target. Change-Id: Icc7990812256a42efad7a8945c08338f83ee0914 Signed-off-by: Jeykumar Sankaran --- Android.mk | 7 ++++++- mm_driver_board.mk | 15 +++++++++------ mm_driver_product.mk | 6 +++++- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/Android.mk b/Android.mk index 5053e7d643..d9bbda84c5 100644 --- a/Android.mk +++ b/Android.mk @@ -1 +1,6 @@ -include $(call all-subdir-makefiles) +MM_DRIVER_PATH := $(call my-dir) +include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk +ifneq ($(TARGET_BOARD_PLATFORM), taro) +include $(MM_DRIVER_PATH)/sync_fence/Android.mk +endif + diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 4ee3326519..0563c64f97 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,11 +2,14 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + + ifneq ($(TARGET_BOARD_PLATFORM), taro) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko + endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 2f0db285b4..4d74d27bf4 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,3 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += msm_ext_display.ko sync_fence.ko +PRODUCT_PACKAGES += 
msm_ext_display.ko + +ifneq ($(TARGET_BOARD_PLATFORM), taro) +PRODUCT_PACKAGES += sync_fence.ko +endif From 03173f7cc81b11d359203f7ece6f3e41e7b91dc7 Mon Sep 17 00:00:00 2001 From: Narendra Muppalla Date: Wed, 30 Mar 2022 12:14:59 -0700 Subject: [PATCH 07/77] mm-drivers: sync-fence: add changes to serialize fence operations This change acquires fence_lock to serialize the enable_sw signalling operation on dma_fence_array. It bails out safely if the bind operation is called twice on the spec fence. The error level for bind failure with invalid user fd is changed to warning as this case can be treated non fatal. Change-Id: I688cbc84ba3cfb49c54de9b5e1bf8a9ec9d8da3a Signed-off-by: Narendra Muppalla --- sync_fence/src/qcom_sync_file.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 3cb2178412..3c006cc35b 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -276,6 +276,13 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) ret = -EINVAL; goto end; } + + if (fence_array->fences) { + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", + sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags); + goto end; + } + num_fences = fence_array->num_fences; counter = num_fences; @@ -298,11 +305,12 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto out; } + spin_lock(fence->lock); fence_array->fences = fence_list; for (i = 0; i < num_fences; i++) { user_fence = sync_file_get_fence(user_fds[i]); if (!user_fence) { - pr_err("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", + pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", user_fds[i], sync_bind_info->out_bind_fd); counter = i; ret = -EINVAL; @@ -314,6 +322,7 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) } clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + spin_unlock(fence->lock); dma_fence_enable_sw_signaling(&fence_array->base); clear_fence_array_tracker(false); @@ -327,6 +336,7 @@ bind_invalid: fence_array->fences = NULL; fence_array->num_fences = 0; dma_fence_set_error(fence, -EINVAL); + spin_unlock(fence->lock); dma_fence_signal(fence); clear_fence_array_tracker(false); } From 7deaa672388e7433d849acca267e517fbc8f69d6 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 4 Feb 2022 14:14:18 -0800 Subject: [PATCH 08/77] mm-drivers: sync: add api to wait for sync fence bind This change adds an api to check if a given speculative fence is bound. If fence is not bound, it will wait for the speculative fence ioctl to bind the fence, or else timeout. Change-Id: I9a86d09df410e89137264be47763ae39f06eea2b Signed-off-by: Ingrid Gallardo --- config/kalamammdrivers.conf | 1 + config/kalamammdriversconf.h | 1 + sync_fence/Kbuild | 2 ++ sync_fence/src/qcom_sync_file.c | 35 ++++++++++++++++++++++++++++++++- 4 files changed, 38 insertions(+), 1 deletion(-) diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf index 3df22e75d5..4f932958dd 100644 --- a/config/kalamammdrivers.conf +++ b/config/kalamammdrivers.conf @@ -3,3 +3,4 @@ # Copyright (c) 2020, The Linux Foundation. All rights reserved. 
export CONFIG_MSM_EXT_DISPLAY=y +export CONFIG_QCOM_SPEC_SYNC=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h index 26ca25d02f..59c3a05b66 100644 --- a/config/kalamammdriversconf.h +++ b/config/kalamammdriversconf.h @@ -5,3 +5,4 @@ */ #define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_QCOM_SPEC_SYNC 1 diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index 48cb10624b..b91ec8c93c 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -3,8 +3,10 @@ KDIR := $(TOP)/kernel_platform/msm-kernel LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ +ifdef CONFIG_QCOM_SPEC_SYNC obj-m += sync_fence.o sync_fence-y := src/qcom_sync_file.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index 3c006cc35b..b3ecf4eb1f 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -20,13 +20,13 @@ #include #include #include +#include #define CLASS_NAME "sync" #define DRV_NAME "spec_sync" #define DRV_VERSION 1 #define NAME_LEN 32 -#define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10 /* user flags for debug */ #define FENCE_MIN 1 #define FENCE_MAX 32 @@ -44,6 +44,7 @@ struct sync_device { uint32_t version; struct mutex l_lock; struct list_head fence_array_list; + wait_queue_head_t wait_queue; }; struct fence_array_node { @@ -254,6 +255,34 @@ static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long _ return 0; } +int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms) +{ + int ret; + + /* Check if fence-array is a speculative fence */ + if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) { + pr_err("invalid fence!\n"); + return -EINVAL; + } else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) { + /* This fence-array is already bound, just return success */ + return 0; + } + + /* Wait for the fence-array bind */ + ret = wait_event_timeout(sync_dev.wait_queue, + test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags), + msecs_to_jiffies(timeout_ms)); + if (!ret) { + pr_err("timed out waiting for bind fence-array %d\n", timeout_ms); + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL(spec_sync_wait_bind_array); + static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) { struct dma_fence_array *fence_array; @@ -328,6 +357,9 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) clear_fence_array_tracker(false); bind_invalid: + set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags); + wake_up_all(&sync_dev.wait_queue); + if (ret) { for (i = counter - 1; i >= 0; i--) dma_fence_put(fence_array->fences[i]); @@ -434,6 +466,7 @@ static int spec_sync_register_device(void) mutex_init(&sync_dev.lock); mutex_init(&sync_dev.l_lock); INIT_LIST_HEAD(&sync_dev.fence_array_list); + init_waitqueue_head(&sync_dev.wait_queue); return 0; From 136755f1817b4a7ccfe08365215b82e2934b2e64 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 18 Apr 2022 20:47:21 -0700 Subject: [PATCH 09/77] mm-drivers: sync: resolve compilation of sync fence driver Sync fence driver setup only compiles if the CONFIG flag for sync driver is set, however, this requires config files of the parent folder to be included, which currently is not happening. Resolve this problem by including the parent mm-drivers config files. 
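For context, a hedged sketch (not part of this change) of the kind of consumer that depends on this driver actually compiling: the spec_sync_wait_bind_array() helper exported in the previous patch can be called from another module roughly as below; the extern declaration stands in for the real sync_fence header, and the 100 ms timeout is an arbitrary example value.

#include <linux/dma-fence-array.h>
#include <linux/errno.h>

/* Provided by sync_fence.ko; exact declaring header is an assumption. */
extern int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms);

int example_wait_for_spec_bind(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	/* Not a fence array, so it cannot be a speculative fence. */
	if (!array)
		return -EINVAL;

	/* Blocks until the BIND ioctl populates the array, or times out. */
	return spec_sync_wait_bind_array(array, 100);
}
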
Change-Id: I812612b71003ed007d60c046dcef5bcbe09f6e7c Signed-off-by: Ingrid Gallardo --- sync_fence/Kbuild | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index b91ec8c93c..fd631a4348 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -2,6 +2,8 @@ KDIR := $(TOP)/kernel_platform/msm-kernel LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/ +include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h ifdef CONFIG_QCOM_SPEC_SYNC obj-m += sync_fence.o From 77ae3f31f06e55407773e9546e607972164880d6 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 17 Nov 2021 17:16:04 -0800 Subject: [PATCH 10/77] mm-drivers: hw_fence: Add support for hw-fence driver This change adds support for the hw-fence driver that initialize, expose and manage the interfaces for the hw-fences, which are the synchronization primitives to allow the hardware to hardware signalization of the fences for the frame buffers shared between gpu and display hw-cores. Change-Id: If2313585d5a9f3ac90e16aad3464600641a6fa04 Signed-off-by: Ingrid Gallardo --- Android.mk | 1 + config/kalamammdrivers.conf | 1 + config/kalamammdriversconf.h | 1 + hw_fence/Android.mk | 41 + hw_fence/Kbuild | 18 + hw_fence/Makefile | 14 + hw_fence/include/hw_fence_drv_debug.h | 61 ++ hw_fence/include/hw_fence_drv_ipc.h | 90 ++ hw_fence/include/hw_fence_drv_priv.h | 386 ++++++++ hw_fence/include/hw_fence_drv_utils.h | 113 +++ hw_fence/src/hw_fence_drv_debug.c | 1000 +++++++++++++++++++ hw_fence/src/hw_fence_drv_ipc.c | 247 +++++ hw_fence/src/hw_fence_drv_priv.c | 1317 +++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_utils.c | 644 ++++++++++++ hw_fence/src/msm_hw_fence.c | 486 +++++++++ mm_driver_board.mk | 9 +- mm_driver_product.mk | 2 +- 17 files changed, 4427 insertions(+), 4 deletions(-) create mode 100644 hw_fence/Android.mk create mode 100644 hw_fence/Kbuild create mode 100644 hw_fence/Makefile create mode 100644 hw_fence/include/hw_fence_drv_debug.h create mode 100644 hw_fence/include/hw_fence_drv_ipc.h create mode 100644 hw_fence/include/hw_fence_drv_priv.h create mode 100644 hw_fence/include/hw_fence_drv_utils.h create mode 100644 hw_fence/src/hw_fence_drv_debug.c create mode 100644 hw_fence/src/hw_fence_drv_ipc.c create mode 100644 hw_fence/src/hw_fence_drv_priv.c create mode 100644 hw_fence/src/hw_fence_drv_utils.c create mode 100644 hw_fence/src/msm_hw_fence.c diff --git a/Android.mk b/Android.mk index d9bbda84c5..c703795324 100644 --- a/Android.mk +++ b/Android.mk @@ -1,5 +1,6 @@ MM_DRIVER_PATH := $(call my-dir) include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk +include $(MM_DRIVER_PATH)/hw_fence/Android.mk ifneq ($(TARGET_BOARD_PLATFORM), taro) include $(MM_DRIVER_PATH)/sync_fence/Android.mk endif diff --git a/config/kalamammdrivers.conf b/config/kalamammdrivers.conf index 4f932958dd..4e657d38be 100644 --- a/config/kalamammdrivers.conf +++ b/config/kalamammdrivers.conf @@ -4,3 +4,4 @@ export CONFIG_MSM_EXT_DISPLAY=y export CONFIG_QCOM_SPEC_SYNC=y +export CONFIG_QTI_HW_FENCE=y diff --git a/config/kalamammdriversconf.h b/config/kalamammdriversconf.h index 59c3a05b66..b9cb331bda 100644 --- a/config/kalamammdriversconf.h +++ b/config/kalamammdriversconf.h @@ -6,3 +6,4 @@ #define CONFIG_MSM_EXT_DISPLAY 1 #define CONFIG_QCOM_SPEC_SYNC 1 +#define CONFIG_QTI_HW_FENCE 1 diff --git a/hw_fence/Android.mk b/hw_fence/Android.mk new file mode 100644 index 0000000000..bad9f10b96 --- /dev/null +++ b/hw_fence/Android.mk @@ -0,0 +1,41 
@@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR) +KBUILD_OPTIONS += MODNAME=msm_hw_fence +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := hw-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_hw_fence.ko +LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild new file mode 100644 index 0000000000..fcd6b6e7bb --- /dev/null +++ b/hw_fence/Kbuild @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel +include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf +LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ + +ifdef CONFIG_QTI_HW_FENCE +obj-m += msm_hw_fence.o + +msm_hw_fence-y := src/msm_hw_fence.o \ + src/hw_fence_drv_priv.o \ + src/hw_fence_drv_utils.o \ + src/hw_fence_drv_debug.o \ + src/hw_fence_drv_ipc.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +endif diff --git a/hw_fence/Makefile b/hw_fence/Makefile new file mode 100644 index 0000000000..ac6afd73be --- /dev/null +++ b/hw_fence/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h new file mode 100644 index 0000000000..d980331113 --- /dev/null +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __HW_FENCE_DRV_DEBUG +#define __HW_FENCE_DRV_DEBUG + +enum hw_fence_drv_prio { + HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ + HW_FENCE_LOW = 0x000002, /* Low density debug messages */ + HW_FENCE_INFO = 0x000004, /* Informational prints */ + HW_FENCE_INIT = 0x00008, /* Initialization logs */ + HW_FENCE_QUEUE = 0x000010, /* Queue logs */ + HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ + HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ + HW_FENCE_PRINTK = 0x010000, +}; + +extern u32 msm_hw_fence_debug_level; + +#define dprintk(__level, __fmt, ...) \ + do { \ + if (msm_hw_fence_debug_level & __level) \ + if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \ + pr_err(__fmt, ##__VA_ARGS__); \ + } while (0) + + +#define HWFNC_ERR(fmt, ...) \ + pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +#define HWFNC_DBG_H(fmt, ...) \ + dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_L(fmt, ...) \ + dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INFO(fmt, ...) \ + dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_INIT(fmt, ...) \ + dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_Q(fmt, ...) \ + dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_LUT(fmt, ...) \ + dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_DBG_IRQ(fmt, ...) \ + dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define HWFNC_WARN(fmt, ...) \ + pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ + __builtin_return_address(0), ##__VA_ARGS__) + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); + +#endif /* __HW_FENCE_DRV_DEBUG */ diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h new file mode 100644 index 0000000000..c24781ac36 --- /dev/null +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_IPC_H +#define __HW_FENCE_DRV_IPC_H + +#define HW_FENCE_IPC_CLIENT_ID_APPS 8 +#define HW_FENCE_IPC_CLIENT_ID_GPU 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU 25 + +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA 2 + +#define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ +#define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ +#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kailua */ + +#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \ + (base + 0x14 + (0x40000*p) + (0x1000*c)) +#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + (0x1000*c)) + +/** + * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair. + * @drv_data: driver data. + * @tx_client_id: ipc client id that sends the ipc signal. + * @rx_client_id: ipc client id that receives the ipc signal. 
+ * @signal_id: signal id to send. + * + * This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id' + */ +void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, + u32 tx_client_id, u32 rx_client_id, u32 signal_id); + +/** + * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver. + * @drv_data: driver data. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); + +#ifdef HW_DPU_IPCC +/** + * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client. + * @drv_data: driver data. + * + * Return: 0 on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); +#endif /* HW_DPU_IPCC */ + +/** + * hw_fence_ipcc_get_client_id() - Returns the ipc client id that corresponds to the hw fence + * driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence + * driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc signal id returned by this API is used by the hw fence driver when signaling the fence. + * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs to update rxq, false otherwise + */ +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); + +#endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h new file mode 100644 index 0000000000..e15fd4159c --- /dev/null +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_INTERNAL_H +#define __HW_FENCE_DRV_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include + +/* Add define only for platforms that support IPCC in dpu-hw */ +#define HW_DPU_IPCC 1 + +/* max u64 to indicate invalid fence */ +#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) + +/* hash algorithm constants */ +#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */ +#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */ + +/* number of queues per type (i.e. 
ctrl or client queues) */ +#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */ +#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */ + +/* hfi headers calculation */ +#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header)) +#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header)) + +#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES)) + +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES)) + +/* + * Max Payload size is the bigest size of the message that we can have in the CTRL queue + * in this case the max message is calculated like following, using 32-bits elements: + * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error + */ +#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32)) + +#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE +#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) + +/* Locks area for all the clients */ +#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1)) + +#define HW_FENCE_TX_QUEUE 1 +#define HW_FENCE_RX_QUEUE 2 + +/* ClientID for the internal join fence, this is used by the framework when creating a join-fence */ +#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0) + +/** + * msm hw fence flags: + * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled + */ +#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0) + +/** + * MSM_HW_FENCE_MAX_JOIN_PARENTS: + * Maximum number of parents that a fence can have for a join-fence + */ +#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3 + +enum hw_fence_lookup_ops { + HW_FENCE_LOOKUP_OP_CREATE = 0x1, + HW_FENCE_LOOKUP_OP_DESTROY, + HW_FENCE_LOOKUP_OP_CREATE_JOIN, + HW_FENCE_LOOKUP_OP_FIND_FENCE +}; + +/** + * enum hw_fence_loopback_id - Enum with the clients having a loopback signal (i.e AP to AP signal). + * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. + * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. + */ +enum hw_fence_loopback_id { + HW_FENCE_LOOPBACK_DPU_CTL_0, + HW_FENCE_LOOPBACK_DPU_CTL_1, + HW_FENCE_LOOPBACK_DPU_CTL_2, + HW_FENCE_LOOPBACK_DPU_CTL_3, + HW_FENCE_LOOPBACK_DPU_CTL_4, + HW_FENCE_LOOPBACK_DPU_CTL_5, + HW_FENCE_LOOPBACK_GFX_CTX_0, + HW_FENCE_LOOPBACK_MAX, +}; + +#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) + +/** + * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues. + * @va_queue: pointer to the virtual address of the queue elements + * @q_size_bytes: size of the queue + * @va_header: pointer to the hfi header virtual address + * @pa_queue: physical address of the queue + */ +struct msm_hw_fence_queue { + void *va_queue; + u32 q_size_bytes; + void *va_header; + phys_addr_t pa_queue; +}; + +/** + * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. 
+ * @client_id: id of the client + * @mem_descriptor: hfi header memory descriptor + * @queues: queues descriptor + * @ipc_signal_id: id of the signal to be triggered for this client + * @ipc_client_id: id of the ipc client for this hw fence driver client + * @update_rxq: bool to indicate if client uses rx-queue + */ +struct msm_hw_fence_client { + enum hw_fence_client_id client_id; + struct msm_hw_fence_mem_addr mem_descriptor; + struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; + int ipc_signal_id; + int ipc_client_id; + bool update_rxq; +}; + +/** + * struct msm_hw_fence_mem_data - Structure holding internal memory attributes + * + * @attrs: attributes for the memory allocation + */ +struct msm_hw_fence_mem_data { + unsigned long attrs; +}; + +/** + * struct msm_hw_fence_dbg_data - Structure holding debugfs data + * + * @root: debugfs root + * @entry_rd: flag to indicate if debugfs dumps a single line or table + * @context_rd: debugfs setting to indicate which context id to dump + * @seqno_rd: debugfs setting to indicate which seqno to dump + * @hw_fence_sim_release_delay: delay in micro seconds for the debugfs node that simulates the + * hw-fences behavior, to release the hw-fences + * @create_hw_fences: boolean to continuosly create hw-fences within debugfs + * @clients_list: list of debug clients registered + * @clients_list_lock: lock to synchronize access to the clients list + */ +struct msm_hw_fence_dbg_data { + struct dentry *root; + + bool entry_rd; + u64 context_rd; + u64 seqno_rd; + + u32 hw_fence_sim_release_delay; + bool create_hw_fences; + + struct list_head clients_list; + struct mutex clients_list_lock; +}; + +/** + * struct hw_fence_driver_data - Structure holding internal hw-fence driver data + * + * @dev: device driver pointer + * @resources_ready: value set by driver at end of probe, once all resources are ready + * @hw_fence_table_entries: total number of hw-fences in the global table + * @hw_fence_mem_fences_table_size: hw-fences global table total size + * @hw_fence_queue_entries: total number of entries that can be available in the queue + * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload + * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq + * @hw_fence_client_queue_size: size of the client queue for the payload + * @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq + * @hw_fences_tbl: pointer to the hw-fences table + * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @client_lock_tbl: pointer to the per-client locks table + * @client_lock_tbl_cnt: number of elements in the locks table + * @hw_fences_mem_desc: memory descriptor for the hw-fence table + * @clients_locks_mem_desc: memory descriptor for the locks table + * @ctrl_queue_mem_desc: memory descriptor for the ctrl queues + * @ctrl_queues: pointer to the ctrl queues + * @io_mem_base: pointer to the carved-out io memory + * @res: resources for the carved out memory + * @size: size of the carved-out memory + * @label: label for the carved-out memory (this is used by SVM to find the memory) + * @peer_name: peer name for this carved-out memory + * @rm_nb: hyp resource manager notifier + * @memparcel: memparcel for the allocated memory + * @db_label: doorbell label + * @rx_dbl: handle to the Rx doorbell + * @debugfs_data: debugfs info + * @ipcc_reg_base: base for ipcc regs mapping + * @ipcc_io_mem: base for the ipcc io mem map + * @ipcc_size: size of the ipcc io mem mapping + * 
@protocol_id: ipcc protocol id used by this driver + * @ipcc_client_id: ipcc client id for this driver + * @ipc_clients_table: table with the ipcc mapping for each client of this driver + * @qtime_reg_base: qtimer register base address + * @qtime_io_mem: qtimer io mem map + * @qtime_size: qtimer io mem map size + * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) + * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) + * @client_id_mask: bitmask for tracking registered client_ids + * @clients_mask_lock: lock to synchronize access to the clients mask + * @msm_hw_fence_client: table with the handles of the registered clients + * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized + */ +struct hw_fence_driver_data { + + struct device *dev; + bool resources_ready; + + /* Table & Queues info */ + u32 hw_fence_table_entries; + u32 hw_fence_mem_fences_table_size; + u32 hw_fence_queue_entries; + /* ctrl queues */ + u32 hw_fence_ctrl_queue_size; + u32 hw_fence_mem_ctrl_queues_size; + /* client queues */ + u32 hw_fence_client_queue_size; + u32 hw_fence_mem_clients_queues_size; + + /* HW Fences Table VA */ + struct msm_hw_fence *hw_fences_tbl; + u32 hw_fences_tbl_cnt; + + /* Table with a Per-Client Lock */ + u64 *client_lock_tbl; + u32 client_lock_tbl_cnt; + + /* Memory Descriptors */ + struct msm_hw_fence_mem_addr hw_fences_mem_desc; + struct msm_hw_fence_mem_addr clients_locks_mem_desc; + struct msm_hw_fence_mem_addr ctrl_queue_mem_desc; + struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES]; + + /* carved out memory */ + void __iomem *io_mem_base; + struct resource res; + size_t size; + u32 label; + u32 peer_name; + struct notifier_block rm_nb; + u32 memparcel; + + /* doorbell */ + u32 db_label; + + /* VM virq */ + void *rx_dbl; + + /* debugfs */ + struct msm_hw_fence_dbg_data debugfs_data; + + /* ipcc regs */ + phys_addr_t ipcc_reg_base; + void __iomem *ipcc_io_mem; + uint32_t ipcc_size; + u32 protocol_id; + u32 ipcc_client_id; + + /* table with mapping of ipc client for each hw-fence client */ + struct hw_fence_client_ipc_map *ipc_clients_table; + + /* qtime reg */ + phys_addr_t qtime_reg_base; + void __iomem *qtime_io_mem; + uint32_t qtime_size; + + /* base address for dpu ctl start regs */ + void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; + uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; + + /* bitmask for tracking registered client_ids */ + u64 client_id_mask; + struct mutex clients_mask_lock; + + /* table with registered client handles */ + struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; +#ifdef HW_DPU_IPCC + /* state variables */ + bool ipcc_dpu_initialized; +#endif /* HW_DPU_IPCC */ +}; + +/** + * struct msm_hw_fence_queue_payload - hardware fence clients queues payload. + * @ctxt_id: context id of the dma fence + * @seqno: sequence number of the dma fence + * @hash: fence hash + * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions + * @error: error code for this fence, fence controller receives this + * error from the signaling client through the tx queue and + * propagates the error to the waiting client through rx queue + */ +struct msm_hw_fence_queue_payload { + u64 ctxt_id; + u64 seqno; + u64 hash; + u64 flags; + u32 error; + u32 unused; /* align to 64-bit */ +}; + +/** + * struct msm_hw_fence - structure holding each hw fence data. + * @valid: field updated when a hw-fence is reserved. 
True if hw-fence is in use + * @error: field to hold a hw-fence error + * @ctx_id: context id + * @seq_id: sequence id + * @wait_client_mask: bitmask holding the waiting-clients of the fence + * @fence_allocator: field to indicate the client_id that reserved the fence + * @fence_signal-client: + * @lock: this field is required to share information between the Driver & Driver || + * Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock. + * @flags: field to indicate the state of the fence + * @parent_list: list of indexes with the parents for a child-fence in a join-fence + * @parent_cnt: total number of parents for a child-fence in a join-fence + * @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic + * or locked + * @fence_create_time: debug info with the create time timestamp + * @fence_trigger_time: debug info with the trigger time timestamp + * @fence_wait_time: debug info with the register-for-wait timestamp + * @debug_refcount: refcount used for debugging + */ +struct msm_hw_fence { + u32 valid; + u32 error; + u64 ctx_id; + u64 seq_id; + u64 wait_client_mask; + u32 fence_allocator; + u32 fence_signal_client; + u64 lock; /* Datatype must be 64-bit. */ + u64 flags; + u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS]; + u32 parents_cnt; + u32 pending_child_cnt; + u64 fence_create_time; + u64 fence_trigger_time; + u64 fence_wait_time; + u64 debug_refcount; +}; + +int hw_fence_init(struct hw_fence_driver_data *drv_data); +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor); +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client); +void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash); +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno); +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array); +int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence); +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u32 error, int queue_type); +inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); +int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_queue_payload *payload, int queue_type); +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash); + +#endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h new file mode 100644 index 0000000000..092bb625cf --- /dev/null +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -0,0 +1,113 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __HW_FENCE_DRV_UTILS_H +#define __HW_FENCE_DRV_UTILS_H + +/** + * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. + * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues. + * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region. + * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table. + * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues. + */ +enum hw_fence_mem_reserve { + HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + HW_FENCE_MEM_RESERVE_LOCKS_REGION, + HW_FENCE_MEM_RESERVE_TABLE, + HW_FENCE_MEM_RESERVE_CLIENT_QUEUE +}; + +/** + * global_atomic_store() - Inter-processor lock + * @lock: memory to lock + * @val: if true, api locks the memory, if false it unlocks the memory + */ +void global_atomic_store(uint64_t *lock, bool val); + +/** + * hw_fence_utils_init_virq() - Initialilze doorbell (i.e. vIRQ) for SVM to HLOS signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients + * this API is only exported for simulation purposes. + * @drv_data: hw fence driver data. + * @db_flags: doorbell flag + */ +void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags); + +/** + * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW + * Fence global table, locks and queues. + * @hw_fence_drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data); + +/** + * hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool. + * @drv_data: hw fence driver data. + * @type: memory reservation type. + * @phys: physical address of the carved-out memory pool + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id); + +/** + * hw_fence_utils_parse_dt_props() - Init dt properties + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_ipcc() - Maps IPCC registers and enable signaling + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_qtime() - Maps qtime register + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_map_ctl_start() - Maps ctl_start registers from dpu hw + * @drv_data: hw fence driver data + * + * Returns zero if success, otherwise returns negative error code. This API is only used + * for simulation purposes in platforms where dpu does not support ipc signal. 
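[Editor's note] global_atomic_store() declared above is the inter-processor lock used around the per-client lock entries shared with the fence controller. The sketch below only illustrates the intended lock/update/unlock calling convention; it is not code from this patch, it assumes the hw_fence_drv_priv.h and hw_fence_drv_utils.h declarations are in scope, and the hypothetical lock_entry pointer is assumed to reference an entry of drv_data->client_lock_tbl.

/*
 * Minimal sketch (not part of this patch): take the per-client
 * inter-processor lock, mark a fence entry signaled with an error,
 * and release the lock again.
 */
static void example_mark_fence_error(struct msm_hw_fence *fence,
		uint64_t *lock_entry, u32 error)
{
	global_atomic_store(lock_entry, true);	/* lock */
	fence->error = error;
	fence->flags |= MSM_HW_FENCE_FLAG_SIGNAL;
	global_atomic_store(lock_entry, false);	/* unlock */
}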
+ */ +int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data); + +/** + * hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client + * @drv_data: hw fence driver data + * @hw_fence_client: client, for which the fence must be cleared + * @hw_fence: hw-fence to cleanup + * @hash: hash of the hw-fence to cleanup + * @reset_flags: flags to determine how to handle the reset + * + * Returns zero if success, otherwise returns negative error code. + */ +int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u32 reset_flags); + +#endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c new file mode 100644 index 0000000000..c047a3b251 --- /dev/null +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_utils.h" + +#define HW_FENCE_NAME_SIZE 64 +#define HW_FENCE_DEBUG_MAX_LOOPS 200 + +u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; + +/** + * struct client_data - Structure holding the data of the debug clients. + * + * @client_id: client id. + * @dma_context: context id to create the dma-fences for the client. + * @seqno_cnt: sequence number, this is a counter to simulate the seqno for debugging. + * @client_handle: handle for the client, this is returned by the hw-fence driver after + * a successful registration of the client. + * @mem_descriptor: memory descriptor for the client-queues. This is populated by the hw-fence + * driver after a successful registration of the client. + * @list: client node. + */ +struct client_data { + int client_id; + u64 dma_context; + u64 seqno_cnt; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; + struct list_head list; +}; + +/** + * struct hw_dma_fence - fences created by hw-fence for debugging. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. + * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. 
+ */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; +}; + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static int _get_debugfs_input_client(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos, + struct hw_fence_driver_data **drv_data) +{ + char buf[10]; + int client_id; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", !file); + return -EINVAL; + } + *drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &client_id)) + return -EFAULT; + + if (client_id < HW_FENCE_CLIENT_ID_CTX0 || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_MAX); + return -EINVAL; + } + + return client_id; +} + +static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos, u32 tx_client, u32 rx_client) +{ + struct hw_fence_driver_data *drv_data; + int client_id, signal_id; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = hw_fence_ipcc_get_signal_id(drv_data, client_id); + if (signal_id < 0) + return -EINVAL; + + HWFNC_DBG_IRQ("client_id:%d ipcc write tx_client:%d rx_client:%d signal_id:%d qtime:%llu\n", + client_id, tx_client, rx_client, signal_id, hw_fence_get_qtime(drv_data)); + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + return count; +} + +/** + * hw_fence_dbg_ipcc_write() - debugfs write to trigger an ipcc irq. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to apps for that client id. + */ +static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, + HW_FENCE_IPC_CLIENT_ID_APPS); +} + +#ifdef HW_DPU_IPCC +/** + * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal + * from apps to dpu for that client id. 
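[Editor's note] The ipc_trigger and dpu_trigger nodes described above take a single hw-fence client id as input, so they can be driven from userspace with a plain write. The following is a hypothetical userspace sketch, not part of this patch: it assumes debugfs is mounted at /sys/kernel/debug, that the node names match the ones registered by hw_fence_debug_debugfs_register() later in this file, and the example client id of 2 assumes HW_FENCE_CLIENT_ID_CTL0 has that value.

/* Hypothetical userspace helper: write a hw-fence client id to one of
 * the debugfs trigger nodes exposed by this driver. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int trigger_hw_fence_irq(const char *node, int client_id)
{
	char path[128], buf[16];
	int fd, len;

	snprintf(path, sizeof(path), "/sys/kernel/debug/hw_fence/%s", node);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	len = snprintf(buf, sizeof(buf), "%d", client_id);
	if (write(fd, buf, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

/* e.g. trigger_hw_fence_irq("dpu_trigger", 2), assuming client id 2 is
 * HW_FENCE_CLIENT_ID_CTL0 on the target. */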
+ */ +static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, + HW_FENCE_IPC_CLIENT_ID_DPU); + +} + +static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_dpu_write, +}; +#endif /* HW_DPU_IPCC */ + +static const struct file_operations hw_fence_dbg_ipcc_fops = { + .open = simple_open, + .write = hw_fence_dbg_ipcc_write, +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) +{ + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { + HWFNC_ERR("invalid hwfence data, won't release hw_fence\n"); + return; + } + + /* release hw-fence */ + if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) + HWFNC_ERR("failed to release hw_fence\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; + + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +struct client_data *_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + struct client_data *node = NULL; + bool found = false; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_for_each_entry(node, &drv_data->debugfs_data.clients_list, list) { + if (node->client_id == client_id) { + found = true; + break; + } + } + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return found ? node : NULL; +} + +/** + * hw_fence_dbg_reset_client_wr() - debugfs write to trigger reset in a debug hw-fence client. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id, and triggers a reset for + * this client. Note that this operation will only perform on hw-fence clients created through + * the debug framework. 
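[Editor's note] The reset path exercised by the node documented above mirrors what a real client would do in its own teardown or error-recovery path. A minimal kernel-side sketch follows; it is illustrative only, assumes the client kept the handle returned by msm_hw_fence_register(), and passes 0 as the reset-flags argument exactly as the debugfs handler below does.

/* Illustrative only (not part of this patch): reset a client's hw-fence
 * state using the handle obtained at registration time. */
static void example_client_recover(void *hw_fence_handle)
{
	int ret;

	if (IS_ERR_OR_NULL(hw_fence_handle))
		return;

	ret = msm_hw_fence_reset_client(hw_fence_handle, 0);
	if (ret)
		pr_err("hw-fence client reset failed: %d\n", ret);
}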
+ */ +static ssize_t hw_fence_dbg_reset_client_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id, ret; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + HWFNC_DBG_H("resetting client: %d\n", client_id); + ret = msm_hw_fence_reset_client(client_info->client_handle, 0); + if (ret) + HWFNC_ERR("failed to reset client:%d\n", client_id); + + return count; +} + +/** + * hw_fence_dbg_register_clients_wr() - debugfs write to register a client with the hw-fence + * driver for debugging. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter a hw-fence driver client_id to register for debug. + * Note that if the client_id received was already registered by any other driver, the + * registration here will fail. + */ +static ssize_t hw_fence_dbg_register_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + int client_id; + struct client_data *client_info; + struct hw_fence_driver_data *drv_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + /* we cannot create same debug client twice */ + if (_get_client_node(drv_data, client_id)) { + HWFNC_ERR("client:%d already registered as debug client\n", client_id); + return -EINVAL; + } + + client_info = kzalloc(sizeof(*client_info), GFP_KERNEL); + if (!client_info) + return -ENOMEM; + + HWFNC_DBG_H("register client %d\n", client_id); + client_info->client_handle = msm_hw_fence_register(client_id, + &client_info->mem_descriptor); + if (IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("error registering as debug client:%d\n", client_id); + client_info->client_handle = NULL; + return -EFAULT; + } + + client_info->dma_context = dma_fence_context_alloc(1); + client_info->client_id = client_id; + + mutex_lock(&drv_data->debugfs_data.clients_list_lock); + list_add(&client_info->list, &drv_data->debugfs_data.clients_list); + mutex_unlock(&drv_data->debugfs_data.clients_list_lock); + + return count; +} + +struct hw_fence_out_clients_map { + int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_signal_id; /* ipc signal id for the hw fence client */ +}; + +/* NOTE: These signals are the ones that the actual clients should be triggering, hw-fence driver + * does not need to have knowledge of these signals. Adding them here for debugging purposes. 
+ * Only fence controller and the cliens know these id's, since these + * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' + * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 12} /* CTL5 */ +}; + +/** + * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the number of iterations that the simulation will run, + * each iteration will: create, signal, register-for-signal and destroy a hw-fence. + * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. + */ +static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + u32 input_data, client_id_src, client_id_dst, tx_client, rx_client; + struct client_data *client_info_src, *client_info_dst; + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_client *hw_fence_client, *hw_fence_client_dst; + u64 context, seqno, hash; + char buf[10]; + int signal_id, ret; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (kstrtouint(buf, 0, &input_data)) + return -EFAULT; + + if (input_data <= 0) { + HWFNC_ERR("won't do anything, write value greather than 0 to start..\n"); + return 0; + } else if (input_data > HW_FENCE_DEBUG_MAX_LOOPS) { + HWFNC_ERR("requested loops:%d exceed max:%d, setting max\n", input_data, + HW_FENCE_DEBUG_MAX_LOOPS); + input_data = HW_FENCE_DEBUG_MAX_LOOPS; + } + + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + /* Make sure we registered this client through debugfs */ + HWFNC_ERR("client_id_src:%d or client_id_dst:%d not registered as debug client!\n", + client_id_src, client_id_dst); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + hw_fence_client_dst = (struct msm_hw_fence_client *)client_info_dst->client_handle; + + while (drv_data->debugfs_data.create_hw_fences && input_data > 0) { + + /***********************************************************/ + /***** SRC CLIENT - CREATE HW FENCE & TX QUEUE UPDATE ******/ + /***********************************************************/ + + /* 
we will use the context and the seqno of the source client */ + context = client_info_src->dma_context; + seqno = client_info_src->seqno_cnt; + + /* linear increment of the seqno for the src client*/ + client_info_src->seqno_cnt++; + + /* Create hw fence for src client */ + ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, &hash); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + goto exit; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash, + 0, 0, HW_FENCE_TX_QUEUE - 1); // no flags and no error + + /**********************************************/ + /***** DST CLIENT - REGISTER WAIT CLIENT ******/ + /**********************************************/ + /* use same context and seqno that src client used to create fence */ + ret = hw_fence_register_wait_client(drv_data, hw_fence_client_dst, context, seqno); + if (ret) { + HWFNC_ERR("failed to register for wait\n"); + return -EINVAL; + } + + /*********************************************/ + /***** SRC CLIENT - TRIGGER IPCC SIGNAL ******/ + /*********************************************/ + + /* AFTER THIS IS WHEN SVM WILL GET CALLED AND WILL PROCESS SRC AND DST CLIENTS */ + + /* Trigger IPCC for SVM to read the queue */ + + /* Get signal-id that hw-fence driver would trigger for this client */ + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + /* Write to ipcc to trigger the irq */ + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n", + client_id_src, tx_client, rx_client, signal_id, + drv_data->debugfs_data.hw_fence_sim_release_delay, input_data); + + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + /********************************************/ + /******** WAIT ******************************/ + /********************************************/ + + /* wait between iterations */ + usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + + /******************************************/ + /***** SRC CLIENT - CLEANUP HW FENCE ******/ + /******************************************/ + + /* cleanup hw fence for src client */ + ret = hw_fence_destroy(drv_data, hw_fence_client, context, seqno); + if (ret) { + HWFNC_ERR("Error destroying HW fence\n"); + goto exit; + } + + input_data--; + } /* LOOP.. */ + +exit: + return count; +} + +/** + * hw_fence_dbg_create_wr() - debugfs write to simulate the creation of a hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameter the client-id, for which the hw-fence will be created. + * Note that this simulation relies in the user first registering the client as a debug-client + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the client is not previously + * registered as debug-client, this simulation will fail and won't run. 
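[Editor's note] The create path described above goes through the public msm_hw_fence_* entry points used throughout this file. Condensed into one place, the client-side sequence looks roughly like the sketch below; it is illustrative only, error handling is trimmed, the example_* names are hypothetical, and it assumes the driver's public header (whatever exports msm_hw_fence_register/create/destroy and struct msm_hw_fence_create_params) is included.

/*
 * Illustrative sketch (not part of this patch) of the client-side calls:
 * register once, back a dma-fence with a hw-fence, destroy it on release.
 */
static void *example_handle;
static struct msm_hw_fence_mem_addr example_mem_desc;

static int example_register(int client_id)
{
	example_handle = msm_hw_fence_register(client_id, &example_mem_desc);
	return IS_ERR_OR_NULL(example_handle) ? -EINVAL : 0;
}

static int example_back_fence(struct dma_fence *fence, u64 *hash)
{
	struct msm_hw_fence_create_params params;

	params.fence = fence;	/* dma-fence to back with a hw-fence */
	params.handle = hash;	/* returns the hw-fence hash/handle */
	return msm_hw_fence_create(example_handle, &params);
}

static void example_release_fence(struct dma_fence *fence)
{
	msm_hw_fence_destroy(example_handle, fence);
}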
+ */ +static ssize_t hw_fence_dbg_create_wr(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_driver_data *drv_data; + struct client_data *client_info; + struct hw_dma_fence *dma_fence; + spinlock_t *fence_lock; + static u64 hw_fence_dbg_seqno = 1; + int client_id, ret; + u64 hash; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + client_info = _get_client_node(drv_data, client_id); + if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) { + HWFNC_ERR("client:%d not registered as debug client\n", client_id); + return -EINVAL; + } + + /* create debug dma_fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + kfree(fence_lock); + return -ENOMEM; + } + + snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + client_id, client_info->dma_context, hw_fence_dbg_seqno); + + spin_lock_init(fence_lock); + dma_fence_init(&dma_fence->base, &hw_fence_dbg_ops, fence_lock, + client_info->dma_context, hw_fence_dbg_seqno); + + HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", client_id, + client_info->dma_context, hw_fence_dbg_seqno); + params.fence = &dma_fence->base; + params.handle = &hash; + ret = msm_hw_fence_create(client_info->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + client_id, client_info->dma_context, hw_fence_dbg_seqno); + dma_fence_put(&dma_fence->base); + return -EINVAL; + } + hw_fence_dbg_seqno++; + + /* keep handle in dma_fence, to destroy hw-fence during release */ + dma_fence->client_handle = client_info->client_handle; + + return count; +} + +#define HFENCE_TBL_MSG \ + "[%d]hfence[%d] v:%d err:%d ctx:%d seqno:%d wait:0x%llx alloc:%d f:0x%lx tt:%llu wt:%llu\n" + +static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, + u32 index, u32 cnt) +{ + int ret; + + ret = scnprintf(buf + len, max_size - len, HFENCE_TBL_MSG, + cnt, index, hw_fence->valid, hw_fence->error, + hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, + hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + + HWFNC_DBG_L(HFENCE_TBL_MSG, + cnt, index, hw_fence->valid, hw_fence->error, + hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, + hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + + return ret; +} + +static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, + int max_size) +{ + struct msm_hw_fence *hw_fence; + u64 context, seqno, hash = 0; + int len = 0; + + context = drv_data->debugfs_data.context_rd; + seqno = drv_data->debugfs_data.seqno_rd; + + hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu", context, seqno, hash); + len = scnprintf(buf + len, max_size - len, + "no valid hfence found for context:%lu seqno:%lu hash:%lu\n", + context, seqno, hash); + + goto exit; + } + + len = _dump_fence(hw_fence, buf, len, max_size, hash, 0); + +exit: + /* move idx to end of table to stop the dump */ + *index = drv_data->hw_fences_tbl_cnt; + + return len; +} + +static int dump_full_table(struct hw_fence_driver_data 
*drv_data, char *buf, u32 *index, + u32 *cnt, int max_size, int entry_size) +{ + struct msm_hw_fence *hw_fence; + int len = 0; + + while (((*index)++ < drv_data->hw_fences_tbl_cnt) && (len < (max_size - entry_size))) { + hw_fence = &drv_data->hw_fences_tbl[*index]; + + if (!hw_fence->valid) + continue; + + len += _dump_fence(hw_fence, buf, len, max_size, *index, *cnt); + (*cnt)++; + } + + return len; +} + +/** + * hw_fence_dbg_dump_table_rd() - debugfs read to dump the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps the hw-fence table. By default debugfs will dump all the valid entries of the + * whole table. However, if user only wants to dump only one particular entry, user can provide the + * context-id and seqno of the dma-fence of interest by writing to this debugfs node (see + * documentation for the write in 'hw_fence_dbg_dump_table_wr'). + */ +static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + int entry_size = sizeof(struct msm_hw_fence); + char *buf = NULL; + int len = 0, max_size = SZ_4K; + static u32 index, cnt; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->hw_fences_tbl) { + HWFNC_ERR("Failed to dump table: Null fence table\n"); + return -EINVAL; + } + + if (index >= drv_data->hw_fences_tbl_cnt) { + HWFNC_DBG_H("no more data index:%d cnt:%d\n", index, drv_data->hw_fences_tbl_cnt); + index = cnt = 0; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = drv_data->debugfs_data.entry_rd ? + dump_single_entry(drv_data, buf, &index, max_size) : + dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size); + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + +/** + * hw_fence_dbg_dump_table_wr() - debugfs write to control the dump of the hw-fences table. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs receives as parameters the settings to dump either the whole hw-fences table + * or only one element on the table in the next read of the same debugfs node. + * If this debugfs receives two input values, it will interpret them as the 'context-id' and the + * 'sequence-id' to dump from the hw-fence table in the subsequent reads of the debugfs. + * Otherwise, if the debugfs receives only one input value, the next read from the debugfs, will + * dump the whole hw-fences table. 
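[Editor's note] In practice the write/read pair described above is driven from userspace: writing "<context> <seqno>" selects a single entry for the next read, while writing a single value restores the full-table dump. A hypothetical sketch follows; it assumes debugfs is mounted at /sys/kernel/debug, that the node is the hw_fence_dump_table file registered at the end of this file, and the context/seqno values are examples only.

/* Hypothetical userspace sketch: select one context/seqno pair, then
 * read that entry back from the hw-fence table dump. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/hw_fence/hw_fence_dump_table";
	char out[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return 1;

	/* two values => dump only context 100, seqno 7 on the next read */
	if (write(fd, "100 7", 5) < 0) {
		close(fd);
		return 1;
	}

	n = read(fd, out, sizeof(out) - 1);
	if (n > 0) {
		out[n] = '\0';
		printf("%s", out);
	}
	close(fd);
	return 0;
}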
+ */ +static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, + const char __user *user_buf, size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u64 param_0, param_1; + char buf[24]; + int num_input_params; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (user_buf_size >= sizeof(buf)) { + HWFNC_ERR("wrong size:%d size:%d\n", user_buf_size, sizeof(buf)); + return -EFAULT; + } + + if (copy_from_user(buf, user_buf, user_buf_size)) + return -EFAULT; + + buf[user_buf_size] = 0; /* end of string */ + + /* read the input params */ + num_input_params = sscanf(buf, "%lu %lu", ¶m_0, ¶m_1); + + if (num_input_params == 2) { /* if debugfs receives two input params */ + drv_data->debugfs_data.context_rd = param_0; + drv_data->debugfs_data.seqno_rd = param_1; + drv_data->debugfs_data.entry_rd = true; + } else if (num_input_params == 1) { /* if debugfs receives one param */ + drv_data->debugfs_data.context_rd = 0; + drv_data->debugfs_data.seqno_rd = 0; + drv_data->debugfs_data.entry_rd = false; + } else { + HWFNC_ERR("invalid num params:%d\n", num_input_params); + return -EFAULT; + } + + return user_buf_size; +} + +static void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int idx; + + for (idx = i; idx >= 0 ; idx--) { + kfree(fences_lock[idx]); + + dma_fence = to_hw_dma_fence(fences[idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} + +/** + * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. + * @file: file handler. + * @user_buf: user buffer content from debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs will: create, signal, register-for-signal and destroy a join hw-fence. + * Note that this simulation relies in the user first registering the clients as debug-clients + * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously + * registered as debug-clients, this simulation will fail and won't run. 
+ */ +static ssize_t hw_fence_dbg_create_join_fence(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct dma_fence_array *fence_array; + struct hw_fence_driver_data *drv_data; + struct dma_fence *fence_array_fence; + struct client_data *client_info_src, *client_info_dst; + u64 hw_fence_dbg_seqno = 1; + int client_id_src, client_id_dst; + struct msm_hw_fence_create_params params; + int i, ret = 0; + u64 hash; + struct msm_hw_fence_client *hw_fence_client; + int tx_client, rx_client, signal_id; + + /* creates 3 fences and a parent fence */ + int num_fences = 3; + struct dma_fence **fences = NULL; + spinlock_t **fences_lock = NULL; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + client_id_src = HW_FENCE_CLIENT_ID_CTL0; + client_id_dst = HW_FENCE_CLIENT_ID_CTL1; + client_info_src = _get_client_node(drv_data, client_id_src); + client_info_dst = _get_client_node(drv_data, client_id_dst); + if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) || + !client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) { + HWFNC_ERR("client_src:%d or client:%d is not register as debug client\n", + client_id_src, client_id_dst); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle; + + fences_lock = kcalloc(num_fences, sizeof(*fences_lock), GFP_KERNEL); + if (!fences_lock) + return -ENOMEM; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + kfree(fences_lock); + return -ENOMEM; + } + + /* Create the array of dma fences */ + for (i = 0; i < num_fences; i++) { + struct hw_dma_fence *dma_fence; + + fences_lock[i] = kzalloc(sizeof(*fences_lock), GFP_KERNEL); + if (!fences_lock[i]) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + _cleanup_fences(i, fences, fences_lock); + return -ENOMEM; + } + fences[i] = &dma_fence->base; + + spin_lock_init(fences_lock[i]); + dma_fence_init(fences[i], &hw_fence_dbg_ops, fences_lock[i], + client_info_src->dma_context, hw_fence_dbg_seqno + i); + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, + client_info_src->dma_context, hw_fence_dbg_seqno + num_fences, 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + _cleanup_fences(num_fences - 1, fences, fences_lock); + return -EINVAL; + } + + /* create hw fence and write to tx queue for each dma fence */ + for (i = 0; i < num_fences; i++) { + params.fence = fences[i]; + params.handle = &hash; + + ret = msm_hw_fence_create(client_info_src->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + count = -EINVAL; + goto error; + } + + /* Write to Tx queue */ + hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context, + hw_fence_dbg_seqno + i, hash, 0, 0, + HW_FENCE_TX_QUEUE - 1); + } + + /* wait on the fence array */ + fence_array_fence = &fence_array->base; + msm_hw_fence_wait_update(client_info_dst->client_handle, &fence_array_fence, 1, 1); + + signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; + if (signal_id < 0) { + count = -EINVAL; + goto error; + } + + /* write to ipcc to trigger the irq */ + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); + + 
usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, + (drv_data->debugfs_data.hw_fence_sim_release_delay + 5)); + +error: + /* this frees the memory for the fence-array and each dma-fence */ + dma_fence_put(&fence_array->base); + + /* + * free array of pointers, no need to call kfree in 'fences', since that is released + * from the fence-array release api + */ + kfree(fences_lock); + + return count; +} + +static const struct file_operations hw_fence_reset_client_fops = { + .open = simple_open, + .write = hw_fence_dbg_reset_client_wr, +}; + +static const struct file_operations hw_fence_register_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_register_clients_wr, +}; + +static const struct file_operations hw_fence_tx_and_signal_clients_fops = { + .open = simple_open, + .write = hw_fence_dbg_tx_and_signal_clients_wr, +}; + +static const struct file_operations hw_fence_create_fops = { + .open = simple_open, + .write = hw_fence_dbg_create_wr, +}; + +static const struct file_operations hw_fence_dump_table_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_table_wr, + .read = hw_fence_dbg_dump_table_rd, +}; + +static const struct file_operations hw_fence_create_join_fence_fops = { + .open = simple_open, + .write = hw_fence_dbg_create_join_fence, +}; + +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) +{ + struct dentry *debugfs_root; + + debugfs_root = debugfs_create_dir("hw_fence", NULL); + if (IS_ERR_OR_NULL(debugfs_root)) { + HWFNC_ERR("debugfs_root create_dir fail, error %ld\n", + PTR_ERR(debugfs_root)); + drv_data->debugfs_data.root = NULL; + return -EINVAL; + } + + mutex_init(&drv_data->debugfs_data.clients_list_lock); + INIT_LIST_HEAD(&drv_data->debugfs_data.clients_list); + drv_data->debugfs_data.root = debugfs_root; + drv_data->debugfs_data.create_hw_fences = true; + drv_data->debugfs_data.hw_fence_sim_release_delay = 8333; /* uS */ + + debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_fops); +#ifdef HW_DPU_IPCC + debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data, + &hw_fence_dbg_ipcc_dpu_fops); +#endif /* HW_DPU_IPCC */ + debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data, + &hw_fence_reset_client_fops); + debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data, + &hw_fence_register_clients_fops); + debugfs_create_file("hw_fence_tx_and_signal", 0600, debugfs_root, drv_data, + &hw_fence_tx_and_signal_clients_fops); + debugfs_create_file("hw_fence_create_join_fence", 0600, debugfs_root, drv_data, + &hw_fence_create_join_fence_fops); + debugfs_create_bool("create_hw_fences", 0600, debugfs_root, + &drv_data->debugfs_data.create_hw_fences); + debugfs_create_u32("sleep_range_us", 0600, debugfs_root, + &drv_data->debugfs_data.hw_fence_sim_release_delay); + debugfs_create_file("hw_fence_create", 0600, debugfs_root, drv_data, + &hw_fence_create_fops); + debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); + debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, + &hw_fence_dump_table_fops); + + return 0; +} + +#else +int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) +{ + return 0; +} +#endif /* CONFIG_DEBUG_FS */ diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c new file mode 100644 index 0000000000..7879d4f788 --- /dev/null +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * 
Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +/** + * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. + * @ipc_client_id: ipc client id for the hw-fence client. + * @ipc_signal_id: ipc signal id for the hw-fence client. + * @update_rxq: bool to indicate if clinet uses rx-queue. + */ +struct hw_fence_client_ipc_map { + int ipc_client_id; + int ipc_signal_id; + bool update_rxq; +}; + +/** + * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which + * is used by the hw fence driver to trigger ipc signal when the hw fence is already + * signaled. + * This no_dpu version is for targets that do not support dpu client id + * + * Notes: + * The index of this struct must match the enum hw_fence_client_id. + * To change to a loopback signal instead of GMU, change ctx0 row to use: + * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ +}; + +/** + * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id. + * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ +}; + +int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id; +} + +int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_signal_id; +} + +bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].update_rxq; +} + +/** + * _get_ipc_client_name() - Returns ipc client name, used for debugging. 
+ */ +static inline char *_get_ipc_client_name(u32 client_id) +{ + switch (client_id) { + case HW_FENCE_IPC_CLIENT_ID_APPS: + return "APPS"; + case HW_FENCE_IPC_CLIENT_ID_GPU: + return "GPU"; + case HW_FENCE_IPC_CLIENT_ID_DPU: + return "DPU"; + } + + return "UNKNOWN"; +} + +void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, + u32 tx_client_id, u32 rx_client_id, u32 signal_id) +{ + void __iomem *ptr; + u32 val; + + /* Send signal */ + ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id, + tx_client_id); + val = (rx_client_id << 16) | signal_id; + + HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n", + _get_ipc_client_name(tx_client_id), tx_client_id, + _get_ipc_client_name(rx_client_id), rx_client_id, + signal_id, val, ptr); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + /* Make sure value is written */ + wmb(); +} + +/** + * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data, + * according to the ipcc hw revision. + * @drv_data: driver data. + * @hwrev: ipcc hw revision. + */ +static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev) +{ + switch (hwrev) { + case HW_FENCE_IPCC_HW_REV_100: + drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA; + drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; + HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n"); + break; + case HW_FENCE_IPCC_HW_REV_110: + drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO; + drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; + HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n"); + break; + case HW_FENCE_IPCC_HW_REV_170: + drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA; + drv_data->ipc_clients_table = hw_fence_clients_ipc_map; + HWFNC_DBG_INIT("ipcc protocol_id: Kailua\n"); + break; + default: + return -1; + } + + return 0; +} + +int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) +{ + void __iomem *ptr; + u32 val; + + HWFNC_DBG_H("enable ipc +\n"); + + /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ + val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, + HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, HW_FENCE_IPC_CLIENT_ID_APPS)); + HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + + if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { + HWFNC_ERR("ipcc protocol id not supported\n"); + return -EINVAL; + } + + /* Enable compute l1 (protocol_id = 2) */ + val = 0x00000000; + ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, + HW_FENCE_IPC_CLIENT_ID_APPS); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + /* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */ + val = 0x000080000; + ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, + HW_FENCE_IPC_CLIENT_ID_APPS); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); + writel_relaxed(val, ptr); + + HWFNC_DBG_H("enable ipc -\n"); + + return 0; +} + +#ifdef HW_DPU_IPCC +int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) +{ + struct hw_fence_client_ipc_map *hw_fence_client; + void __iomem *ptr; + u32 val; + int i; + + 
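	/*
	 * A minimal sketch of the value packing used for the register writes
	 * below (the helper name is illustrative only); it appears to follow the
	 * same (client_id << 16) | signal_id layout used by
	 * hw_fence_ipcc_trigger_signal() above, with APPS (0x8) in the upper half
	 * (0x8 << 16 == 0x80000), matching the "from DPU to APPS" pairs enabled
	 * here:
	 *
	 *	u32 recv_signal_enable_val(u32 signal_id)
	 *	{
	 *		return 0x000080000 | (signal_id & 0xFFFF);
	 *	}
	 *
	 * e.g. ctl0 (ipc_signal_id 0) -> 0x80000, ctl3 (ipc_signal_id 3) -> 0x80003.
	 */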
HWFNC_DBG_H("enable dpu ipc +\n"); + + if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) { + HWFNC_ERR("invalid drv data\n"); + return -1; + } + + HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem); + + /* + * Enable compute l1 (protocol_id = 2) for dpu (25) + * Sets bit(1) to clear when RECV_ID is read + */ + val = 0x00000001; + ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, + HW_FENCE_IPC_CLIENT_ID_DPU); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + writel_relaxed(val, ptr); + + HWFNC_DBG_H("Initialize dpu signals\n"); + /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ + for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) { + hw_fence_client = &drv_data->ipc_clients_table[i]; + + /* skip any client that is not a dpu client */ + if (hw_fence_client->ipc_client_id != HW_FENCE_IPC_CLIENT_ID_DPU) + continue; + + /* Enable signals for dpu client */ + HWFNC_DBG_H("dpu:%d client:%d signal:%d\n", hw_fence_client->ipc_client_id, i, + hw_fence_client->ipc_signal_id); + val = 0x000080000 | (hw_fence_client->ipc_signal_id & 0xFFFF); + ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, + drv_data->protocol_id, HW_FENCE_IPC_CLIENT_ID_DPU); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + writel_relaxed(val, ptr); + } + + HWFNC_DBG_H("enable dpu ipc -\n"); + + return 0; +} +#endif /* HW_DPU_IPCC */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c new file mode 100644 index 0000000000..edecc41cbc --- /dev/null +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -0,0 +1,1317 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +/* Global atomic lock */ +#define GLOBAL_ATOMIC_STORE(lock, val) global_atomic_store(lock, val) + +inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) +{ + return readl_relaxed(drv_data->qtime_io_mem); +} + +static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve mem_reserve_id, + struct msm_hw_fence_mem_addr *mem_descriptor, + struct msm_hw_fence_queue *queues, int queues_num, + int client_id) +{ + struct msm_hw_fence_hfi_queue_table_header *hfi_table_header; + struct msm_hw_fence_hfi_queue_header *hfi_queue_header; + void *ptr, *qptr; + phys_addr_t phys, qphys; + u32 size, start_queue_offset; + int headers_size, queue_size; + int i, ret = 0; + + HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); + switch (mem_reserve_id) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE; + queue_size = drv_data->hw_fence_ctrl_queue_size; + break; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE; + queue_size = drv_data->hw_fence_client_queue_size; + break; + default: + HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); + return -EINVAL; + } + + /* Reserve Virtual and Physical memory for HFI headers */ + ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id); + if (ret) { + HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + 
mem_descriptor->virtual_addr = ptr;
+	mem_descriptor->device_addr = phys;
+	mem_descriptor->size = size; /* bytes */
+	mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */
+
+	HWFNC_DBG_INIT("Initialize headers\n");
+	/* Initialize headers info within hfi memory */
+	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
+	hfi_table_header->version = 0;
+	hfi_table_header->size = size; /* bytes */
+	/* Offset, from the Base Address, where the first queue header starts */
+	hfi_table_header->qhdr0_offset =
+		sizeof(struct msm_hw_fence_hfi_queue_table_header);
+	hfi_table_header->qhdr_size =
+		sizeof(struct msm_hw_fence_hfi_queue_header);
+	hfi_table_header->num_q = queues_num; /* number of queues */
+	hfi_table_header->num_active_q = queues_num;
+
+	/* Initialize Queues Info within HFI memory */
+
+	/*
+	 * Calculate the offset where the hfi queue headers start, which is at
+	 * the end of the hfi table header
+	 */
+	HWFNC_DBG_INIT("Initialize queues\n");
+	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
+		((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
+	for (i = 0; i < queues_num; i++) {
+		HWFNC_DBG_INIT("init queue[%d]\n", i);
+
+		/* Calculate the offset where the Queue starts */
+		start_queue_offset = headers_size + (i * queue_size); /* Bytes */
+		qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
+		qptr = (char *)ptr + start_queue_offset; /* start of the va for queue elems */
+
+		/* Set the physical start address in the HFI queue header */
+		hfi_queue_header->start_addr = qphys;
+
+		/* Set the queue type (i.e. RX or TX queue) */
+		hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;
+
+		/* Set the size of this queue */
+		hfi_queue_header->queue_size = queue_size;
+
+		/* Store Memory info in the Client data */
+		queues[i].va_queue = qptr;
+		queues[i].pa_queue = qphys;
+		queues[i].va_header = hfi_queue_header;
+		queues[i].q_size_bytes = queue_size;
+		HWFNC_DBG_INIT("init:%s client:%d queue[%d]: va=0x%pK pa=0x%x va_hd:0x%pK sz:%d\n",
+			hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
+			client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header,
+			queues[i].q_size_bytes);
+
+		/* Next header */
+		hfi_queue_header++;
+	}
+
+	return ret;
+}
+
+static inline bool _lock_client_queue(int queue_type)
+{
+	/* Only lock Rx Queue */
+	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false;
+}
+
+char *_get_queue_type(int queue_type)
+{
+	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ?
"RXQ" : "TXQ"; +} + +int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_queue_payload *payload, int queue_type) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 read_idx; + u32 write_idx; + u32 to_read_idx; + u32 *read_ptr; + u32 payload_size_u32; + u32 q_size_u32; + struct msm_hw_fence_queue_payload *read_ptr_payload; + + if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { + HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type, + hw_fence_client, payload); + return -EINVAL; + } + + queue = &hw_fence_client->queues[queue_type]; + hfi_header = queue->va_header; + + q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + HWFNC_DBG_Q("sizeof payload:%d\n", sizeof(struct msm_hw_fence_queue_payload)); + + if (!hfi_header || !payload) { + HWFNC_ERR("Invalid queue\n"); + return -EINVAL; + } + + /* Get read and write index */ + read_idx = readl_relaxed(&hfi_header->read_index); + write_idx = readl_relaxed(&hfi_header->write_index); + + /* Make sure we read the values */ + rmb(); + + HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", + hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, + read_idx, write_idx, queue); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Nothing to read!\n"); + return 0; + } + + /* Move the pointer where we need to read and cast it */ + read_ptr = ((u32 *)queue->va_queue + read_idx); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr, + queue->va_queue, queue->pa_queue, read_ptr_payload); + + /* Calculate the index after the read */ + to_read_idx = read_idx + payload_size_u32; + + /* + * wrap-around case, here we are reading the last element of the queue, therefore set + * to_read_idx, which is the index after the read, to the beginning of the + * queue + */ + if (to_read_idx >= q_size_u32) + to_read_idx = 0; + + /* Read the Client Queue */ + payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); + payload->seqno = readq_relaxed(&read_ptr_payload->seqno); + payload->hash = readq_relaxed(&read_ptr_payload->hash); + payload->flags = readq_relaxed(&read_ptr_payload->flags); + payload->error = readl_relaxed(&read_ptr_payload->error); + + /* update the read index */ + writel_relaxed(to_read_idx, &hfi_header->read_index); + + /* update memory for the index */ + wmb(); + + /* Return one if queue still has contents after read */ + return to_read_idx == write_idx ? 0 : 1; +} + +/* + * This function writes to the queue of the client. 
The 'queue_type' determines + * if this function is writing to the rx or tx queue + */ +int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, + u64 flags, u32 error, int queue_type) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 read_idx; + u32 write_idx; + u32 to_write_idx; + u32 q_size_u32; + u32 q_free_u32; + u32 *q_payload_write_ptr; + u32 payload_size_u32; + struct msm_hw_fence_queue_payload *write_ptr_payload; + bool lock_client = false; + u32 lock_idx; + int ret = 0; + + if (queue_type >= HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("Invalid queue type:%s\n", queue_type); + return -EINVAL; + } + + queue = &hw_fence_client->queues[queue_type]; + hfi_header = queue->va_header; + + q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + + if (!hfi_header) { + HWFNC_ERR("Invalid queue\n"); + return -EINVAL; + } + + /* + * We need to lock the client if there is an Rx Queue update, since that + * is the only time when HW Fence driver can have a race condition updating + * the Rx Queue, which also could be getting updated by the Fence CTL + */ + lock_client = _lock_client_queue(queue_type); + if (lock_client) { + lock_idx = hw_fence_client->client_id - 1; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("lock for client id:%d exceed max:%d\n", + hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + return -EINVAL; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ + } + + /* Get read and write index */ + read_idx = readl_relaxed(&hfi_header->read_index); + write_idx = readl_relaxed(&hfi_header->write_index); + + /* Make sure we read the values */ + rmb(); + + HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", + hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, + read_idx, write_idx, queue, queue_type); + + /* Check queue to make sure message will fit */ + q_free_u32 = read_idx <= write_idx ? 
(q_size_u32 - (write_idx - read_idx)) : + (read_idx - write_idx); + if (q_free_u32 <= payload_size_u32) { + HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32); + ret = -EINVAL; + goto exit; + } + HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32); + + /* Move the pointer where we need to write and cast it */ + q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx); + write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr; + HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n", + q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload); + + /* calculate the index after the write */ + to_write_idx = write_idx + payload_size_u32; + + HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size\n", to_write_idx, write_idx, + payload_size_u32); + HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", + hw_fence_client->client_id, _get_queue_type(queue_type), + hash, ctxt_id, seqno, flags, error); + + /* + * wrap-around case, here we are writing to the last element of the queue, therefore + * set to_write_idx, which is the index after the write, to the beginning of the + * queue + */ + if (to_write_idx >= q_size_u32) + to_write_idx = 0; + + /* Update Client Queue */ + writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); + writeq_relaxed(seqno, &write_ptr_payload->seqno); + writeq_relaxed(hash, &write_ptr_payload->hash); + writeq_relaxed(flags, &write_ptr_payload->flags); + writel_relaxed(error, &write_ptr_payload->error); + + /* update memory for the message */ + wmb(); + + /* update the write index */ + writel_relaxed(to_write_idx, &hfi_header->write_index); + + /* update memory for the index */ + wmb(); + +exit: + if (lock_client) + GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ + + return ret; +} + +static int init_global_locks(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->clients_locks_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr; + drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64); + + return 0; +} + +static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_ERR("Failed to reserve table mem %d\n", ret); + return -ENOMEM; + } + HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size); + + /* Populate Memory descriptor with address */ + mem_descriptor = &drv_data->hw_fences_mem_desc; + mem_descriptor->virtual_addr = ptr; + mem_descriptor->device_addr = phys; + mem_descriptor->size = size; + 
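	/*
	 * A worked example for the queue index arithmetic used by
	 * hw_fence_update_queue() / hw_fence_read_queue() above (numbers are
	 * illustrative only): indexes are in u32 units, so with
	 * q_size_u32 = 64 and payload_size_u32 = 8,
	 *
	 *	read_idx = 56, write_idx = 56: q_free_u32 = 64, the payload is
	 *	written at offset 56 and to_write_idx = 64 wraps back to 0;
	 *
	 *	read_idx = 0, write_idx = 56: q_free_u32 = 8, and since the check
	 *	is q_free_u32 <= payload_size_u32 the write is rejected, keeping
	 *	one payload slot free so a full queue is never confused with an
	 *	empty one (read_idx == write_idx).
	 */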
mem_descriptor->mem_data = NULL; /* not storing special info for now */ + + /* Initialize internal pointers for managing the tables */ + drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr; + drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size / + sizeof(struct msm_hw_fence); + + HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl, + drv_data->hw_fences_tbl_cnt); + + return 0; +} + +static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) +{ + struct msm_hw_fence_mem_addr *mem_descriptor; + int ret; + + mem_descriptor = &drv_data->ctrl_queue_mem_desc; + + /* Init ctrl queue */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE, + mem_descriptor, drv_data->ctrl_queues, + HW_FENCE_CTRL_QUEUES, 0); + if (ret) + HWFNC_ERR("Failure to init ctrl queue\n"); + + return ret; +} + +int hw_fence_init(struct hw_fence_driver_data *drv_data) +{ + int ret; + __le32 *mem; + + ret = hw_fence_utils_parse_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to set dt properties\n"); + goto exit; + } + + /* Allocate hw fence driver mem pool and share it with HYP */ + ret = hw_fence_utils_alloc_mem(drv_data); + if (ret) { + HWFNC_ERR("failed to alloc base memory\n"); + goto exit; + } + + /* Initialize ctrl queue */ + ret = init_ctrl_queue(drv_data); + if (ret) + goto exit; + + ret = init_global_locks(drv_data); + if (ret) + goto exit; + HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl, + drv_data->client_lock_tbl_cnt); + + /* Initialize hw fences table */ + ret = init_hw_fences_table(drv_data); + if (ret) + goto exit; + + /* Map ipcc registers */ + ret = hw_fence_utils_map_ipcc(drv_data); + if (ret) { + HWFNC_ERR("ipcc regs mapping failed\n"); + goto exit; + } + + /* Map time register */ + ret = hw_fence_utils_map_qtime(drv_data); + if (ret) { + HWFNC_ERR("qtime reg mapping failed\n"); + goto exit; + } + + /* Map ctl_start registers */ + ret = hw_fence_utils_map_ctl_start(drv_data); + if (ret) { + /* This is not fatal error, since platfoms with dpu-ipc + * won't use this option + */ + HWFNC_WARN("no ctl_start regs, won't trigger the frame\n"); + } + + /* Init debugfs */ + ret = hw_fence_debug_debugfs_register(drv_data); + if (ret) { + HWFNC_ERR("debugfs init failed\n"); + goto exit; + } + + /* Init vIRQ from VM */ + ret = hw_fence_utils_init_virq(drv_data); + if (ret) { + HWFNC_ERR("failed to init virq\n"); + goto exit; + } + + mem = drv_data->io_mem_base; + HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem); + + HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n", + drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt); + +exit: + return ret; +} + +int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + int ret; + + /* Init client queues */ + ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + &hw_fence_client->mem_descriptor, hw_fence_client->queues, + HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id); + if (ret) { + HWFNC_ERR("Failure to init the queue for client:%d\n", + hw_fence_client->client_id); + goto exit; + } + + /* Init client memory descriptor */ + memcpy(mem_descriptor, &hw_fence_client->mem_descriptor, + sizeof(struct msm_hw_fence_mem_addr)); + +exit: + return ret; +} + +int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + int ret = 
0;
+
+	/*
+	 * Initialize IPCC Signals for this client
+	 *
+	 * NOTE: For each Client HW-Core, the client drivers might be the ones
+	 * doing their own initialization (in case any hw-sequence must be
+	 * enforced), however, if that is not the case, any per-client ipcc init
+	 * to enable the signaling can go here.
+	 */
+	switch (hw_fence_client->client_id) {
+	case HW_FENCE_CLIENT_ID_CTX0:
+		/* nothing to initialize for gpu client */
+		break;
+	case HW_FENCE_CLIENT_ID_CTL0:
+	case HW_FENCE_CLIENT_ID_CTL1:
+	case HW_FENCE_CLIENT_ID_CTL2:
+	case HW_FENCE_CLIENT_ID_CTL3:
+	case HW_FENCE_CLIENT_ID_CTL4:
+	case HW_FENCE_CLIENT_ID_CTL5:
+#ifdef HW_DPU_IPCC
+		/* initialize ipcc signals for dpu clients */
+		HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n",
+			hw_fence_client->client_id, drv_data->ipcc_dpu_initialized);
+		if (!drv_data->ipcc_dpu_initialized) {
+			drv_data->ipcc_dpu_initialized = true;
+
+			/* Init dpu client ipcc signal */
+			hw_fence_ipcc_enable_dpu_signaling(drv_data);
+		}
+#endif /* HW_DPU_IPCC */
+		break;
+	default:
+		HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
+{
+
+	/*
+	 * Initialize Fence Controller resources for this Client.
+	 * Here we need to use the CTRL queue to communicate to the Fence
+	 * Controller the shared memory for the Rx/Tx queue for this client,
+	 * as well as any information that the Fence Controller might need to
+	 * know for this client.
+	 *
+	 * NOTE: For now, we are doing a static allocation of the
+	 * client's queues, so currently we don't need any notification
+	 * to the Fence CTL here through the CTRL queue.
+	 * Later on we might need it, once the PVM to SVM (and vice versa)
+	 * communication for initialization is supported.
+	 */
+
+	return 0;
+}
+
+void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client)
+{
+	/*
+	 * Deallocate any resource allocated for this client.
+	 * If the fence controller was notified about the existence of this
+	 * client, we will need to notify it that this client is gone.
+	 *
+	 * NOTE: Since we are currently using a 'fixed' memory allocation for
+	 * the clients' queues, we don't need any notification to the Fence
+	 * Controller yet; however, if the memory allocation changes from
+	 * 'fixed' to dynamic, then we will need to notify the FenceCTL about
+	 * the client that is going-away here.
+	 */
+	mutex_lock(&drv_data->clients_mask_lock);
+	drv_data->client_id_mask &= ~BIT(hw_fence_client->client_id);
+	drv_data->clients[hw_fence_client->client_id] = NULL;
+	mutex_unlock(&drv_data->clients_mask_lock);
+
+	/* Deallocate client's object */
+	HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
+	kfree(hw_fence_client);
+}
+
+static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
+	u64 step, u64 *hash)
+{
+	u64 m_size = table_total_entries;
+	int val = 0;
+
+	if (step == 0) {
+		u64 a_multiplier = HW_FENCE_HASH_A_MULT;
+		u64 c_multiplier = HW_FENCE_HASH_C_MULT;
+		u64 b_multiplier = context + (context - 1); /* odd multiplier */
+
+		/*
+		 * if m is a power of 2, we could optimize with a right shift;
+		 * for now we don't do it, to avoid assuming a power of two
+		 */
+		*hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
+	} else {
+		if (step >= m_size) {
+			/*
+			 * If we already traversed the whole table, return failure, since this
+			 * means there are no available spots; the table is either full or
+			 * full enough that we couldn't find an available spot after traversing
+			 * the whole table.
+			 * Ideally the table shouldn't be so full that we cannot find a value
+			 * after some iterations, so this maximum step size could be optimized
+			 * to fail earlier.
+			 */
+			HWFNC_ERR("Fence Table traversed and no available space!\n");
+			val = -EINVAL;
+		} else {
+			/*
+			 * Linearly increment the hash value to find the next element in the
+			 * table; note that this relies on the 'scrambled' data from the
+			 * original hash. Also, use a mod division to wrap around in case we
+			 * reached the end of the table.
+			 */
+			*hash = (*hash + 1) % m_size;
+		}
+	}
+
+	return val;
+}
+
+static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
+	struct msm_hw_fence *hw_fences_tbl,
+	u64 hash)
+{
+	if (hash >= table_total_entries) {
+		HWFNC_ERR("hash:%llu out of max range:%u\n",
+			hash, table_total_entries);
+		return NULL;
+	}
+
+	return &hw_fences_tbl[hash];
+}
+
+static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
+{
+	/* If valid is set, the hw fence is not free */
+	return hw_fence->valid ? false : true;
+}
+
+static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
+{
+	return ((hw_fence->ctx_id == context && hw_fence->seq_id == seqno) ?
true : false); +} + +/* clears everything but the 'valid' field */ +static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) +{ + int i; + + hw_fence->error = 0; + wmb(); /* update memory to avoid mem-abort */ + hw_fence->ctx_id = 0; + hw_fence->seq_id = 0; + hw_fence->wait_client_mask = 0; + hw_fence->fence_allocator = 0; + hw_fence->fence_signal_client = 0; + + hw_fence->flags = 0; + + hw_fence->fence_create_time = 0; + hw_fence->fence_trigger_time = 0; + hw_fence->fence_wait_time = 0; + hw_fence->debug_refcount = 0; + hw_fence->parents_cnt = 0; + hw_fence->pending_child_cnt = 0; + + for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) + hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; +} + +/* This function must be called with the hw fence lock */ +static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* reserve this HW fence */ + hw_fence->valid = 1; + + hw_fence->ctx_id = context; + hw_fence->seq_id = seqno; + hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */ + hw_fence->fence_allocator = client_id; + hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* unreserve this HW fence */ + hw_fence->valid = 0; + + HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _reserve_join_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, u64 context, + u64 seqno, u32 hash, u32 pending_child_cnt) +{ + _cleanup_hw_fence(hw_fence); + + /* reserve this HW fence */ + hw_fence->valid = true; + + hw_fence->ctx_id = context; + hw_fence->seq_id = seqno; + hw_fence->fence_allocator = client_id; + hw_fence->fence_create_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + hw_fence->pending_child_cnt = pending_child_cnt; + + HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +/* This function must be called with the hw fence lock */ +static void _fence_found(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u32 client_id, + u64 context, u64 seqno, u32 hash, u32 pending_child_cnt) +{ + /* + * Do nothing, when this find fence fn is invoked, all processing is done outside. 
+ * Currently just keeping this function for debugging purposes, can be removed + * in final versions + */ + HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%llu\n", + client_id, context, seqno, hash); +} + +char *_get_op_mode(enum hw_fence_lookup_ops op_code) +{ + switch (op_code) { + case HW_FENCE_LOOKUP_OP_CREATE: + return "CREATE"; + case HW_FENCE_LOOKUP_OP_DESTROY: + return "DESTROY"; + case HW_FENCE_LOOKUP_OP_CREATE_JOIN: + return "CREATE_JOIN"; + case HW_FENCE_LOOKUP_OP_FIND_FENCE: + return "FIND_FENCE"; + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + +struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id, + u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash) +{ + bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno); + void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence, + u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending); + struct msm_hw_fence *hw_fence = NULL; + u64 step = 0; + int ret = 0; + bool hw_fence_found = false; + + if (!hash | !drv_data | !hw_fences_tbl) { + HWFNC_ERR("Invalid input for hw_fence_lookup\n"); + return NULL; + } + + *hash = ~0; + + HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code); + + switch (op_code) { + case HW_FENCE_LOOKUP_OP_CREATE: + compare_fnc = &_is_hw_fence_free; + process_fnc = &_reserve_hw_fence; + break; + case HW_FENCE_LOOKUP_OP_DESTROY: + compare_fnc = &_hw_fence_match; + process_fnc = &_unreserve_hw_fence; + break; + case HW_FENCE_LOOKUP_OP_CREATE_JOIN: + compare_fnc = &_is_hw_fence_free; + process_fnc = &_reserve_join_fence; + break; + case HW_FENCE_LOOKUP_OP_FIND_FENCE: + compare_fnc = &_hw_fence_match; + process_fnc = &_fence_found; + break; + default: + HWFNC_ERR("Unknown op code:%d\n", op_code); + return NULL; + } + + while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) { + + /* Calculate the Hash for the Fence */ + ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash); + if (ret) { + HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n", + context, seqno, *hash); + break; + } + HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context, + seqno); + + /* Get element from the table using the hash */ + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash); + HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n", + hw_fences_tbl, hw_fence, *hash, hw_fence ? 
hw_fence->valid : 0xbad); + if (!hw_fence) { + HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n", + context, seqno, *hash); + break; + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); + + /* compare to either find a free fence or find an allocated fence */ + if (compare_fnc(hw_fence, context, seqno)) { + + /* Process the hw fence found by the algorithm */ + if (process_fnc) { + process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash, + pending_child_cnt); + + /* update memory table with processing */ + wmb(); + } + + HWFNC_DBG_L("client_id:%lu op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n", + client_id, _get_op_mode(op_code), context, seqno, *hash, step); + + hw_fence_found = true; + } else { + if ((op_code == HW_FENCE_LOOKUP_OP_CREATE || + op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) && + seqno == hw_fence->seq_id && context == hw_fence->ctx_id) { + /* ctx & seqno must be unique creating a hw-fence */ + HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", + context, seqno); + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + break; + } + /* compare can fail if we have a collision, we will linearly resolve it */ + HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash, + context, seqno); + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + + /* Increment step for the next loop */ + step++; + } + + /* If we iterated through the whole list and didn't find the fence, return null */ + if (!hw_fence_found) { + HWFNC_ERR("fail to create hw-fence step:%llu\n", step); + hw_fence = NULL; + } + + HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n", + op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1); + + return hw_fence; +} + +int hw_fence_create(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash) +{ + u32 client_id = hw_fence_client->client_id; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + + int ret = 0; + + /* allocate hw fence in table */ + if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) { + HWFNC_ERR("Fail to create fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + return ret; +} + +static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno) { + u64 hash; + + if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl, + context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash)) + return -EINVAL; + + return 0; +} + +int hw_fence_destroy(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno) +{ + u32 client_id = hw_fence_client->client_id; + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + int ret = 0; + + /* remove hw fence from table*/ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) { + HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + ret = -EINVAL; + } + + return ret; +} + +static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence_array *array, u64 *hash, bool create) +{ + struct msm_hw_fence *hw_fences_tbl; + struct msm_hw_fence *join_fence = NULL; + u64 context, seqno; + u32 client_id, pending_child_cnt; + + /* + * NOTE: For now we are 
allocating the join fences from the same table as all + * the other fences (i.e. drv_data->hw_fences_tbl), functionally this will work, however, + * this might impact the lookup algorithm, since the "join-fences" are created with the + * context and seqno of a fence-array, and those might not be changing by the client, + * so this will linearly increment the look-up and very likely impact the other fences if + * these join-fences start to fill-up a particular region of the fences global table. + * So we might have to allocate a different table altogether for these join fences. + * However, to do this, just alloc another table and change it here: + */ + hw_fences_tbl = drv_data->hw_fences_tbl; + + context = array->base.context; + seqno = array->base.seqno; + pending_child_cnt = array->num_fences; + client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID; + + if (create) { + /* allocate the fence */ + join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, + seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash); + if (!join_fence) + HWFNC_ERR("Fail to create join fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + } else { + /* destroy the fence */ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) + HWFNC_ERR("Fail destroying join fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + } + + return join_fence; +} + +struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + u64 context, u64 seqno, u64 *hash) +{ + struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence; + u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff; + + /* find the hw fence */ + hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context, + seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash); + if (!hw_fence) + HWFNC_ERR("Fail to find hw fence client:%lu ctx:%llu seqno:%llu\n", + client_id, context, seqno); + + return hw_fence; +} + +static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u64 flags, u32 error) +{ + u32 tx_client_id = drv_data->ipcc_client_id; + u32 rx_client_id = hw_fence_client->ipc_client_id; + + HWFNC_DBG_H("We must signal the client now! 
hfence hash:%llu\n", hash); + + /* Write to Rx queue */ + if (hw_fence_client->update_rxq) + hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); + + /* Signal the hw fence now */ + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); +} + +static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array, + struct msm_hw_fence *join_fence, u64 hash_join_fence) +{ + struct dma_fence *child_fence; + struct msm_hw_fence *hw_fence_child; + int idx, j; + u64 hash = 0; + + /* cleanup the child-fences from the parent join-fence */ + for (idx = iteration; idx >= 0; idx--) { + child_fence = array->fences[idx]; + + hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, + child_fence->seqno, &hash); + if (!hw_fence_child) { + HWFNC_ERR("Cannot cleanup child fence context:%lu seqno:%lu hash:%lu\n", + child_fence->context, child_fence->seqno, hash); + + /* + * ideally this should not have happened, but if it did, try to keep + * cleaning-up other fences after printing the error + */ + continue; + } + + /* lock the child while we clean it up from the parent join-fence */ + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + for (j = hw_fence_child->parents_cnt; j > 0; j--) { + + if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n", + hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + + j = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } + + if (hw_fence_child->parent_list[j - 1] == hash_join_fence) { + hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE; + + if (hw_fence_child->parents_cnt) + hw_fence_child->parents_cnt--; + + /* update memory for the table update */ + wmb(); + } + } + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + } + + /* destroy join fence */ + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + false); +} + +int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array) +{ + struct msm_hw_fence *join_fence; + struct msm_hw_fence *hw_fence_child; + struct dma_fence *child_fence; + u32 signaled_fences = 0; + u64 hash_join_fence, hash; + int i, ret = 0; + + /* + * Create join fence from the join-fences table, + * This function initializes: + * join_fence->pending_child_count = array->num_fences + */ + join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array, + &hash_join_fence, true); + if (!join_fence) { + HWFNC_ERR("cannot alloc hw fence for join fence array\n"); + return -EINVAL; + } + + /* update this as waiting client of the join-fence */ + GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + join_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + + /* Iterate through fences of the array */ + for (i = 0; i < array->num_fences; i++) { + child_fence = array->fences[i]; + + /* Nested fence-arrays are not supported */ + if (to_dma_fence_array(child_fence)) { + HWFNC_ERR("This is a nested fence, fail!\n"); + ret = -EINVAL; + goto error_array; + } + + /* All elements in the fence-array must be hw-fences */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) { + HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n"); 
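			/*
			 * Note on the check above: every child in the array is
			 * expected to carry MSM_HW_FENCE_FLAG_ENABLED_BIT, i.e.
			 * to already have an entry in the global hw-fence table,
			 * since the join-fence bookkeeping below only updates
			 * that table. As an illustrative example with
			 * num_fences = 3 where one child is already signaled:
			 * the join fence ends up with pending_child_cnt = 2 and
			 * each of the two unsignaled children records
			 * hash_join_fence in parent_list[parents_cnt - 1].
			 */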
+ ret = -EINVAL; + goto error_array; + } + + /* Find the HW Fence in the Global Table */ + hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, + child_fence->seqno, &hash); + if (!hw_fence_child) { + HWFNC_ERR("Cannot find child fence context:%lu seqno:%lu hash:%lu\n", + child_fence->context, child_fence->seqno, hash); + ret = -EINVAL; + goto error_array; + } + + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + + /* child fence is already signaled */ + GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + join_fence->pending_child_cnt--; + + /* update memory for the table update */ + wmb(); + + GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + signaled_fences++; + } else { + + /* child fence is not signaled */ + hw_fence_child->parents_cnt++; + + if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS + || hw_fence_child->parents_cnt < 1) { + + /* Max number of parents for a fence is exceeded */ + HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n", + hw_fence_child->parents_cnt); + hw_fence_child->parents_cnt--; + + /* update memory for the table update */ + wmb(); + + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + ret = -EINVAL; + goto error_array; + } + + hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] = + hash_join_fence; + + /* update memory for the table update */ + wmb(); + } + GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + } + + /* all fences were signaled, signal client now */ + if (signaled_fences == array->num_fences) { + + /* signal the join hw fence */ + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + + /* + * job of the join-fence is finished since we already signaled, + * we can delete it now. This can happen when all the fences that + * are part of the join-fence are already signaled. 
+ */ + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + false); + } + + return ret; + +error_array: + _cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence, + hash_join_fence); + + return -EINVAL; +} + +int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) +{ + struct msm_hw_fence *hw_fence; + u64 hash; + + /* find the hw fence within the table */ + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("Cannot find fence!\n"); + return -EINVAL; + } + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + + /* register client in the hw fence */ + hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); + hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); + hw_fence->debug_refcount++; + + /* update memory for the table update */ + wmb(); + + /* if hw fence already signaled, signal the client */ + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + + return 0; +} + +int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, + struct dma_fence *fence) +{ + int ret = 0; + + if (!drv_data | !hw_fence_client | !fence) { + HWFNC_ERR("Invalid Input!\n"); + return -EINVAL; + } + /* fence must be hw-fence */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence in is not a HW Fence flags:0x%llx\n", fence->flags); + return -EINVAL; + } + + ret = hw_fence_register_wait_client(drv_data, hw_fence_client, fence->context, + fence->seqno); + if (ret) + HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); + + return ret; +} + +int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, + u32 reset_flags) +{ + int ret = 0; + enum hw_fence_client_id wait_client_id; + struct msm_hw_fence_client *hw_fence_wait_client; + int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 
0 : MSM_HW_FENCE_ERROR_RESET; + + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { + HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n", + hw_fence_client->client_id, hw_fence->ctx_id, + hw_fence->seq_id); + hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id); + + /* update memory for the table update */ + wmb(); + } + GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + + if (hw_fence->fence_allocator == hw_fence_client->client_id) { + + /* signal with an error all the waiting clients for this fence */ + for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + if (hw_fence->wait_client_mask & BIT(wait_client_id)) { + hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (hw_fence_wait_client) + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, error); + } + } + + if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) + goto skip_destroy; + + ret = hw_fence_destroy(drv_data, hw_fence_client, + hw_fence->ctx_id, hw_fence->seq_id); + if (ret) { + HWFNC_ERR("Error destroying HW fence: ctx:%d seqno:%d\n", + hw_fence->ctx_id, hw_fence->seq_id); + } + } + +skip_destroy: + return ret; +} diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c new file mode 100644 index 0000000000..226df1e0cb --- /dev/null +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +static void _lock(uint64_t *wait) +{ + /* WFE Wait */ +#if defined(__aarch64__) + __asm__("SEVL\n\t" + "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" + "1:\n\t" + "WFE\n\t" + "LDAXR W5, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n\t" + "STXR W5, W0, [%x[i_lock]]\n\t" + "CBNZ W5, 1b\n" + : + : [i_lock] "r" (wait) + : "memory"); +#endif +} + +static void _unlock(uint64_t *lock) +{ + /* Signal Client */ +#if defined(__aarch64__) + __asm__("STLR WZR, [%x[i_out]]\n\t" + "SEV\n" + : + : [i_out] "r" (lock) + : "memory"); +#endif +} + +void global_atomic_store(uint64_t *lock, bool val) +{ + if (val) + _lock(lock); + else + _unlock(lock); +} + +/* + * Each bit in this mask represents each of the loopback clients supported in + * the enum hw_fence_loopback_id + */ +#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f + +static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, + int client_id) +{ + int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */ + void *ctl_start_reg; + u32 val; + + if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) { + HWFNC_ERR("invalid ctl_id:%d\n", ctl_id); + return -EINVAL; + } + + ctl_start_reg = drv_data->ctl_start_ptr[ctl_id]; + if (!ctl_start_reg) { + HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id); + return -EINVAL; + } + + HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id); + + val = 0x1; /* ctl_start trigger */ +#ifdef CTL_START_SIM + HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_start_reg, val, ctl_id); + writel_relaxed(val, ctl_start_reg); +#else + HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id, + ctl_start_reg, val); +#endif + + return 0; +} + +static inline int _process_gfx_client_loopback(struct 
hw_fence_driver_data *drv_data, + int client_id) +{ + int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + struct msm_hw_fence_queue_payload payload; + int read = 1; + + HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id); + while (read) { + /* + * 'client_id' is the loopback-client-id, not the hw-fence client_id, + * so use GFX hw-fence client id, to get the client data + */ + read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload, + queue_type); + if (read < 0) { + HWFNC_ERR("unable to read gfx rxq\n"); + break; + } + HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n", + payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error); + } + + return read; +} + +static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) +{ + int ret; + + HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id); + switch (client_id) { + case HW_FENCE_LOOPBACK_DPU_CTL_0: + case HW_FENCE_LOOPBACK_DPU_CTL_1: + case HW_FENCE_LOOPBACK_DPU_CTL_2: + case HW_FENCE_LOOPBACK_DPU_CTL_3: + case HW_FENCE_LOOPBACK_DPU_CTL_4: + case HW_FENCE_LOOPBACK_DPU_CTL_5: + ret = _process_dpu_client_loopback(drv_data, client_id); + break; + case HW_FENCE_LOOPBACK_GFX_CTX_0: + ret = _process_gfx_client_loopback(drv_data, client_id); + break; + default: + HWFNC_ERR("unknown client:%d\n", client_id); + ret = -EINVAL; + } + + return ret; +} + +void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) +{ + int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + u64 mask; + + for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) { + mask = 1 << client_id; + if (mask & db_flags) { + HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags); + + /* process client */ + if (_process_doorbell_client(drv_data, client_id)) + HWFNC_ERR("Failed to process client:%d\n", client_id); + + /* clear mask for this client and if nothing else pending finish */ + db_flags = db_flags & ~(mask); + HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", + client_id, db_flags, mask, ~(mask)); + if (!db_flags) + break; + } + } +} + +/* doorbell callback */ +static void _hw_fence_cb(int irq, void *data) +{ + struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; + gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK; + int ret; + + if (!drv_data) + return; + + ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0); + if (ret) { + HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret); + return; + } + + HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label, + irq, clear_flags, hw_fence_get_qtime(drv_data)); + + hw_fence_utils_process_doorbell_mask(drv_data, clear_flags); +} + +int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-db"; + int ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label); + drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data); + if 
(IS_ERR_OR_NULL(drv_data->rx_dbl)) { + ret = PTR_ERR(drv_data->rx_dbl); + HWFNC_ERR("Failed to register doorbell\n"); + return ret; + } + + return 0; +} + +static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, + gh_vmid_t self, gh_vmid_t peer) +{ + u32 src_vmlist[1] = {self}; + int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + int dst_vmlist[2] = {self, peer}; + int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE}; + struct gh_acl_desc *acl; + struct gh_sgl_desc *sgl; + int ret; + + ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), + src_vmlist, 1, dst_vmlist, dst_perms, 2); + if (ret) { + HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + return ret; + } + + acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL); + if (!acl) + return -ENOMEM; + sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL); + if (!sgl) { + kfree(acl); + return -ENOMEM; + } + acl->n_acl_entries = 2; + acl->acl_entries[0].vmid = (u16)self; + acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W; + acl->acl_entries[1].vmid = (u16)peer; + acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W; + + sgl->n_sgl_entries = 1; + sgl->sgl_entries[0].ipa_base = drv_data->res.start; + sgl->sgl_entries[0].size = resource_size(&drv_data->res); + + ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); + if (ret) { + HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", + __func__, drv_data->res.start, drv_data->size, ret); + /* Attempt to give resource back to HLOS */ + hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), + dst_vmlist, 2, + src_vmlist, src_perms, 1); + ret = -EPROBE_DEFER; + } + + kfree(acl); + kfree(sgl); + + return ret; +} + +static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data) +{ + struct gh_rm_notif_vm_status_payload *vm_status_payload; + struct hw_fence_driver_data *drv_data; + gh_vmid_t peer_vmid; + gh_vmid_t self_vmid; + + drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb); + + HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd); + if (cmd != GH_RM_NOTIF_VM_STATUS) + goto end; + + vm_status_payload = data; + HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status); + if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY && + vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET) + goto end; + + if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; + + if (peer_vmid != vm_status_payload->vmid) + goto end; + + switch (vm_status_payload->vm_status) { + case GH_RM_VM_STATUS_READY: + HWFNC_DBG_INIT("init mem\n"); + if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) + HWFNC_ERR("failed to share memory\n"); + break; + case GH_RM_VM_STATUS_RESET: + HWFNC_DBG_INIT("reset\n"); + break; + } + +end: + return NOTIFY_DONE; +} + +/* Allocates carved-out mapped memory */ +int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) +{ + struct device_node *node = drv_data->dev->of_node; + struct device_node *node_compat; + const char *compat = "qcom,msm-hw-fence-mem"; + struct device *dev = drv_data->dev; + struct device_node *np; + int notifier_ret, ret; + + node_compat = of_find_compatible_node(node, NULL, compat); + if (!node_compat) { + HWFNC_ERR("Failed to find dev node with compat:%s\n", compat); + return -EINVAL; + } + + ret = 
of_property_read_u32(node_compat, "gunyah-label", &drv_data->label); + if (ret) { + HWFNC_ERR("failed to find label info %d\n", ret); + return ret; + } + + np = of_parse_phandle(node_compat, "shared-buffer", 0); + if (!np) { + HWFNC_ERR("failed to read shared-buffer info\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(np, 0, &drv_data->res); + of_node_put(np); + if (ret) { + HWFNC_ERR("of_address_to_resource failed %d\n", ret); + return -EINVAL; + } + + drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start, + resource_size(&drv_data->res)); + if (!drv_data->io_mem_base) { + HWFNC_ERR("ioremap failed!\n"); + return -ENXIO; + } + drv_data->size = resource_size(&drv_data->res); + + HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n", + drv_data->io_mem_base, drv_data->res.start, + drv_data->res.end, drv_data->size, drv_data->res.name); + + memset_io(drv_data->io_mem_base, 0x0, drv_data->size); + + /* Register memory with HYP */ + ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name); + if (ret) + drv_data->peer_name = GH_SELF_VM; + + drv_data->rm_nb.notifier_call = hw_fence_rm_cb; + drv_data->rm_nb.priority = INT_MAX; + notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb); + HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret, + drv_data->peer_name, notifier_ret); + if (notifier_ret) { + HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret); + return -EPROBE_DEFER; + } + + return 0; +} + +char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) +{ + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE"; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + return "HW_FENCE_MEM_RESERVE_LOCKS_REGION"; + case HW_FENCE_MEM_RESERVE_TABLE: + return "HW_FENCE_MEM_RESERVE_TABLE"; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE"; + } + + return "Unknown"; +} + +/* Calculates the memory range for each of the elements in the carved-out memory */ +int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, + enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id) +{ + int ret = 0; + u32 start_offset = 0; + + switch (type) { + case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: + start_offset = 0; + *size = drv_data->hw_fence_mem_ctrl_queues_size; + break; + case HW_FENCE_MEM_RESERVE_LOCKS_REGION: + /* Locks region starts at the end of the ctrl queues */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size; + *size = HW_FENCE_MEM_LOCKS_SIZE; + break; + case HW_FENCE_MEM_RESERVE_TABLE: + /* HW Fence table starts at the end of the Locks region */ + start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE; + *size = drv_data->hw_fence_mem_fences_table_size; + break; + case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("unexpected client_id:%d\n", client_id); + ret = -EINVAL; + goto exit; + } + + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE + + drv_data->hw_fence_mem_fences_table_size) + + ((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size); + *size = drv_data->hw_fence_mem_clients_queues_size; + + break; + default: + HWFNC_ERR("Invalid mem reserve type:%d\n", type); + ret = -EINVAL; + break; + } + + if (start_offset + *size > drv_data->size) { + HWFNC_ERR("reservation request:%lu exceeds total size:%d\n", + start_offset + *size, drv_data->size); + return -ENOMEM; + } + + 
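+	/*
+	 * Illustrative layout of the carved-out region, inferred from the
+	 * offsets computed above (not an authoritative memory-map spec):
+	 *   [ ctrl queues | locks | fences table | page-aligned client queues ]
+	 * start_offset and *size were range-checked against drv_data->size just
+	 * above, so the addresses derived below stay within the mapped carve-out.
+	 */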
HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n", + _get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start, + start_offset, *size); + + + *phys = drv_data->res.start + (phys_addr_t)start_offset; + *pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */ + HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa); + +exit: + return ret; +} + +int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) +{ + int ret; + u32 val = 0; + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_table_entries = val; + + if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) { + HWFNC_ERR("table entries:%lu will overflow table size\n", + drv_data->hw_fence_table_entries); + return -EINVAL; + } + drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) * + drv_data->hw_fence_table_entries); + + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val); + if (ret || !val) { + HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val); + return ret; + } + drv_data->hw_fence_queue_entries = val; + + /* ctrl queues init */ + + if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) { + HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n", + drv_data->hw_fence_queue_entries); + return -EINVAL; + } + drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD * + drv_data->hw_fence_queue_entries; + + if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) / + HW_FENCE_CTRL_QUEUES) { + HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n", + drv_data->hw_fence_ctrl_queue_size); + return -EINVAL; + } + drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE + + (HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size); + + /* clients queues init */ + + if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { + HWFNC_ERR("queue entries:%lu will overflow client queue size\n", + drv_data->hw_fence_queue_entries); + return -EINVAL; + } + drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * + drv_data->hw_fence_queue_entries; + + if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) - + HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("queue size:%lu will overflow client queue mem size\n", + drv_data->hw_fence_client_queue_size); + return -EINVAL; + } + drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE + + (HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size)); + + HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", + drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, + drv_data->hw_fence_queue_entries); + HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\b", + drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size, + drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size); + + return 0; +} + +int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data) +{ + int ret; + u32 reg_config[2]; + void __iomem *ptr; + + /* Get ipcc memory range */ + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg", + 
reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read ipcc reg: %d\n", ret); + return ret; + } + drv_data->ipcc_reg_base = reg_config[0]; + drv_data->ipcc_size = reg_config[1]; + + /* Mmap ipcc registers */ + ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap ipcc regs\n"); + return -ENOMEM; + } + drv_data->ipcc_io_mem = ptr; + + HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n", + drv_data->ipcc_reg_base, drv_data->ipcc_size, + drv_data->ipcc_io_mem); + + hw_fence_ipcc_enable_signaling(drv_data); + + return ret; +} + +int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data) +{ + int ret = 0; + unsigned int reg_config[2]; + void __iomem *ptr; + + ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg", + reg_config, 2); + if (ret) { + HWFNC_ERR("failed to read qtimer reg: %d\n", ret); + return ret; + } + + drv_data->qtime_reg_base = reg_config[0]; + drv_data->qtime_size = reg_config[1]; + + ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size); + if (!ptr) { + HWFNC_ERR("failed to ioremap qtime regs\n"); + return -ENOMEM; + } + + drv_data->qtime_io_mem = ptr; + + return ret; +} + +static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id, + void **iomem_ptr, uint32_t *iomem_size) +{ + u32 reg_config[2]; + void __iomem *ptr; + char name[30] = {0}; + int ret; + + snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2); + if (ret) + return 0; /* this is an optional property */ + + /* Mmap registers */ + ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]); + if (!ptr) { + HWFNC_ERR("failed to ioremap %s reg\n", name); + return -ENOMEM; + } + + *iomem_ptr = ptr; + *iomem_size = reg_config[1]; + + HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n", + ctl_id, name, reg_config[0], reg_config[1], ptr); + + return 0; +} + +int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) +{ + u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + + for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) { + if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id], + &drv_data->ctl_start_size[ctl_id])) { + HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id); + } else { + if (drv_data->ctl_start_ptr[ctl_id]) + HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n", + ctl_id, drv_data->ctl_start_ptr[ctl_id], + drv_data->ctl_start_size[ctl_id]); + } + } + + return 0; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c new file mode 100644 index 0000000000..e2e61947c4 --- /dev/null +++ b/hw_fence/src/msm_hw_fence.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_debug.h" +#include "hw_fence_drv_ipc.h" + +struct hw_fence_driver_data *hw_fence_drv_data; + +void *msm_hw_fence_register(enum hw_fence_client_id client_id, + struct msm_hw_fence_mem_addr *mem_descriptor) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + HWFNC_DBG_H("++ client_id:%d\n", client_id); + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return ERR_PTR(-EAGAIN); + } + + if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: %d client_id:%d\n", + !mem_descriptor, client_id); + return ERR_PTR(-EINVAL); + } + + /* Avoid race condition if multiple-threads request same client at same time */ + mutex_lock(&hw_fence_drv_data->clients_mask_lock); + if (hw_fence_drv_data->client_id_mask & BIT(client_id)) { + HWFNC_ERR("client with id %d already registered\n", client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + return ERR_PTR(-EINVAL); + } + + /* Mark client as registered */ + hw_fence_drv_data->client_id_mask |= BIT(client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + + /* Alloc client handle */ + hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); + if (!hw_fence_client) { + mutex_lock(&hw_fence_drv_data->clients_mask_lock); + hw_fence_drv_data->client_id_mask &= ~BIT(client_id); + mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + return ERR_PTR(-ENOMEM); + } + hw_fence_client->client_id = client_id; + hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); + + if (hw_fence_client->ipc_client_id <= 0) { + HWFNC_ERR("Failed to find client:%d ipc id\n", client_id); + ret = -EINVAL; + goto error; + } + + hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id); + if (hw_fence_client->ipc_signal_id < 0) { + HWFNC_ERR("Failed to find client:%d signal\n", client_id); + ret = -EINVAL; + goto error; + } + + hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + + hw_fence_drv_data->clients[client_id] = hw_fence_client; + + /* Alloc Client HFI Headers and Queues */ + ret = hw_fence_alloc_client_resources(hw_fence_drv_data, + hw_fence_client, mem_descriptor); + if (ret) + goto error; + + /* Initialize signal for communication withe FenceCTL */ + ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); + if (ret) + goto error; + + /* + * Update Fence Controller with the address of the Queues and + * the Fences Tables for this client + */ + ret = hw_fence_init_controller_resources(hw_fence_client); + if (ret) + goto error; + + HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc_client_id:%d\n", + hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, + hw_fence_client->ipc_client_id); + + return (void *)hw_fence_client; +error: + + /* Free all the allocated resources */ + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); + + HWFNC_ERR("failed with error:%d\n", ret); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(msm_hw_fence_register); + +int msm_hw_fence_deregister(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + 
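+	/* client_handle is the opaque pointer returned by msm_hw_fence_register() */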
HWFNC_DBG_H("+\n"); + + /* Free all the allocated resources */ + hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_deregister); + +int msm_hw_fence_create(void *client_handle, + struct msm_hw_fence_create_params *params) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + struct dma_fence *fence; + int ret; + + if (IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) { + HWFNC_ERR("Invalid input\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + fence = (struct dma_fence *)params->fence; + + HWFNC_DBG_H("+\n"); + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be created for individual fences\n"); + return -EINVAL; + } + + /* This Fence is already a HW-Fence */ + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence already has HW Fence Flag set\n"); + return -EINVAL; + } + + /* Create the HW Fence, i.e. add entry in the Global Table for this Fence */ + ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, + fence->context, fence->seqno, params->handle); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + return ret; + } + + /* If no error, set the HW Fence Flag in the dma-fence */ + set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_create); + +int msm_hw_fence_destroy(void *client_handle, + struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int ret; + + if (IS_ERR_OR_NULL(client_handle) || !fence) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Block any Fence-Array, we should only get individual fences */ + array = to_dma_fence_array(fence); + if (array) { + HWFNC_ERR("HW Fence must be destroy for individual fences\n"); + return -EINVAL; + } + + /* This Fence not a HW-Fence */ + if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags); + return -EINVAL; + } + + /* Destroy the HW Fence, i.e. 
remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, + fence->context, fence->seqno); + if (ret) { + HWFNC_ERR("Error destroying the HW fence\n"); + return ret; + } + + /* Clear the HW Fence Flag in the dma-fence */ + clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags); + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_destroy); + +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fence_list, u32 num_fences, bool create) +{ + struct msm_hw_fence_client *hw_fence_client; + struct dma_fence_array *array; + int i, ret = 0; + + if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + HWFNC_DBG_H("+\n"); + + /* Process all the list of fences */ + for (i = 0; i < num_fences; i++) { + struct dma_fence *fence = fence_list[i]; + + /* Process a Fence-Array */ + array = to_dma_fence_array(fence); + if (array) { + ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, + array); + if (ret) { + HWFNC_ERR("Failed to create FenceArray\n"); + return ret; + } + } else { + /* Process individual Fence */ + ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence); + if (ret) { + HWFNC_ERR("Failed to create Fence\n"); + return ret; + } + } + } + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_wait_update); + +int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fences_tbl; + int i; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle!\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; + + HWFNC_DBG_L("reset fences for client:%d\n", hw_fence_client->client_id); + for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++) + hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client, + &hw_fences_tbl[i], i, reset_flags); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_reset_client); + +int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || + (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { + HWFNC_ERR("Invalid handle:%d or client handle:%d max:%d\n", handle, + IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_queue(hw_fence_drv_data, hw_fence_client, + hw_fence_drv_data->hw_fences_tbl[handle].ctx_id, + hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle, + flags, error, HW_FENCE_TX_QUEUE - 1); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_update_txq); + +int msm_hw_fence_trigger_signal(void *client_handle, + u32 tx_client_id, u32 rx_client_id, + u32 signal_id) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client 
*)client_handle; + + HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_id, + rx_client_id, signal_id); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_trigger_signal); + +/* Function used for simulation purposes only. */ +int msm_hw_fence_driver_doorbell_sim(u64 db_mask) +{ + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } + + HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n", + db_mask, hw_fence_get_qtime(hw_fence_drv_data)); + + hw_fence_utils_process_doorbell_mask(hw_fence_drv_data, db_mask); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim); + +static int msm_hw_fence_probe_init(struct platform_device *pdev) +{ + int rc; + + HWFNC_DBG_H("+\n"); + + hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL); + if (!hw_fence_drv_data) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, hw_fence_drv_data); + hw_fence_drv_data->dev = &pdev->dev; + + /* Initialize HW Fence Driver resources */ + rc = hw_fence_init(hw_fence_drv_data); + if (rc) + goto error; + + mutex_init(&hw_fence_drv_data->clients_mask_lock); + + /* set ready ealue so clients can register */ + hw_fence_drv_data->resources_ready = true; + + HWFNC_DBG_H("-\n"); + + return rc; + +error: + dev_set_drvdata(&pdev->dev, NULL); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_ERR("error %d\n", rc); + return rc; +} + +static int msm_hw_fence_probe(struct platform_device *pdev) +{ + int rc = -EINVAL; + + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) + rc = msm_hw_fence_probe_init(pdev); + if (rc) + goto err_exit; + + HWFNC_DBG_H("-\n"); + + return 0; + +err_exit: + HWFNC_ERR("error %d\n", rc); + return rc; +} + +static int msm_hw_fence_remove(struct platform_device *pdev) +{ + HWFNC_DBG_H("+\n"); + + if (!pdev) { + HWFNC_ERR("null platform dev\n"); + return -EINVAL; + } + + hw_fence_drv_data = dev_get_drvdata(&pdev->dev); + if (!hw_fence_drv_data) { + HWFNC_ERR("null driver data\n"); + return -EINVAL; + } + + dev_set_drvdata(&pdev->dev, NULL); + kfree(hw_fence_drv_data); + hw_fence_drv_data = (void *) -EPROBE_DEFER; + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static const struct of_device_id msm_hw_fence_dt_match[] = { + {.compatible = "qcom,msm-hw-fence"}, + {} +}; + +static struct platform_driver msm_hw_fence_driver = { + .probe = msm_hw_fence_probe, + .remove = msm_hw_fence_remove, + .driver = { + .name = "msm-hw-fence", + .of_match_table = of_match_ptr(msm_hw_fence_dt_match), + }, +}; + +static int __init msm_hw_fence_init(void) +{ + int rc = 0; + + HWFNC_DBG_H("+\n"); + + rc = platform_driver_register(&msm_hw_fence_driver); + if (rc) { + HWFNC_ERR("%s: failed to register platform driver\n", + __func__); + return rc; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} + +static void __exit msm_hw_fence_exit(void) +{ + HWFNC_DBG_H("+\n"); + + platform_driver_unregister(&msm_hw_fence_driver); + + HWFNC_DBG_H("-\n"); +} + +module_init(msm_hw_fence_init); +module_exit(msm_hw_fence_exit); + +MODULE_DESCRIPTION("QTI HW Fence Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 0563c64f97..72954fa842 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,9 +2,12 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call 
is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 4d74d27bf4..1f352c0b8a 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += msm_ext_display.ko +PRODUCT_PACKAGES += msm_ext_display.ko msm_hw_fence.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) PRODUCT_PACKAGES += sync_fence.ko From efe7847b7a8d433c2b2bf40f4d2209566de37dd1 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 25 Apr 2022 12:07:40 -0700 Subject: [PATCH 11/77] mm-drivers: hw_fence: avoid compiling hw_fence driver for taro hw_fence driver is not required for taro variants. Since the Display SI 3.0 is shared with taro dev SI variant, avoid compiling hw_fence as dlkm for taro target. Change-Id: I84637f2546fd0818d956880fbc1bb86a30a7c916 Signed-off-by: Ingrid Gallardo --- Android.mk | 2 +- mm_driver_board.mk | 18 +++++++++--------- mm_driver_product.mk | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Android.mk b/Android.mk index c703795324..c14968715d 100644 --- a/Android.mk +++ b/Android.mk @@ -1,7 +1,7 @@ MM_DRIVER_PATH := $(call my-dir) include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk -include $(MM_DRIVER_PATH)/hw_fence/Android.mk ifneq ($(TARGET_BOARD_PLATFORM), taro) +include $(MM_DRIVER_PATH)/hw_fence/Android.mk include $(MM_DRIVER_PATH)/sync_fence/Android.mk endif diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 72954fa842..127c8dcc31 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -2,17 +2,17 @@ ifneq ($(TARGET_BOARD_AUTO),true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + 
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 1f352c0b8a..c7d11b3449 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -PRODUCT_PACKAGES += msm_ext_display.ko msm_hw_fence.ko +PRODUCT_PACKAGES += msm_ext_display.ko ifneq ($(TARGET_BOARD_PLATFORM), taro) -PRODUCT_PACKAGES += sync_fence.ko +PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko endif From 47157b13975f4cca2a4c61e72d504bef1be1ccd3 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 27 May 2022 15:05:45 -0700 Subject: [PATCH 12/77] mm-drivers: hw_fence: populate payload size in hfi header Populate the payload size for the ctrl, rx and tx queues that communicate with the fence controller. Change-Id: Idc7dafcccd6ea16821e4f595bdab7395a5e0745b Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index edecc41cbc..cdfe9a81c2 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -31,7 +31,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, void *ptr, *qptr; phys_addr_t phys, qphys; u32 size, start_queue_offset; - int headers_size, queue_size; + int headers_size, queue_size, payload_size; int i, ret = 0; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); @@ -39,10 +39,12 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE; queue_size = drv_data->hw_fence_ctrl_queue_size; + payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE; queue_size = drv_data->hw_fence_client_queue_size; + payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); @@ -102,6 +104,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, /* Set the size of this header */ hfi_queue_header->queue_size = queue_size; + /* Set the payload size */ + hfi_queue_header->pkt_size = payload_size; + /* Store Memory info in the Client data */ queues[i].va_queue = qptr; queues[i].pa_queue = qphys; From 2ae3dcadde4b7acc83e4734d5c7f52ddc0114987 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 3 Jun 2022 11:00:48 -0700 Subject: [PATCH 13/77] mm-drivers: hw_fence: avoid hw fences creation until fctl ready This change adds a check to avoid hw-fences creation until the fence controller is ready. 
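With this check in place, APIs such as msm_hw_fence_create() return
-EAGAIN while the fence controller VM is not yet ready (i.e. before the
Gunyah memory share completes). A minimal caller-side sketch of how a
client might handle this (illustrative only; the retry/defer policy is
an assumption, not part of this change):

	ret = msm_hw_fence_create(client_handle, &params);
	if (ret == -EAGAIN) {
		/* fence controller VM not ready yet; defer and retry later */
		return ret;
	}
	if (ret)
		return ret;	/* any other error is fatal for this fence */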
Change-Id: I613c19d9dfd8836f8ded6bcb0162bef647df7bc3 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +++ hw_fence/src/hw_fence_drv_utils.c | 2 ++ hw_fence/src/msm_hw_fence.c | 28 ++++++++++++++++++++++++---- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index e15fd4159c..c5565a63ff 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -211,6 +211,7 @@ struct msm_hw_fence_dbg_data { * @client_id_mask: bitmask for tracking registered client_ids * @clients_mask_lock: lock to synchronize access to the clients mask * @msm_hw_fence_client: table with the handles of the registered clients + * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized */ struct hw_fence_driver_data { @@ -286,6 +287,8 @@ struct hw_fence_driver_data { /* table with registered client handles */ struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; + + bool vm_ready; #ifdef HW_DPU_IPCC /* state variables */ bool ipcc_dpu_initialized; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 226df1e0cb..bd02ada6a2 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -306,6 +306,8 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da HWFNC_DBG_INIT("init mem\n"); if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) HWFNC_ERR("failed to share memory\n"); + else + drv_data->vm_ready = true; break; case GH_RM_VM_STATUS_RESET: HWFNC_DBG_INIT("reset\n"); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e2e61947c4..0c8fd65d60 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -142,6 +142,12 @@ int msm_hw_fence_create(void *client_handle, HWFNC_ERR("Invalid input\n"); return -EINVAL; } + + if (!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot create fence\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; fence = (struct dma_fence *)params->fence; @@ -233,6 +239,12 @@ int msm_hw_fence_wait_update(void *client_handle, HWFNC_ERR("Invalid data\n"); return -EINVAL; } + + if (!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot destroy fence\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; HWFNC_DBG_H("+\n"); @@ -276,6 +288,12 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) HWFNC_ERR("Invalid client handle!\n"); return -EINVAL; } + + if (!hw_fence_drv_data->vm_ready) { + HWFNC_DBG_H("VM not ready, cannot reset client\n"); + return -EAGAIN; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; @@ -292,8 +310,9 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro { struct msm_hw_fence_client *hw_fence_client; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || + !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle) || (handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) { @@ -319,8 +338,9 @@ int msm_hw_fence_trigger_signal(void *client_handle, { struct msm_hw_fence_client 
*hw_fence_client; - if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { - HWFNC_ERR("hw fence driver not ready\n"); + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready + || !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); return -EAGAIN; } else if (IS_ERR_OR_NULL(client_handle)) { HWFNC_ERR("Invalid client\n"); From 91cfcb8220c61b62f81a35b9c7c00fd1f39b0598 Mon Sep 17 00:00:00 2001 From: Prabhanjan Kandula Date: Fri, 20 May 2022 11:20:43 -0700 Subject: [PATCH 14/77] mm-drivers: add support for compiling out mm driver modules This change provides required support to disable mm driver modules compilation along with all modules and supports specific flag for override to enable compilation if required. Change-Id: I3ea1383855a6be49ed12a23a3585e9d6ebb1810a Signed-off-by: Prabhanjan Kandula (cherry picked from commit 91a337989da77e6071fdfbd03b43c423356c14c0) --- Android.mk | 17 +++++++++++++---- mm_driver_board.mk | 28 ++++++++++++++++++---------- mm_driver_product.mk | 14 +++++++++++--- 3 files changed, 42 insertions(+), 17 deletions(-) diff --git a/Android.mk b/Android.mk index c14968715d..86e3104278 100644 --- a/Android.mk +++ b/Android.mk @@ -1,7 +1,16 @@ MM_DRIVER_PATH := $(call my-dir) -include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk -ifneq ($(TARGET_BOARD_PLATFORM), taro) -include $(MM_DRIVER_PATH)/hw_fence/Android.mk -include $(MM_DRIVER_PATH)/sync_fence/Android.mk + +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif endif +ifeq ($(MM_DRV_DLKM_ENABLE), true) + include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk + ifneq ($(TARGET_BOARD_PLATFORM), taro) + include $(MM_DRIVER_PATH)/hw_fence/Android.mk + include $(MM_DRIVER_PATH)/sync_fence/Android.mk + endif +endif diff --git a/mm_driver_board.mk b/mm_driver_board.mk index 127c8dcc31..7e18d8bc4e 100644 --- a/mm_driver_board.mk +++ b/mm_driver_board.mk @@ -1,18 +1,26 @@ #SPDX-License-Identifier: GPL-2.0-only -ifneq ($(TARGET_BOARD_AUTO),true) - ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif - ifneq ($(TARGET_BOARD_PLATFORM), taro) - BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_AUTO),true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko + ifneq ($(TARGET_BOARD_PLATFORM), taro) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \ - $(KERNEL_MODULES_OUT)/msm_hw_fence.ko - BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += 
$(KERNEL_MODULES_OUT)/sync_fence.ko \ + $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \ $(KERNEL_MODULES_OUT)/msm_hw_fence.ko + endif endif endif endif diff --git a/mm_driver_product.mk b/mm_driver_product.mk index c7d11b3449..4c2a5d2fe9 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -1,7 +1,15 @@ -# SPDX-License-Identifier: GPL-2.0-only PRODUCT_PACKAGES += msm_ext_display.ko -ifneq ($(TARGET_BOARD_PLATFORM), taro) -PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko +MM_DRV_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false) + MM_DRV_DLKM_ENABLE := false + endif +endif + +ifeq ($(MM_DRV_DLKM_ENABLE), true) + ifneq ($(TARGET_BOARD_PLATFORM), taro) + PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko + endif endif From f73a4b179a18fef496c5ab0203187eb15c2a4c2a Mon Sep 17 00:00:00 2001 From: Shirisha Kollapuram Date: Thu, 26 May 2022 14:58:49 +0530 Subject: [PATCH 15/77] mm-drivers: hw-fence: add hardware fence driver validation ioctls This change adds support to validate the hw_fence driver by adding IOCTLs that expose the hw_fence interfaces so that validation clients can register/unregister, create/destroy and wait/signal fences. IOCTL's will be available for debug purpose only when the debugfs config is set. Change-Id: Idb0d04ee245718e9b19ccd12ac760829831426b0 Signed-off-by: Shirisha Kollapuram --- hw_fence/Kbuild | 2 + hw_fence/include/hw_fence_drv_debug.h | 133 +++++ hw_fence/include/hw_fence_drv_priv.h | 22 + hw_fence/src/hw_fence_drv_debug.c | 149 ++---- hw_fence/src/hw_fence_drv_ipc.c | 18 + hw_fence/src/hw_fence_drv_priv.c | 11 + hw_fence/src/hw_fence_drv_utils.c | 11 + hw_fence/src/hw_fence_ioctl.c | 711 ++++++++++++++++++++++++++ hw_fence/src/msm_hw_fence.c | 4 + 9 files changed, 947 insertions(+), 114 deletions(-) create mode 100644 hw_fence/src/hw_fence_ioctl.c diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index fcd6b6e7bb..8948d581e9 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -14,5 +14,7 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_debug.o \ src/hw_fence_drv_ipc.o +msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o + CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index d980331113..de0e6e7a37 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -6,6 +6,10 @@ #ifndef __HW_FENCE_DRV_DEBUG #define __HW_FENCE_DRV_DEBUG +#include "hw_fence_drv_ipc.h" + +#define HW_FENCE_NAME_SIZE 64 + enum hw_fence_drv_prio { HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */ HW_FENCE_LOW = 0x000002, /* Low density debug messages */ @@ -58,4 +62,133 @@ extern u32 msm_hw_fence_debug_level; int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); +#if IS_ENABLED(CONFIG_DEBUG_FS) + +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); + +extern const struct file_operations hw_sync_debugfs_fops; + +struct hw_fence_out_clients_map { + int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_signal_id; /* ipc signal id for the hw fence client */ +}; + +/* These signals are the ones that the actual clients should be triggering, hw-fence driver + * does not need to have knowledge of these signals. Adding them here for debugging purposes. 
+ * Only fence controller and the cliens know these id's, since these + * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' + * The index of this struct must match the enum hw_fence_client_id + */ +static const struct hw_fence_out_clients_map + dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 12}, /* CTL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21}, /* VAL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22}, /* VAL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23}, /* VAL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24}, /* VAL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25}, /* VAL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26}, /* VAL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27}, /* VAL6 */ +}; + +/** + * struct hw_dma_fence - fences created by hw-fence for debugging. + * @base: base dma-fence structure, this must remain at beginning of the struct. + * @name: name of each fence. + * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence + * driver after a successful registration of the client and used by this fence + * during release. + */ +struct hw_dma_fence { + struct dma_fence base; + char name[HW_FENCE_NAME_SIZE]; + void *client_handle; +}; + +static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) +{ + return container_of(fence, struct hw_dma_fence, base); +} + +static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) +{ + struct hw_dma_fence *dma_fence; + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) { + kfree(fences_lock[fence_idx]); + + dma_fence = to_hw_dma_fence(fences[fence_idx]); + kfree(dma_fence); + } + + kfree(fences_lock); + kfree(fences); +} + +static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); + + return hw_dma_fence->name; +} + +static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) +{ + if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { + HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n"); + return; + } + + /* release hw-fence */ + if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) + HWFNC_ERR("failed to release hw_fence!\n"); +} + +static void hw_fence_dbg_release(struct dma_fence *fence) +{ + struct hw_dma_fence *hw_dma_fence; + + if (!fence) + return; + + HWFNC_DBG_H("release backing fence %pK\n", fence); + hw_dma_fence = to_hw_dma_fence(fence); + + if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) + _hw_fence_release(hw_dma_fence); + + kfree(fence->lock); + kfree(hw_dma_fence); +} + +static struct dma_fence_ops hw_fence_dbg_ops = { + .get_driver_name = hw_fence_dbg_get_driver_name, + .get_timeline_name = hw_fence_dbg_get_timeline_name, + .enable_signaling = hw_fence_dbg_enable_signaling, + .wait = dma_fence_default_wait, + .release = hw_fence_dbg_release, +}; + +#endif /* CONFIG_DEBUG_FS */ + #endif 
/* __HW_FENCE_DRV_DEBUG */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index e15fd4159c..5f7b97e33b 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -85,6 +85,13 @@ enum hw_fence_lookup_ops { * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. + * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0. + * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1. + * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2. + * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3. + * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4. + * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5. + * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6. */ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_0, @@ -94,6 +101,15 @@ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_4, HW_FENCE_LOOPBACK_DPU_CTL_5, HW_FENCE_LOOPBACK_GFX_CTX_0, +#if IS_ENABLED(CONFIG_DEBUG_FS) + HW_FENCE_LOOPBACK_VAL_0, + HW_FENCE_LOOPBACK_VAL_1, + HW_FENCE_LOOPBACK_VAL_2, + HW_FENCE_LOOPBACK_VAL_3, + HW_FENCE_LOOPBACK_VAL_4, + HW_FENCE_LOOPBACK_VAL_5, + HW_FENCE_LOOPBACK_VAL_6, +#endif /* CONFIG_DEBUG_FS */ HW_FENCE_LOOPBACK_MAX, }; @@ -121,6 +137,8 @@ struct msm_hw_fence_queue { * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_id: id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue + * @wait_queue: wait queue for the validation clients + * @val_signal: doorbell flag to signal the validation clients in the wait queue */ struct msm_hw_fence_client { enum hw_fence_client_id client_id; @@ -129,6 +147,10 @@ struct msm_hw_fence_client { int ipc_signal_id; int ipc_client_id; bool update_rxq; +#if IS_ENABLED(CONFIG_DEBUG_FS) + wait_queue_head_t wait_queue; + atomic_t val_signal; +#endif /* CONFIG_DEBUG_FS */ }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index c047a3b251..f872c4c197 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -12,7 +12,6 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_utils.h" -#define HW_FENCE_NAME_SIZE 64 #define HW_FENCE_DEBUG_MAX_LOOPS 200 u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; @@ -38,20 +37,6 @@ struct client_data { struct list_head list; }; -/** - * struct hw_dma_fence - fences created by hw-fence for debugging. - * @base: base dma-fence structure, this must remain at beginning of the struct. - * @name: name of each fence. - * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence - * driver after a successful registration of the client and used by this fence - * during release. 
- */ -struct hw_dma_fence { - struct dma_fence base; - char name[HW_FENCE_NAME_SIZE]; - void *client_handle; -}; - #if IS_ENABLED(CONFIG_DEBUG_FS) static int _get_debugfs_input_client(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos, @@ -155,67 +140,6 @@ static const struct file_operations hw_fence_dbg_ipcc_fops = { .write = hw_fence_dbg_ipcc_write, }; -static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence) -{ - return container_of(fence, struct hw_dma_fence, base); -} - -static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence); - - return hw_dma_fence->name; -} - -static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence) -{ - return true; -} - -static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence) -{ - if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) { - HWFNC_ERR("invalid hwfence data, won't release hw_fence\n"); - return; - } - - /* release hw-fence */ - if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base)) - HWFNC_ERR("failed to release hw_fence\n"); -} - -static void hw_fence_dbg_release(struct dma_fence *fence) -{ - struct hw_dma_fence *hw_dma_fence; - - if (!fence) - return; - - HWFNC_DBG_H("release backing fence %pK\n", fence); - hw_dma_fence = to_hw_dma_fence(fence); - - if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) - _hw_fence_release(hw_dma_fence); - - kfree(fence->lock); - kfree(hw_dma_fence); -} - -static struct dma_fence_ops hw_fence_dbg_ops = { - .get_driver_name = hw_fence_dbg_get_driver_name, - .get_timeline_name = hw_fence_dbg_get_timeline_name, - .enable_signaling = hw_fence_dbg_enable_signaling, - .wait = dma_fence_default_wait, - .release = hw_fence_dbg_release, -}; - struct client_data *_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id) { struct client_data *node = NULL; @@ -321,29 +245,6 @@ static ssize_t hw_fence_dbg_register_clients_wr(struct file *file, return count; } -struct hw_fence_out_clients_map { - int ipc_client_id; /* ipc client id for the hw fence client */ - int ipc_signal_id; /* ipc signal id for the hw fence client */ -}; - -/* NOTE: These signals are the ones that the actual clients should be triggering, hw-fence driver - * does not need to have knowledge of these signals. Adding them here for debugging purposes. - * Only fence controller and the cliens know these id's, since these - * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller' - * - * Note that the index of this struct must match the enum hw_fence_client_id - */ -struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 12} /* CTL5 */ -}; - /** * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence. * @file: file handler. 
@@ -761,21 +662,7 @@ static ssize_t hw_fence_dbg_dump_table_wr(struct file *file, return user_buf_size; } -static void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock) -{ - struct hw_dma_fence *dma_fence; - int idx; - for (idx = i; idx >= 0 ; idx--) { - kfree(fences_lock[idx]); - - dma_fence = to_hw_dma_fence(fences[idx]); - kfree(dma_fence); - } - - kfree(fences_lock); - kfree(fences); -} /** * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence. @@ -840,7 +727,7 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, for (i = 0; i < num_fences; i++) { struct hw_dma_fence *dma_fence; - fences_lock[i] = kzalloc(sizeof(*fences_lock), GFP_KERNEL); + fences_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); if (!fences_lock[i]) { _cleanup_fences(i, fences, fences_lock); return -ENOMEM; @@ -916,6 +803,39 @@ error: return count; } +int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, + int client_id) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (client_id < HW_FENCE_LOOPBACK_VAL_0 || client_id > HW_FENCE_LOOPBACK_VAL_6) { + HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id, + HW_FENCE_LOOPBACK_VAL_0, HW_FENCE_LOOPBACK_VAL_6); + return -EINVAL; + } + + mutex_lock(&drv_data->clients_mask_lock); + + if (!drv_data->clients[client_id]) { + mutex_unlock(&drv_data->clients_mask_lock); + return -EINVAL; + } + + hw_fence_client = drv_data->clients[client_id]; + + HWFNC_DBG_IRQ("Processing validation client workaround client_id:%d\n", client_id); + + /* set the atomic flag, to signal the client wait */ + atomic_set(&hw_fence_client->val_signal, 1); + + /* wake-up waiting client */ + wake_up_all(&hw_fence_client->wait_queue); + + mutex_unlock(&drv_data->clients_mask_lock); + + return 0; +} + static const struct file_operations hw_fence_reset_client_fops = { .open = simple_open, .write = hw_fence_dbg_reset_client_wr, @@ -988,6 +908,7 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, &hw_fence_dump_table_fops); + debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); return 0; } diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 7879d4f788..c3414a20da 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -40,6 +40,15 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ }; /** @@ -59,6 +68,15 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + 
{HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ +#endif /* CONFIG_DEBUG_FS */ }; int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index edecc41cbc..e6aa770468 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -539,6 +539,17 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: + /* nothing to initialize for validation clients */ + break; +#endif /* CONFIG_DEBUG_FS */ case HW_FENCE_CLIENT_ID_CTL0: case HW_FENCE_CLIENT_ID_CTL1: case HW_FENCE_CLIENT_ID_CTL2: diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 226df1e0cb..73d2c83273 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -134,6 +134,17 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c case HW_FENCE_LOOPBACK_GFX_CTX_0: ret = _process_gfx_client_loopback(drv_data, client_id); break; +#if IS_ENABLED(CONFIG_DEBUG_FS) + case HW_FENCE_LOOPBACK_VAL_0: + case HW_FENCE_LOOPBACK_VAL_1: + case HW_FENCE_LOOPBACK_VAL_2: + case HW_FENCE_LOOPBACK_VAL_3: + case HW_FENCE_LOOPBACK_VAL_4: + case HW_FENCE_LOOPBACK_VAL_5: + case HW_FENCE_LOOPBACK_VAL_6: + ret = process_validation_client_loopback(drv_data, client_id); + break; +#endif /* CONFIG_DEBUG_FS */ default: HWFNC_ERR("unknown client:%d\n", client_id); ret = -EINVAL; diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c new file mode 100644 index 0000000000..8ff2bdfb02 --- /dev/null +++ b/hw_fence/src/hw_fence_ioctl.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_utils.h" +#include "hw_fence_drv_ipc.h" +#include "hw_fence_drv_debug.h" + +#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls) +#define HW_FENCE_ARRAY_SIZE 10 +#define HW_SYNC_IOC_MAGIC 'W' +#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long) +#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long) +#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\ + struct hw_fence_sync_create_data) +#define HW_SYNC_IOC_DESTROY_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 13,\ + struct hw_fence_sync_create_data) +#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\ + struct hw_fence_array_sync_create_data) +#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 15,\ + struct hw_fence_array_sync_create_data) +#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int) +#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long) +#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int) +#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long) +#define HW_FENCE_IOCTL_NR(n) (_IOC_NR(n) - 2) +#define HW_IOCTL_DEF(ioctl, _func) \ + [HW_FENCE_IOCTL_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .name = #ioctl \ + } + +/** + * struct hw_sync_obj - per client hw sync object. + * @context: context id used to create fences. + * @client_id: to uniquely represent client. + * @client_handle: Pointer to the structure holding the resources + * allocated to the client. + * @mem_descriptor: Memory descriptor of the queue allocated by the + * hardware fence driver for each client during register. + */ +struct hw_sync_obj { + u64 context; + int client_id; + void *client_handle; + struct msm_hw_fence_mem_addr mem_descriptor; +}; + +/** + * struct hw_fence_sync_create_data - data used in creating fences. + * @seqno: sequence number. + * @incr_context: if set, then the context would be incremented. + * @fence: returns the fd of the new sync_file with the created fence. + * @hash: fence hash + */ +struct hw_fence_sync_create_data { + u64 seqno; + bool incr_context; + __s32 fence; + u64 hash; +}; + +/** + * struct hw_fence_array_sync_create_data - data used in creating multiple fences. + * @seqno: array of sequence numbers used to create fences. + * @num_fences: number of fences to be created. + * @fence: return the fd of the new sync_file with the created fence. + * @hash: array of fence hash + */ +struct hw_fence_array_sync_create_data { + u64 seqno[HW_FENCE_ARRAY_SIZE]; + int num_fences; + __s32 fence; + u64 hash[HW_FENCE_ARRAY_SIZE]; +}; + +/** + * struct hw_fence_sync_signal_data - data used to signal fences. + * @hash: hash of the fence. + * @error_flag: error flag + */ +struct hw_fence_sync_signal_data { + u64 hash; + u32 error_flag; +}; + +/** + * struct hw_fence_sync_wait_data - data used to wait on fences. + * @fence: fence fd. + * @timeout_ms: fence wait time out. + */ +struct hw_fence_sync_wait_data { + __s32 fence; + u64 timeout_ms; +}; + +/** + * struct hw_fence_sync_reset_data - data used to reset client. + * @client_id: client id. 
+ * @reset_flag: reset flag + */ +struct hw_fence_sync_reset_data { + int client_id; + u32 reset_flag; +}; + +typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg); + +/** + * struct hw_sync_ioctl_def - hw_sync driver ioctl entry + * @cmd: ioctl command number, without flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + */ +struct hw_sync_ioctl_def { + unsigned int cmd; + hw_fence_ioctl_t *func; + const char *name; +}; + +static bool _is_valid_client(struct hw_sync_obj *obj) +{ + if (!obj) + return false; + + if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + return false; + } + + return true; +} + +static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id; + + if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id))) + return -EFAULT; + + if (!obj) + return -EINVAL; + + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + return -EINVAL; + } + + return client_id; +} + +static void *_hw_sync_get_fence(int fd) +{ + return fd >= 0 ? sync_file_get_fence(fd) : NULL; +} + +static int hw_sync_debugfs_open(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + obj->context = dma_fence_context_alloc(1); + file->private_data = obj; + + return 0; +} + +static int hw_sync_debugfs_release(struct inode *inode, struct file *file) +{ + struct hw_sync_obj *obj = file->private_data; + + if (!obj) + return -EINVAL; + + kfree(obj); + + return 0; +} + +static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) { + return client_id; + } else if (obj->client_handle) { + HWFNC_ERR("client:%d already registered as validation client\n", client_id); + return -EINVAL; + } + + obj->client_id = client_id; + obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor); + if (IS_ERR_OR_NULL(obj->client_handle)) + return -EINVAL; + + return 0; +} + +static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int client_id = _get_client_id(obj, arg); + + if (IS_ERR(&client_id)) + return client_id; + + return msm_hw_fence_deregister(obj->client_handle); +} + +static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_create_params params; + struct hw_fence_sync_create_data data; + struct hw_dma_fence *fence; + spinlock_t *fence_lock; + u64 hash; + struct sync_file *sync_file; + int fd, ret; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + /* create dma fence */ + fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) { + kfree(fence_lock); + return -ENOMEM; + } + + snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu", + obj->client_id, obj->context, data.seqno); + + 
spin_lock_init(fence_lock); + dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno); + + HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id, + obj->context, data.seqno); + params.fence = &fence->base; + params.handle = &hash; + + /* create hw fence */ + ret = msm_hw_fence_create(obj->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", + obj->client_id, obj->context, data.seqno); + dma_fence_put(&fence->base); + return -EINVAL; + } + + /* keep handle in dma_fence, to destroy hw-fence during release */ + fence->client_handle = obj->client_handle; + + if (data.incr_context) + obj->context = dma_fence_context_alloc(1); + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd < 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence->base); + return fd; + } + + sync_file = sync_file_create(&fence->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence->base); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + dma_fence_put(&fence->base); + + data.fence = fd; + data.hash = hash; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + dma_fence_put(&fence->base); + fput(sync_file->file); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg) +{ + int fd; + struct hw_dma_fence *fence; + struct hw_fence_sync_create_data data; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); + + if (!fence) { + HWFNC_ERR("fence for fd:%d not found\n", fd); + return -EINVAL; + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(&fence->base); + + /* To destroy fence */ + dma_fence_put(&fence->base); + + return 0; +} + +static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence_array *fence_array; + struct hw_fence_array_sync_create_data data; + struct dma_fence **fences = NULL; + struct msm_hw_fence_create_params params; + struct sync_file *sync_file; + spinlock_t **fence_lock = NULL; + int num_fences, i, fd, ret; + u64 hash; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + num_fences = data.num_fences; + if (num_fences >= HW_FENCE_ARRAY_SIZE) { + HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n", + num_fences, HW_FENCE_ARRAY_SIZE); + return -EINVAL; + } + fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL); + if (!fence_lock) + return -ENOMEM; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + kfree(fence_lock); + return -ENOMEM; + } + + /* + * Create the array of dma fences + * This API takes seqno[num_fences] as the seqno for the fence-array + * and from 0 to (num_fences - 1) for the fences in the array. 
+ */ + for (i = 0; i < num_fences; i++) { + struct hw_dma_fence *dma_fence; + + fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!fence_lock[i]) { + _cleanup_fences(i, fences, fence_lock); + return -ENOMEM; + } + + dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); + if (!dma_fence) { + _cleanup_fences(i, fences, fence_lock); + return -ENOMEM; + } + fences[i] = &dma_fence->base; + + spin_lock_init(fence_lock[i]); + dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i], + obj->context, data.seqno[i]); + } + + /* create the fence array from array of dma fences */ + fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0); + if (!fence_array) { + HWFNC_ERR("Error creating fence_array\n"); + _cleanup_fences(num_fences - 1, fences, fence_lock); + return -EINVAL; + } + + /* create hw fences */ + for (i = 0; i < num_fences; i++) { + params.fence = fences[i]; + params.handle = &hash; + + ret = msm_hw_fence_create(obj->client_handle, ¶ms); + if (ret) { + HWFNC_ERR("Error creating HW fence\n"); + dma_fence_put(&fence_array->base); + /* + * free array of pointers, no need to call kfree in 'fences', + * since that is released from the fence-array release api + */ + kfree(fence_lock); + kfree(fence_array); + return -EINVAL; + } + + /* keep handle in dma_fence, to destroy hw-fence during release */ + to_hw_dma_fence(fences[i])->client_handle = obj->client_handle; + data.hash[i] = hash; + } + + /* create fd */ + fd = get_unused_fd_flags(0); + if (fd < 0) { + HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + return fd; + } + + sync_file = sync_file_create(&fence_array->base); + if (sync_file == NULL) { + HWFNC_ERR("couldn't create fence fd, %d\n", fd); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + ret = -EINVAL; + goto exit; + } + + /* Decrement the refcount that sync_file_create increments */ + dma_fence_put(&fence_array->base); + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + fput(sync_file->file); + dma_fence_put(&fence_array->base); + kfree(fence_lock); + kfree(fence_array); + ret = -EFAULT; + goto exit; + } + + fd_install(fd, sync_file->file); + + return 0; + +exit: + put_unused_fd(fd); + return ret; +} + +static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence_array *fence_array; + struct dma_fence *fence; + struct hw_fence_array_sync_create_data data; + int fd; + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + fence_array = to_dma_fence_array(fence); + if (!fence_array) { + HWFNC_ERR("Invalid fence array fd: %d\n", fd); + return -EINVAL; + } + + /* Destroy fence array */ + dma_fence_put(&fence_array->base); + + return 0; +} + +/* + * this IOCTL only supports receiving one fence as input-parameter, which can be + * either a "dma_fence" or a "dma_fence_array", but eventually we would expand + * this API to receive more fences + */ +static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct dma_fence *fence; + int ret, fd, num_fences = 1; + + if 
(!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&fd, (void __user *)arg, sizeof(fd))) + return -EFAULT; + + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + return ret; +} + +static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg) +{ + struct hw_fence_sync_signal_data data; + int ret, tx_client, rx_client, signal_id; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag); + if (ret) { + HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id); + return ret; + } + + signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id; + if (signal_id < 0) + return -EINVAL; + + tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); + if (ret) { + HWFNC_ERR("hw fence trigger signal has failed\n"); + return ret; + } + + return 0; +} + +static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue_payload payload; + struct hw_fence_sync_wait_data data; + struct dma_fence *fence; + int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ + + if (!_is_valid_client(obj)) + return -EINVAL; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + fd = data.fence; + fence = (struct dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + HWFNC_ERR("Invalid fence fd: %d\n", fd); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle for fd:%d\n", fd); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -EINVAL; + } + + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(data.timeout_ms)); + if (!ret) { + HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms); + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return -ETIMEDOUT; + } + + /* clear doorbell signal flag */ + atomic_set(&hw_fence_client->val_signal, 0); + + while (read) { + read = hw_fence_read_queue(obj->client_handle, &payload, queue_type); + if (read < 0) { + HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id); + break; + } + HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n", + payload.hash, payload.flags, payload.error); + if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) { + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + return 0; + } + } + + /* Decrement the refcount that hw_sync_get_fence increments */ + dma_fence_put(fence); + + HWFNC_ERR("fence received did not match the fence expected\n"); + HWFNC_ERR("fence received: context:%d seqno:%d fence expected: context:%d seqno:%d\n", 
+ payload.ctxt_id, payload.seqno, fence->context, fence->seqno); + + return read; +} + +static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg) +{ + int ret; + struct hw_fence_sync_reset_data data; + + if (!_is_valid_client(obj)) { + return -EINVAL; + } else if (IS_ERR_OR_NULL(obj->client_handle)) { + HWFNC_ERR("client:%d handle doesn't exists\n", data.client_id); + return -EINVAL; + } + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag); + if (ret) { + HWFNC_ERR("hw fence reset client has failed\n"); + return ret; + } + + return 0; +} + +static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = { + HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client), + HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client), + HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence), + HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence), + HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array), + HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array), + HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal), + HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait), + HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client) +}; + +static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hw_sync_obj *obj = file->private_data; + int num = HW_FENCE_IOCTL_NR(cmd); + hw_fence_ioctl_t *func; + + if (num >= HW_SYNC_IOCTL_COUNT) { + HWFNC_ERR("invalid ioctl num = %d\n", num); + return -EINVAL; + } + + func = (&hw_sync_debugfs_ioctls[num])->func; + if (unlikely(!func)) { + HWFNC_ERR("no function num = %d\n", num); + return -ENOTTY; + } + + return func(obj, arg); +} + +const struct file_operations hw_sync_debugfs_fops = { + .open = hw_sync_debugfs_open, + .release = hw_sync_debugfs_release, + .unlocked_ioctl = hw_sync_debugfs_ioctl, +}; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e2e61947c4..9d34aa7ea1 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -98,6 +98,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_id); +#if IS_ENABLED(CONFIG_DEBUG_FS) + init_waitqueue_head(&hw_fence_client->wait_queue); +#endif /* CONFIG_DEBUG_FS */ + return (void *)hw_fence_client; error: From c11e6e06aad06feb8c85a3de77bf23da58044d90 Mon Sep 17 00:00:00 2001 From: Christina Oliveira Date: Wed, 29 Jun 2022 15:04:42 -0700 Subject: [PATCH 16/77] mm-drivers: sync: export sync_fence module symbols This change updates makefile to export sync_fence module symbols, so these can be imported by other external kernel modules. 
Change-Id: Idd64fae8f8797cbcb4b4012666ed5621fa83062f Signed-off-by: Christina Oliveira --- sync_fence/Android.mk | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk index 59ee256f05..d784b18e9c 100644 --- a/sync_fence/Android.mk +++ b/sync_fence/Android.mk @@ -18,6 +18,15 @@ KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR) KBUILD_OPTIONS += MODNAME=sync_fence KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := sync-fence-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +include $(DLKM_DIR)/Build_external_kernelmodule.mk ########################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) From 0a5f30607f23bcc190dc8790288de0fa7fab2973 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 29 Jun 2022 10:18:02 -0700 Subject: [PATCH 17/77] mm-drivers: hw_fence: enable hw-fence driver based on cmdline var This change ensures that the hw-fence driver is disabled by default and can be enabled or disabled based on a kernel command line argument. If the hw-fence driver is disabled, msm_hw_fence_probe returns an error. Change-Id: I248f29158c17a43151aa8b0c980a7ce0f5e758d6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0c8fd65d60..71288b7919 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -15,6 +15,7 @@ #include "hw_fence_drv_ipc.h" struct hw_fence_driver_data *hw_fence_drv_data; +static bool hw_fence_driver_enable; void *msm_hw_fence_register(enum hw_fence_client_id client_id, struct msm_hw_fence_mem_addr *mem_descriptor) @@ -420,6 +421,11 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return -EINVAL; } + if (!hw_fence_driver_enable) { + HWFNC_DBG_INFO("hw fence driver not enabled\n"); + return -EOPNOTSUPP; + } + if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) rc = msm_hw_fence_probe_init(pdev); if (rc) @@ -499,6 +505,9 @@ static void __exit msm_hw_fence_exit(void) HWFNC_DBG_H("-\n"); } +module_param_named(enable, hw_fence_driver_enable, bool, 0600); +MODULE_PARM_DESC(enable, "Enable hardware fences"); + module_init(msm_hw_fence_init); module_exit(msm_hw_fence_exit); From 8ad0ce90d22bd18576af5d34403ff001b1c5eb4b Mon Sep 17 00:00:00 2001 From: Bruce Hoo Date: Mon, 9 May 2022 18:56:17 +0800 Subject: [PATCH 18/77] mm-drivers: spec_fence: increasing device_available Spec_fence device is already used by surfaceflinger, increase device_available by 1. 
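For illustration only, a minimal userspace check of the new behaviour; the
device node name (assumed here to be /dev/spec_sync, after the driver name)
is not spelled out in this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* first open, e.g. already held by surfaceflinger */
		int fd1 = open("/dev/spec_sync", O_RDWR);
		/* a second concurrent open is now accepted */
		int fd2 = open("/dev/spec_sync", O_RDWR);
		/* a third open would still be rejected by spec_fence_init_locked() */

		printf("fd1:%d fd2:%d\n", fd1, fd2);

		if (fd2 >= 0)
			close(fd2);
		if (fd1 >= 0)
			close(fd1);
		return 0;
	}
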
Change-Id: I3795ffc40fb2ca95e933d4ed056dc9a4c628ba1e Signed-off-by: Bruce Hoo --- sync_fence/src/qcom_sync_file.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index b3ecf4eb1f..f054f80e34 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -105,14 +105,17 @@ static void clear_fence_array_tracker(bool force_clear) static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) { - if (atomic_read(&obj->device_available)) + if (atomic_read(&obj->device_available) > 1) { + pr_err("number of device fds are limited by 2, device opened:%d\n", + atomic_read(&obj->device_available)); return NULL; + } else if (!atomic_read(&obj->device_available)) { + memset(obj->name, 0, NAME_LEN); + strscpy(obj->name, name, sizeof(obj->name)); + } atomic_inc(&obj->device_available); - memset(obj->name, 0, NAME_LEN); - strlcpy(obj->name, name, sizeof(obj->name)); - return obj; } @@ -153,14 +156,16 @@ static int spec_sync_release(struct inode *inode, struct file *file) mutex_lock(&sync_dev.lock); if (!atomic_read(&obj->device_available)) { - pr_err("sync release failed !!\n"); + pr_err("no device to release!!\n"); ret = -ENODEV; goto end; } - clear_fence_array_tracker(true); atomic_dec(&obj->device_available); + if (!atomic_read(&obj->device_available)) + clear_fence_array_tracker(true); + end: mutex_unlock(&sync_dev.lock); return ret; From 93afde537dddfaab1a96eacf1e515a3396754930 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 21 Jun 2022 09:45:18 -0700 Subject: [PATCH 19/77] mm-drivers: hw_fence: move mem barrier before mem read Move memory barriers to ensure data is available before the read of indexes from the queues. 
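Not a standalone program, just a condensed sketch of the pairing this change
relies on (the writer's index-update step is an assumption, it sits outside
the hunks below): the writer fills the payload and issues a write barrier
before publishing it, so the side that samples the read/write indexes now
issues a full barrier first.

	/* writer side (hw_fence_update_queue), simplified */
	writeq_relaxed(hash, &write_ptr_payload->hash);
	/* ... remaining payload fields ... */
	wmb();		/* payload visible before it is published */
	writel_relaxed(to_write_idx, &hfi_header->write_index);	/* assumed publish step */

	/* index-sampling side (hw_fence_read_queue / hw_fence_update_queue), after this change */
	mb();		/* make sure data is ready before the reads */
	read_idx = readl_relaxed(&hfi_header->read_index);
	write_idx = readl_relaxed(&hfi_header->write_index);
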
Change-Id: I3b5a7903f038cc62b461fbfc9cbeb143b862a1f1 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cdfe9a81c2..a980fb6bfa 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -166,13 +166,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, return -EINVAL; } + /* Make sure data is ready before read */ + mb(); + /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); - /* Make sure we read the values */ - rmb(); - HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); @@ -274,13 +274,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ } + /* Make sure data is ready before read */ + mb(); + /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); - /* Make sure we read the values */ - rmb(); - HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue, queue_type); From 05b50f9290f4ab2f425b7a03b648207b2e59574a Mon Sep 17 00:00:00 2001 From: Ashwin Pillai Date: Thu, 14 Jul 2022 16:50:18 -0400 Subject: [PATCH 20/77] mm-drivers: add support for build.sh techpack display_tp add environment variable to be used by display-techpack.mk for build.sh techpack display_tp. Change-Id: I46b0ac3fb40371e3282191c75a501230243d9f52 Signed-off-by: Ashwin Pillai --- mm_driver_product.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm_driver_product.mk b/mm_driver_product.mk index 4c2a5d2fe9..bb98492d0a 100644 --- a/mm_driver_product.mk +++ b/mm_driver_product.mk @@ -13,3 +13,5 @@ ifeq ($(MM_DRV_DLKM_ENABLE), true) PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko endif endif + +DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko \ No newline at end of file From c344a18254ddcb9beb242026b327a09c82f3ef3d Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 1 Jun 2022 12:12:46 -0700 Subject: [PATCH 21/77] mm-drivers: hw_fence: add timestamp to the queue Add qtimer timestamps to queue payloads. This timestamp is to be updated by the client that adds the entry to the queue. 
Change-Id: I69dd4420ec18b7470f99d5cfe46129c10b3f3391 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +- hw_fence/src/hw_fence_drv_debug.c | 86 ++++++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 1 + 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 8de604ee0d..8ce864639c 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -326,6 +326,7 @@ struct hw_fence_driver_data { * @error: error code for this fence, fence controller receives this * error from the signaling client through the tx queue and * propagates the error to the waiting client through rx queue + * @timestamp: qtime when the payload is written into the queue */ struct msm_hw_fence_queue_payload { u64 ctxt_id; @@ -333,7 +334,7 @@ struct msm_hw_fence_queue_payload { u64 hash; u64 flags; u32 error; - u32 unused; /* align to 64-bit */ + u32 timestamp; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index f872c4c197..a1e80ace58 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -536,6 +536,85 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } +/** + * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @count: size of the user buffer. + * @ppos: position offset of the user buffer. + * + * This debugfs dumps the hw-fence queues. Takes as input the desired client to dump. + * Dumps to debug msgs the contents of the TX and RX queues for that client, if they exist. + */ +static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + struct msm_hw_fence_queue *rx_queue; + struct msm_hw_fence_queue *tx_queue; + u64 hash, ctx_id, seqno, timestamp, flags; + u32 *read_ptr, error; + int client_id, i; + struct msm_hw_fence_queue_payload *read_ptr_payload; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data); + if (client_id < 0) + return -EINVAL; + + if (!drv_data->clients[client_id] || + IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]) || + IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1])) { + HWFNC_ERR("client %d not initialized\n", client_id); + return -EINVAL; + } + + HWFNC_DBG_L("Queues for client %d\n", client_id); + + rx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]; + tx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1]; + + HWFNC_DBG_L("-------RX QUEUE------\n"); + for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { + read_ptr = ((u32 *)rx_queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + + ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); + seqno = readq_relaxed(&read_ptr_payload->seqno); + hash = readq_relaxed(&read_ptr_payload->hash); + flags = readq_relaxed(&read_ptr_payload->flags); + error = readl_relaxed(&read_ptr_payload->error); + timestamp = readl_relaxed(&read_ptr_payload->timestamp); + + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u 
time:%u\n", + i, hash, ctx_id, seqno, flags, error, timestamp); + } + + HWFNC_DBG_L("-------TX QUEUE------\n"); + for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { + read_ptr = ((u32 *)tx_queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; + + ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); + seqno = readq_relaxed(&read_ptr_payload->seqno); + hash = readq_relaxed(&read_ptr_payload->hash); + flags = readq_relaxed(&read_ptr_payload->flags); + error = readl_relaxed(&read_ptr_payload->error); + timestamp = readl_relaxed(&read_ptr_payload->timestamp); + HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + i, hash, ctx_id, seqno, flags, error, timestamp); + } + + return count; +} + /** * hw_fence_dbg_dump_table_rd() - debugfs read to dump the hw-fences table. * @file: file handler. @@ -862,6 +941,11 @@ static const struct file_operations hw_fence_dump_table_fops = { .read = hw_fence_dbg_dump_table_rd, }; +static const struct file_operations hw_fence_dump_queues_fops = { + .open = simple_open, + .write = hw_fence_dbg_dump_queues_wr, +}; + static const struct file_operations hw_fence_create_join_fence_fops = { .open = simple_open, .write = hw_fence_dbg_create_join_fence, @@ -908,6 +992,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level); debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data, &hw_fence_dump_table_fops); + debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data, + &hw_fence_dump_queues_fops); debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); return 0; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index d2db557c4e..c88d8cf1a5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -324,6 +324,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); writel_relaxed(error, &write_ptr_payload->error); + writel_relaxed(hw_fence_get_qtime(drv_data), &write_ptr_payload->timestamp); /* update memory for the message */ wmb(); From b30002d731cae7cf1c2fef522ec1f769ab4053ef Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 29 Jul 2022 14:58:06 -0700 Subject: [PATCH 22/77] mm-drivers: hw_fence: add bounds check for hw fence deregistration Ensure that clients deregister hardware fences for client ids strictly less than HW_FENCE_CLIENT_MAX. This prevents out of bounds array accesses. 
Change-Id: I3453135cfd7a74373421d8db32c3ecb0fffc70d0 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 9df871e05a..0f693be07b 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -124,6 +124,11 @@ int msm_hw_fence_deregister(void *client_handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + HWFNC_DBG_H("+\n"); /* Free all the allocated resources */ From f168780f740baaeb889d7d2e29770082ebcca3d7 Mon Sep 17 00:00:00 2001 From: Manoj Kumar AVM Date: Wed, 3 Aug 2022 23:25:46 -0700 Subject: [PATCH 23/77] mm-drivers: hw-fence: fix static analysis issue Fix static analysis issue where uninitialized variable is being accessed. Change-Id: Iab6210fb4c67f35c7f1bada592800c10f0ad76bc Signed-off-by: Manoj Kumar AVM --- hw_fence/src/hw_fence_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 8ff2bdfb02..3ccd2dd7de 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -655,7 +655,7 @@ static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long ar if (!_is_valid_client(obj)) { return -EINVAL; } else if (IS_ERR_OR_NULL(obj->client_handle)) { - HWFNC_ERR("client:%d handle doesn't exists\n", data.client_id); + HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id); return -EINVAL; } From 99948e971483dcbd72eba32a14e4f114da3d4472 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 26 Jul 2022 17:32:10 -0700 Subject: [PATCH 24/77] mm-drivers: hw_fence: modify hw fence queue payload structure Add size, type, version, and client_data fields to hw fence queue payload and update 32-bit timestamp field to full 64-bit timestamp field. Change-Id: Iafb0eb80f83acd5753786fa50a31c1fb74f1a2fa Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 29 ++++++++++++++++++++++++++-- hw_fence/src/hw_fence_drv_debug.c | 10 ++++++---- hw_fence/src/hw_fence_drv_priv.c | 17 +++++++++++----- 3 files changed, 45 insertions(+), 11 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 8ce864639c..1efc41cc41 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -69,6 +69,12 @@ */ #define MSM_HW_FENCE_MAX_JOIN_PARENTS 3 +/** + * HW_FENCE_PAYLOAD_REV: + * Payload version with major and minor version information + */ +#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF)) + enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, HW_FENCE_LOOKUP_OP_DESTROY, @@ -129,6 +135,13 @@ struct msm_hw_fence_queue { phys_addr_t pa_queue; }; +/** + * enum payload_type - Enum with the queue payload types. + */ +enum payload_type { + HW_FENCE_PAYLOAD_TYPE_1 = 1 +}; + /** * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. * @client_id: id of the client @@ -319,22 +332,34 @@ struct hw_fence_driver_data { /** * struct msm_hw_fence_queue_payload - hardware fence clients queues payload. + * @size: size of queue payload + * @type: type of queue payload + * @version: version of queue payload. 
High eight bits are for major and lower eight + * bits are for minor version * @ctxt_id: context id of the dma fence * @seqno: sequence number of the dma fence * @hash: fence hash * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions + * @client_data: data passed from and returned to waiting client upon fence signaling * @error: error code for this fence, fence controller receives this * error from the signaling client through the tx queue and * propagates the error to the waiting client through rx queue - * @timestamp: qtime when the payload is written into the queue + * @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue + * @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue */ struct msm_hw_fence_queue_payload { + u32 size; + u16 type; + u16 version; u64 ctxt_id; u64 seqno; u64 hash; u64 flags; + u64 client_data; u32 error; - u32 timestamp; + u32 timestamp_lo; + u32 timestamp_hi; + u32 reserve; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1e80ace58..28674dcca9 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -590,9 +590,10 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user hash = readq_relaxed(&read_ptr_payload->hash); flags = readq_relaxed(&read_ptr_payload->flags); error = readl_relaxed(&read_ptr_payload->error); - timestamp = readl_relaxed(&read_ptr_payload->timestamp); + timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | + ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", i, hash, ctx_id, seqno, flags, error, timestamp); } @@ -607,8 +608,9 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user hash = readq_relaxed(&read_ptr_payload->hash); flags = readq_relaxed(&read_ptr_payload->flags); error = readl_relaxed(&read_ptr_payload->error); - timestamp = readl_relaxed(&read_ptr_payload->timestamp); - HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n", + timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | + ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); + HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", i, hash, ctx_id, seqno, flags, error, timestamp); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index c88d8cf1a5..f5bc6f3198 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -112,10 +112,10 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, queues[i].pa_queue = qphys; queues[i].va_header = hfi_queue_header; queues[i].q_size_bytes = queue_size; - HWFNC_DBG_INIT("init:%s client:%d queue[%d]: va=0x%pK pa=0x%x va_hd:0x%pK sz:%d\n", + HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n", hfi_queue_header->type == HW_FENCE_TX_QUEUE ? 
"TX_QUEUE" : "RX_QUEUE", client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, - queues[i].q_size_bytes); + queues[i].q_size_bytes, payload_size); /* Next header */ hfi_queue_header++; @@ -232,10 +232,11 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u32 q_size_u32; u32 q_free_u32; u32 *q_payload_write_ptr; - u32 payload_size_u32; + u32 payload_size, payload_size_u32; struct msm_hw_fence_queue_payload *write_ptr_payload; bool lock_client = false; u32 lock_idx; + u64 timestamp; int ret = 0; if (queue_type >= HW_FENCE_CLIENT_QUEUES) { @@ -247,7 +248,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, hfi_header = queue->va_header; q_size_u32 = (queue->q_size_bytes / sizeof(u32)); - payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)); + payload_size = sizeof(struct msm_hw_fence_queue_payload); + payload_size_u32 = (payload_size / sizeof(u32)); if (!hfi_header) { HWFNC_ERR("Invalid queue\n"); @@ -319,12 +321,17 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, to_write_idx = 0; /* Update Client Queue */ + writeq_relaxed(payload_size, &write_ptr_payload->size); + writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); + writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version); writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id); writeq_relaxed(seqno, &write_ptr_payload->seqno); writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); writel_relaxed(error, &write_ptr_payload->error); - writel_relaxed(hw_fence_get_qtime(drv_data), &write_ptr_payload->timestamp); + timestamp = hw_fence_get_qtime(drv_data); + writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo); + writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi); /* update memory for the message */ wmb(); From f4e1ed257858d422cbfc0277997ccd45b5898229 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 24 Aug 2022 14:26:45 -0700 Subject: [PATCH 25/77] mm-drivers: hw_fence: avoid signal during reset for signaled hw fences During a client reset, hw fences that are already signaled should not require to be signaled again, otherwise waiting clients can receive the signal from unexpected hw fences that have been already signaled long time back. Add check to only signal hw fences that are not in signaled state during the client reset. 
Change-Id: I6f6a6ba142889f9c7ee2bd8680c30592c3c0987f Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 33 ++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f5bc6f3198..8bb35aad1d 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1291,13 +1291,29 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, return ret; } +static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence *hw_fence, u64 hash, int error) +{ + enum hw_fence_client_id wait_client_id; + struct msm_hw_fence_client *hw_fence_wait_client; + + /* signal with an error all the waiting clients for this fence */ + for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + if (hw_fence->wait_client_mask & BIT(wait_client_id)) { + hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (hw_fence_wait_client) + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, error); + } + } +} + int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags) { int ret = 0; - enum hw_fence_client_id wait_client_id; - struct msm_hw_fence_client *hw_fence_wait_client; int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET; GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ @@ -1314,16 +1330,9 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, if (hw_fence->fence_allocator == hw_fence_client->client_id) { - /* signal with an error all the waiting clients for this fence */ - for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { - if (hw_fence->wait_client_mask & BIT(wait_client_id)) { - hw_fence_wait_client = drv_data->clients[wait_client_id]; - - if (hw_fence_wait_client) - _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, error); - } - } + /* if fence is not signaled, signal with error all the waiting clients */ + if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL)) + _signal_all_wait_clients(drv_data, hw_fence, hash, error); if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY) goto skip_destroy; From 51925b9f9642a825f9884d676833721ec73616f5 Mon Sep 17 00:00:00 2001 From: Amine Najahi Date: Mon, 29 Aug 2022 12:44:40 -0400 Subject: [PATCH 26/77] mm-drivers: configure max driver instances base on build config Add #ifdef to configure the maximum allowed driver instances base on the build configuration, to avoid uninitialized access to fences array. 
Change-Id: I83ea5ade33a93e23edee21a0435ed7257fe5c9c9 Signed-off-by: Amine Najahi --- sync_fence/src/qcom_sync_file.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index f054f80e34..e292b368ec 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -30,6 +30,12 @@ #define FENCE_MIN 1 #define FENCE_MAX 32 +#if IS_ENABLED(CONFIG_DEBUG_FS) + #define MAX_DEVICE_SUPPORTED 2 +#else + #define MAX_DEVICE_SUPPORTED 1 +#endif + struct sync_device { /* device info */ struct class *dev_class; @@ -105,9 +111,9 @@ static void clear_fence_array_tracker(bool force_clear) static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name) { - if (atomic_read(&obj->device_available) > 1) { - pr_err("number of device fds are limited by 2, device opened:%d\n", - atomic_read(&obj->device_available)); + if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) { + pr_err("number of device fds are limited to %d, device opened:%d\n", + MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available)); return NULL; } else if (!atomic_read(&obj->device_available)) { memset(obj->name, 0, NAME_LEN); From 877fea198c3d4173c7f2ae79c8a102de7617dcb1 Mon Sep 17 00:00:00 2001 From: Alex Danila Date: Thu, 6 Oct 2022 10:29:48 -0400 Subject: [PATCH 27/77] mm-drivers: hw_fence: add missing return type Change addresses a compiler error for missing return type Change-Id: I82f22cefef069988e60608210533250307e516b3 Signed-off-by: Alex Danila --- hw_fence/src/hw_fence_drv_priv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..af935b0407 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -124,7 +124,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, return ret; } -static inline _lock_client_queue(int queue_type) +static inline bool _lock_client_queue(int queue_type) { /* Only lock Rx Queue */ return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false; From b13dcfb79e442eb7f9cbb68ba5544efef9930bd5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 12 Sep 2022 10:36:44 -0700 Subject: [PATCH 28/77] mm-drivers: hw_fence: remove client id mask registration logic Remove client id bitmask to track registered clients. This allows support of more than 64 transmit clients. 
Change-Id: Ia2b4667d008bfceb0b46bfd3e14302e5bec82cb3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 7 +++---- hw_fence/src/hw_fence_drv_debug.c | 6 +++--- hw_fence/src/hw_fence_drv_priv.c | 5 ++--- hw_fence/src/msm_hw_fence.c | 27 +++++++++++---------------- 4 files changed, 19 insertions(+), 26 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 1efc41cc41..ca15fdb5a9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -244,7 +244,7 @@ struct msm_hw_fence_dbg_data { * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids - * @clients_mask_lock: lock to synchronize access to the clients mask + * @clients_register_lock: lock to synchronize clients registration and deregistration * @msm_hw_fence_client: table with the handles of the registered clients * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized @@ -316,9 +316,8 @@ struct hw_fence_driver_data { void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - /* bitmask for tracking registered client_ids */ - u64 client_id_mask; - struct mutex clients_mask_lock; + /* synchronize client_ids registration and deregistration */ + struct mutex clients_register_lock; /* table with registered client handles */ struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 28674dcca9..1844c2926c 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -895,10 +895,10 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, return -EINVAL; } - mutex_lock(&drv_data->clients_mask_lock); + mutex_lock(&drv_data->clients_register_lock); if (!drv_data->clients[client_id]) { - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); return -EINVAL; } @@ -912,7 +912,7 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, /* wake-up waiting client */ wake_up_all(&hw_fence_client->wait_queue); - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); return 0; } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..af6db58a91 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -624,10 +624,9 @@ void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, * allocation, then we will need to notify FenceCTL about the client that is * going-away here. 
*/ - mutex_lock(&drv_data->clients_mask_lock); - drv_data->client_id_mask &= ~BIT(hw_fence_client->client_id); + mutex_lock(&drv_data->clients_register_lock); drv_data->clients[hw_fence_client->client_id] = NULL; - mutex_unlock(&drv_data->clients_mask_lock); + mutex_unlock(&drv_data->clients_register_lock); /* Deallocate client's object */ HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 0f693be07b..037b95e277 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -35,27 +35,24 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, !mem_descriptor, client_id); return ERR_PTR(-EINVAL); } + /* Alloc client handle */ + hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); + if (!hw_fence_client) + return ERR_PTR(-ENOMEM); /* Avoid race condition if multiple-threads request same client at same time */ - mutex_lock(&hw_fence_drv_data->clients_mask_lock); - if (hw_fence_drv_data->client_id_mask & BIT(client_id)) { + mutex_lock(&hw_fence_drv_data->clients_register_lock); + if (hw_fence_drv_data->clients[client_id]) { HWFNC_ERR("client with id %d already registered\n", client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + mutex_unlock(&hw_fence_drv_data->clients_register_lock); + kfree(hw_fence_client); return ERR_PTR(-EINVAL); } /* Mark client as registered */ - hw_fence_drv_data->client_id_mask |= BIT(client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); + hw_fence_drv_data->clients[client_id] = hw_fence_client; + mutex_unlock(&hw_fence_drv_data->clients_register_lock); - /* Alloc client handle */ - hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); - if (!hw_fence_client) { - mutex_lock(&hw_fence_drv_data->clients_mask_lock); - hw_fence_drv_data->client_id_mask &= ~BIT(client_id); - mutex_unlock(&hw_fence_drv_data->clients_mask_lock); - return ERR_PTR(-ENOMEM); - } hw_fence_client->client_id = client_id; hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); @@ -74,8 +71,6 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); - hw_fence_drv_data->clients[client_id] = hw_fence_client; - /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, hw_fence_client, mem_descriptor); @@ -401,7 +396,7 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) if (rc) goto error; - mutex_init(&hw_fence_drv_data->clients_mask_lock); + mutex_init(&hw_fence_drv_data->clients_register_lock); /* set ready ealue so clients can register */ hw_fence_drv_data->resources_ready = true; From d62205ae1c563a951522e48810ed28d4d1106110 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 3 Aug 2022 11:22:21 -0700 Subject: [PATCH 29/77] mm-drivers: hw_fence: read qtimer for timestamps Move timestamps to use qtimer instead of sleep timer. 
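A hypothetical helper, not part of this patch, for interpreting the stored
values: hw_fence_get_qtime() now returns the raw ARM generic-timer (qtimer)
count, which can be scaled to microseconds with the counter rate
(arch_timer_get_rate()) when inspecting the queue dumps.

	#include <linux/kernel.h>
	#include <clocksource/arm_arch_timer.h>

	/* scale raw qtimer ticks to microseconds for debug prints */
	static u64 hw_fence_qtime_to_us(u64 ticks)
	{
		return mult_frac(ticks, 1000000ULL, (u64)arch_timer_get_rate());
	}
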
Change-Id: I1a5f20c3d1ec31ba13e95713828024a309a53ba1 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8bb35aad1d..63ebae2f2a 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -17,7 +17,11 @@ inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { +#ifdef HWFENCE_USE_SLEEP_TIMER return readl_relaxed(drv_data->qtime_io_mem); +#else /* USE QTIMER */ + return arch_timer_read_counter(); +#endif /* HWFENCE_USE_SLEEP_TIMER */ } static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, From a5e17f3fa7243790f8581ac6fdbdd7344460ec6e Mon Sep 17 00:00:00 2001 From: Bruce Hoo Date: Fri, 2 Sep 2022 17:24:27 +0800 Subject: [PATCH 30/77] mm-drivers: spec_fence: create dummy spec_fence for fence array creation Create dummy spec_fence and pass it to dma_fence_array_create(), to avoid NULL pointer access in dma_fence_array_create(). Change-Id: I7a283753169cccbed6c842090a48cbb6e185cf9a Signed-off-by: Bruce Hoo --- sync_fence/src/qcom_sync_file.c | 116 +++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 26 deletions(-) diff --git a/sync_fence/src/qcom_sync_file.c b/sync_fence/src/qcom_sync_file.c index e292b368ec..04d8951233 100644 --- a/sync_fence/src/qcom_sync_file.c +++ b/sync_fence/src/qcom_sync_file.c @@ -36,6 +36,14 @@ #define MAX_DEVICE_SUPPORTED 1 #endif +#define DUMMY_CONTEXT 0xfafadadafafadada +#define DUMMY_SEQNO 0xefa9ce00efa9ce00 + +struct dummy_spec_fence { + struct dma_fence fence; + spinlock_t lock; +}; + struct sync_device { /* device info */ struct class *dev_class; @@ -43,6 +51,7 @@ struct sync_device { struct device *dev; struct cdev *cdev; struct mutex lock; + struct dummy_spec_fence *dummy_fence; /* device drv data */ atomic_t device_available; @@ -61,6 +70,16 @@ struct fence_array_node { /* Speculative Sync Device Driver State */ static struct sync_device sync_dev; +static const char *spec_fence_get_name_dummy(struct dma_fence *fence) +{ + return "dummy_fence"; +} + +static const struct dma_fence_ops dummy_spec_fence_ops = { + .get_driver_name = spec_fence_get_name_dummy, + .get_timeline_name = spec_fence_get_name_dummy, +}; + static bool sanitize_fence_array(struct dma_fence_array *fence) { struct fence_array_node *node; @@ -193,8 +212,10 @@ static int spec_sync_create_array(struct fence_create_data *f) struct sync_file *sync_file; struct dma_fence_array *fence_array; struct fence_array_node *node; + struct dma_fence **fences; + struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence; bool signal_any; - int ret = 0; + int i, ret = 0; if (fd < 0) { pr_err("failed to get_unused_fd_flags\n"); @@ -207,10 +228,39 @@ static int spec_sync_create_array(struct fence_create_data *f) goto error_args; } + fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); + if (!fences) { + ret = -ENOMEM; + goto error_args; + } + + for (i = 0; i < f->num_fences; i++) { + fences[i] = &dummy_fence_p->fence; + /* + * Increase dummy-fences refcount here, we must do this since any call to + * fence-array release while dummy-fences are the children of the fence-array + * will decrement the dummy_fence refcount. Therefore, to prevent the release + * of the dummy_fence fences, we must keep an extra refcount for every time that + * the fence-array->release can decrement its children's refcount. 
the extra + * refcount will be decreased impilictly when dma_fence_put(&fence_array->base) + * called. + */ + dma_fence_get(&dummy_fence_p->fence); + } + signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true; - fence_array = dma_fence_array_create(f->num_fences, NULL, + fence_array = dma_fence_array_create(f->num_fences, fences, dma_fence_context_alloc(1), 0, signal_any); + if (!fence_array) { + /* fence-array create failed, remove extra refcounts */ + for (i = 0; i < f->num_fences; i++) + dma_fence_put(&dummy_fence_p->fence); + + kfree(fences); + ret = -EINVAL; + goto error_args; + } /* Set the enable signal such that signalling is not done during wait*/ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags); @@ -299,9 +349,8 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) struct dma_fence_array *fence_array; struct dma_fence *fence = NULL; struct dma_fence *user_fence = NULL; - struct dma_fence **fence_list; int *user_fds, ret = 0, i; - u32 num_fences, counter; + u32 num_fences; fence = sync_file_get_fence(sync_bind_info->out_bind_fd); if (!fence) { @@ -309,6 +358,13 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) return -EINVAL; } + if (dma_fence_is_signaled(fence)) { + pr_err("spec fence is already signaled, out_fd:%d\n", + sync_bind_info->out_bind_fd); + ret = -EINVAL; + goto end; + } + fence_array = container_of(fence, struct dma_fence_array, base); if (!sanitize_fence_array(fence_array)) { pr_err("spec fence not found in the registered list out_fd:%d\n", @@ -317,14 +373,18 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto end; } - if (fence_array->fences) { - pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", - sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags); - goto end; - } - num_fences = fence_array->num_fences; - counter = num_fences; + + for (i = 0; i < num_fences; i++) { + if (!(fence_array->fences[i]->context == DUMMY_CONTEXT && + fence_array->fences[i]->seqno == DUMMY_SEQNO)) { + pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n", + sync_bind_info->out_bind_fd, dma_fence_get_status(fence), + fence->flags); + ret = -EINVAL; + goto end; + } + } user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL); if (!user_fds) { @@ -332,31 +392,28 @@ static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info) goto end; } - fence_list = kmalloc_array(num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO); - if (!fence_list) { - ret = -ENOMEM; - goto out; - } - if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds, num_fences * sizeof(int))) { - kfree(fence_list); ret = -EFAULT; goto out; } spin_lock(fence->lock); - fence_array->fences = fence_list; for (i = 0; i < num_fences; i++) { user_fence = sync_file_get_fence(user_fds[i]); if (!user_fence) { pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n", user_fds[i], sync_bind_info->out_bind_fd); - counter = i; ret = -EINVAL; goto bind_invalid; } fence_array->fences[i] = user_fence; + /* + * At this point the fence-array fully contains valid fences and no more the + * dummy-fence, therefore, we must release the extra refcount that the + * creation of the speculative fence added to the dummy-fence. 
+ */ + dma_fence_put(&sync_dev.dummy_fence->fence); pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd, i, user_fds[i], fence_array->fences[i]->error); } @@ -372,12 +429,6 @@ bind_invalid: wake_up_all(&sync_dev.wait_queue); if (ret) { - for (i = counter - 1; i >= 0; i--) - dma_fence_put(fence_array->fences[i]); - - kfree(fence_list); - fence_array->fences = NULL; - fence_array->num_fences = 0; dma_fence_set_error(fence, -EINVAL); spin_unlock(fence->lock); dma_fence_signal(fence); @@ -437,6 +488,7 @@ const struct file_operations spec_sync_fops = { static int spec_sync_register_device(void) { + struct dummy_spec_fence *dummy_fence_p = NULL; int ret; sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME); @@ -479,6 +531,17 @@ static int spec_sync_register_device(void) INIT_LIST_HEAD(&sync_dev.fence_array_list); init_waitqueue_head(&sync_dev.wait_queue); + dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL); + if (!dummy_fence_p) { + ret = -ENOMEM; + goto cdev_add_err; + } + + spin_lock_init(&dummy_fence_p->lock); + dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock, + DUMMY_CONTEXT, DUMMY_SEQNO); + sync_dev.dummy_fence = dummy_fence_p; + return 0; cdev_add_err: @@ -511,6 +574,7 @@ static void __exit spec_sync_deinit(void) device_destroy(sync_dev.dev_class, sync_dev.dev_num); unregister_chrdev_region(sync_dev.dev_num, 1); class_destroy(sync_dev.dev_class); + dma_fence_put(&sync_dev.dummy_fence->fence); } module_init(spec_sync_init); From d881744a7249c76a677dc28f68aec870e1836ca0 Mon Sep 17 00:00:00 2001 From: Nilaan Gunabalachandran Date: Tue, 8 Nov 2022 13:37:36 -0500 Subject: [PATCH 31/77] mm-drivers: fix printk argument errors This change fixes printk arguments in mm-drivers found with additional compilation flags, and adds those compile flags as well.
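For instance, -Wformat-extra-args flags calls that pass more arguments than the format string consumes; the stray argument is otherwise silently dropped. The HWFNC_ERR() fix below is exactly this pattern (shown here only to illustrate what the new warnings catch):

	/* warns with -Wformat-extra-args: 'hash' has no conversion specifier */
	HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu", context, seqno, hash);

	/* fixed: every argument is consumed by the format string */
	HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu hash:%lu",
		context, seqno, hash);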
Change-Id: Ic83f044467dca6d391221182096b9c50b7da36de Signed-off-by: Nilaan Gunabalachandran --- hw_fence/Kbuild | 2 ++ hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_priv.c | 2 +- msm_ext_display/Kbuild | 2 ++ sync_fence/Kbuild | 2 ++ 5 files changed, 9 insertions(+), 2 deletions(-) diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 8948d581e9..2cf74d291b 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -18,3 +18,5 @@ msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 1844c2926c..b159c9cee4 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -500,7 +500,8 @@ static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash); if (!hw_fence) { - HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu", context, seqno, hash); + HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu hash:%lu", + context, seqno, hash); len = scnprintf(buf + len, max_size - len, "no valid hfence found for context:%lu seqno:%lu hash:%lu\n", context, seqno, hash); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 22a932c773..cb59f3d4d3 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -310,7 +310,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* calculate the index after the write */ to_write_idx = write_idx + payload_size_u32; - HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size\n", to_write_idx, write_idx, + HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx, payload_size_u32); HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n", hw_fence_client->client_id, _get_queue_type(queue_type), diff --git a/msm_ext_display/Kbuild b/msm_ext_display/Kbuild index 284134c0af..a54149152b 100644 --- a/msm_ext_display/Kbuild +++ b/msm_ext_display/Kbuild @@ -8,3 +8,5 @@ obj-m += msm_ext_display.o msm_ext_display-y := src/msm_ext_display.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull \ No newline at end of file diff --git a/sync_fence/Kbuild b/sync_fence/Kbuild index fd631a4348..b1f9db20d7 100644 --- a/sync_fence/Kbuild +++ b/sync_fence/Kbuild @@ -12,3 +12,5 @@ sync_fence-y := src/qcom_sync_file.o CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" endif +EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \ + -Wformat-invalid-specifier -Wformat-zero-length -Wnonnull From 9ff114eee87b210728e52d0969adfcf9816c31e5 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 28 Sep 2022 15:58:25 -0600 Subject: [PATCH 32/77] mm-drivers: hw_fence: Add per client ipc interrupt property Not all clients need ipc interrupt for an already signaled fence. Set the per client property based on whether a client needs the interrupt or not. 
Also, set update_rxq property for GPU client to false, as GPU doesn't need already signaled fences to be sent to GPU Rx Queue. Change-Id: I08a6bbd598695b112124ce6ec409db75d5e11e0f Signed-off-by: Harshdeep Dhatt --- hw_fence/include/hw_fence_drv_ipc.h | 10 ++++ hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_ipc.c | 70 ++++++++++++++++------------ hw_fence/src/hw_fence_drv_priv.c | 5 +- hw_fence/src/msm_hw_fence.c | 1 + 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index c24781ac36..8a3f922b36 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -87,4 +87,14 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien */ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id); +/** + * hw_fence_ipcc_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt for + * already signaled fences + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * Return: true if client needs ipc interrupt for signaled fences, false otherwise + */ +bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id); + #endif /* __HW_FENCE_DRV_IPC_H */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index ca15fdb5a9..52bf413579 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -150,6 +150,7 @@ enum payload_type { * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_id: id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue + * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -160,6 +161,7 @@ struct msm_hw_fence_client { int ipc_signal_id; int ipc_client_id; bool update_rxq; + bool send_ipc; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index c3414a20da..a36163db88 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -13,11 +13,13 @@ * @ipc_client_id: ipc client id for the hw-fence client. * @ipc_signal_id: ipc signal id for the hw-fence client. * @update_rxq: bool to indicate if clinet uses rx-queue. + * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences */ struct hw_fence_client_ipc_map { int ipc_client_id; int ipc_signal_id; bool update_rxq; + bool send_ipc; }; /** @@ -32,22 +34,22 @@ struct hw_fence_client_ipc_map { * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false, true}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false, true}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false, true}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false, true}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false, true}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false, true}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ }; @@ -60,22 +62,22 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * Note that the index of this struct must match the enum hw_fence_client_id */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ + {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false, true}, /* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false, true}, /* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false, true}, /* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false, true}, /* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false, true}, /* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false, true}, /* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* 
val0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ }; @@ -103,6 +105,14 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c return drv_data->ipc_clients_table[client_id].update_rxq; } +bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].send_ipc; +} + /** * _get_ipc_client_name() - Returns ipc client name, used for debugging. */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index cb59f3d4d3..593b365e26 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1058,8 +1058,9 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); /* Signal the hw fence now */ - hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, - hw_fence_client->ipc_signal_id); + if (hw_fence_client->send_ipc) + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 037b95e277..8eb520527f 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -70,6 +70,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, From fbea8f77fa5a0cf9e2fc60a781cf0e856055a65c Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Mon, 19 Sep 2022 12:54:55 -0600 Subject: [PATCH 33/77] mm-drivers: hw_fence: Set MSM_HW_FENCE_FLAG_SIGNALED_BIT flag Set this flag if a hw fence (for which a client wants to wait) has already been signaled. Clients can check this flag and indicate to their respective hardware (or firmware) that this fence is already signaled. 
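For reference, a minimal sketch of how a waiting client could consume the new flag before programming its hardware or firmware to wait (illustrative only; the header providing MSM_HW_FENCE_FLAG_SIGNALED_BIT and the surrounding call site are client-specific):

#include <linux/bitops.h>
#include <linux/dma-fence.h>

/* Illustrative only: returns true if the hw-fence driver marked this fence as
 * already signaled, so the client can skip the hardware wait entirely.
 */
static bool client_fence_already_signaled(struct dma_fence *fence)
{
	return test_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
}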
Change-Id: I9337cabb771197f2d35ac4386402a25941d73311 Signed-off-by: Harshdeep Dhatt --- hw_fence/include/hw_fence_drv_priv.h | 3 ++- hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_priv.c | 11 ++++++++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 52bf413579..de978eb316 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -430,7 +430,8 @@ inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index b159c9cee4..2dd3ae4ec6 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -340,7 +340,8 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /***** DST CLIENT - REGISTER WAIT CLIENT ******/ /**********************************************/ /* use same context and seqno that src client used to create fence */ - ret = hw_fence_register_wait_client(drv_data, hw_fence_client_dst, context, seqno); + ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, + seqno); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 593b365e26..efa0145d85 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1220,6 +1220,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* signal the join hw fence */ _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* * job of the join-fence is finished since we already signaled, @@ -1240,7 +1241,8 @@ error_array: } int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno) + struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, + u64 seqno) { struct msm_hw_fence *hw_fence; u64 hash; @@ -1263,8 +1265,11 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, wmb(); /* if hw fence already signaled, signal the client */ - if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) + if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { + if (fence != NULL) + set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + } GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ @@ -1287,7 +1292,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, return -EINVAL; } - ret = hw_fence_register_wait_client(drv_data, hw_fence_client, fence->context, + ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, fence->seqno); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); From 
54256aa9e045237c79dceb2966e2e3b86aa21075 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 21 Sep 2022 15:28:53 -0600 Subject: [PATCH 34/77] mm-drivers: hw_fence: Fix join fence signaling It is possible that one (or more) child fences get signaled by fence controller, right after we add the join fence as parent fence of the child fence. If so, the join fence pending child count may become 0 which means we can safely signal the join fence. Change-Id: I0222b93a62db13eeb7867f3741c1db944df036b1 Signed-off-by: Harshdeep Dhatt --- hw_fence/src/hw_fence_drv_priv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index efa0145d85..f820a824e5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1124,7 +1124,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; struct dma_fence *child_fence; - u32 signaled_fences = 0; + bool signal_join_fence = false; u64 hash_join_fence, hash; int i, ret = 0; @@ -1178,13 +1178,13 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* child fence is already signaled */ GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ - join_fence->pending_child_cnt--; + if (--join_fence->pending_child_cnt == 0) + signal_join_fence = true; /* update memory for the table update */ wmb(); GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ - signaled_fences++; } else { /* child fence is not signaled */ @@ -1216,7 +1216,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } /* all fences were signaled, signal client now */ - if (signaled_fences == array->num_fences) { + if (signal_join_fence) { /* signal the join hw fence */ _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); From ecef24aa6215f786549243d02f5a470a8b2d0c8a Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 16 Sep 2022 14:59:56 -0700 Subject: [PATCH 35/77] mm-drivers: hw_fence: update ipc regs config to support phys-id Starting pineapple, each ipc client has a different physical-id and virtual-id for registers access and configuration. This change updates the ipc to handle this different configuration. 
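In other words, after this change the physical id selects which IPCC register block the sender writes, while the virtual id is what gets encoded into the payload to address the receiver. A rough sketch of the resulting trigger path, mirroring hw_fence_ipcc_trigger_signal() in the diff below (illustrative only, not additional code):

	/* tx side: the sender's physical id picks the SEND register block */
	ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
		tx_client_pid);

	/* rx side: the receiver's virtual id travels in the upper half of the payload */
	val = (rx_client_vid << 16) | signal_id;
	writel_relaxed(val, ptr);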
Change-Id: I36fa84b07ffd209ce3fb323ff796f9e7721d7dd2 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_debug.h | 33 ++-- hw_fence/include/hw_fence_drv_ipc.h | 37 +++- hw_fence/include/hw_fence_drv_priv.h | 12 +- hw_fence/src/hw_fence_drv_debug.c | 20 ++- hw_fence/src/hw_fence_drv_ipc.c | 235 ++++++++++++++++++-------- hw_fence/src/hw_fence_drv_priv.c | 4 +- hw_fence/src/hw_fence_ioctl.c | 11 +- hw_fence/src/msm_hw_fence.c | 21 ++- 8 files changed, 252 insertions(+), 121 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index de0e6e7a37..4f22b94664 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -69,7 +69,8 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, in extern const struct file_operations hw_sync_debugfs_fops; struct hw_fence_out_clients_map { - int ipc_client_id; /* ipc client id for the hw fence client */ + int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */ + int ipc_client_id_pid; /* ipc client physical id for the hw fence client */ int ipc_signal_id; /* ipc signal id for the hw fence client */ }; @@ -81,21 +82,21 @@ struct hw_fence_out_clients_map { */ static const struct hw_fence_out_clients_map dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 12}, /* CTL5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 21}, /* VAL0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22}, /* VAL1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23}, /* VAL2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24}, /* VAL3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25}, /* VAL4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26}, /* VAL5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27}, /* VAL6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4}, /* CTL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6}, /* CTL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8}, /* CTL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */ }; /** diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 8a3f922b36..e905ea8ed6 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ 
b/hw_fence/include/hw_fence_drv_ipc.h @@ -6,17 +6,26 @@ #ifndef __HW_FENCE_DRV_IPC_H #define __HW_FENCE_DRV_IPC_H -#define HW_FENCE_IPC_CLIENT_ID_APPS 8 -#define HW_FENCE_IPC_CLIENT_ID_GPU 9 -#define HW_FENCE_IPC_CLIENT_ID_DPU 25 +/* ipc clients virtual client-id */ +#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8 +#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9 +#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 + +/* ipc clients physical client-id */ +#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 +#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4 +#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 +#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 +#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 #define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ #define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ -#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kailua */ +#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ +#define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ #define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c)) #define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c)) @@ -55,8 +64,8 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); #endif /* HW_DPU_IPCC */ /** - * hw_fence_ipcc_get_client_id() - Returns the ipc client id that corresponds to the hw fence - * driver client. + * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the + * hw fence driver client. * @drv_data: driver data. * @client_id: hw fence driver client id. * @@ -64,7 +73,19 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); * * Return: client_id on success or negative errno (-EINVAL) */ -int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id); +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id); + +/** + * hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the + * hw fence driver client. + * @drv_data: driver data. + * @client_id: hw fence driver client id. + * + * The ipc client id returned by this API is used by the hw fence driver when signaling the fence. 
+ * + * Return: client_id on success or negative errno (-EINVAL) + */ +int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id); /** * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index de978eb316..14a302871d 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -148,7 +148,8 @@ enum payload_type { * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @ipc_signal_id: id of the signal to be triggered for this client - * @ipc_client_id: id of the ipc client for this hw fence driver client + * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client + * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences * @wait_queue: wait queue for the validation clients @@ -159,7 +160,8 @@ struct msm_hw_fence_client { struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int ipc_signal_id; - int ipc_client_id; + int ipc_client_vid; + int ipc_client_pid; bool update_rxq; bool send_ipc; #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -238,7 +240,8 @@ struct msm_hw_fence_dbg_data { * @ipcc_io_mem: base for the ipcc io mem map * @ipcc_size: size of the ipcc io mem mapping * @protocol_id: ipcc protocol id used by this driver - * @ipcc_client_id: ipcc client id for this driver + * @ipcc_client_vid: ipcc client virtual-id for this driver + * @ipcc_client_pid: ipcc client physical-id for this driver * @ipc_clients_table: table with the ipcc mapping for each client of this driver * @qtime_reg_base: qtimer register base address * @qtime_io_mem: qtimer io mem map @@ -304,7 +307,8 @@ struct hw_fence_driver_data { void __iomem *ipcc_io_mem; uint32_t ipcc_size; u32 protocol_id; - u32 ipcc_client_id; + u32 ipcc_client_vid; + u32 ipcc_client_pid; /* table with mapping of ipc client for each hw-fence client */ struct hw_fence_client_ipc_map *ipc_clients_table; diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 2dd3ae4ec6..a1db824278 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -106,8 +106,10 @@ static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf, static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, - HW_FENCE_IPC_CLIENT_ID_APPS); + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + drv_data->ipcc_client_vid); } #ifdef HW_DPU_IPCC @@ -124,8 +126,10 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - return _debugfs_ipcc_trigger(file, user_buf, count, ppos, HW_FENCE_IPC_CLIENT_ID_APPS, - HW_FENCE_IPC_CLIENT_ID_DPU); + struct hw_fence_driver_data *drv_data = file->private_data; + + return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid, + hw_fence_ipcc_get_client_virt_id(drv_data, HW_FENCE_CLIENT_ID_CTL0)); } @@ -361,8 +365,8 @@ 
static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, return -EINVAL; /* Write to ipcc to trigger the irq */ - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n", client_id_src, tx_client, rx_client, signal_id, drv_data->debugfs_data.hw_fence_sim_release_delay, input_data); @@ -866,8 +870,8 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, } /* write to ipcc to trigger the irq */ - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = drv_data->ipcc_client_pid; + rx_client = drv_data->ipcc_client_vid; hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id); usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a36163db88..a3cccfbf31 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -3,6 +3,7 @@ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ +#include #include "hw_fence_drv_priv.h" #include "hw_fence_drv_utils.h" #include "hw_fence_drv_ipc.h" @@ -10,13 +11,15 @@ /** * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. - * @ipc_client_id: ipc client id for the hw-fence client. + * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. + * @ipc_client_id_phys: physical ipc client id for the hw-fence client. * @ipc_signal_id: ipc signal id for the hw-fence client. * @update_rxq: bool to indicate if clinet uses rx-queue. * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences */ struct hw_fence_client_ipc_map { - int ipc_client_id; + int ipc_client_id_virt; + int ipc_client_id_phys; int ipc_signal_id; bool update_rxq; bool send_ipc; @@ -34,22 +37,22 @@ struct hw_fence_client_ipc_map { * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
*/ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 14, false, true}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 15, false, true}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 16, false, true}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 17, false, true}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 18, false, true}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 19, false, true}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 15, false, true},/*ctl1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 16, false, true},/*ctl2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 17, false, true},/*ctl3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ #endif /* CONFIG_DEBUG_FS */ }; @@ -62,31 +65,67 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * Note that the index of this struct must match the enum hw_fence_client_id */ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS, 1, true, true}, /* ctrl queue loopback */ - {HW_FENCE_IPC_CLIENT_ID_GPU, 0, false, false}, /* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 0, false, true}, /* ctl0 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 1, false, true}, /* ctl1 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 2, false, true}, /* ctl2 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 3, false, true}, /* ctl3 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 4, false, true}, /* ctl4 */ - {HW_FENCE_IPC_CLIENT_ID_DPU, 5, false, true}, /* ctl5 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */ + 
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true},/* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true},/* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true},/* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS, 21, true, true}, /* val0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 22, true, true}, /* val1 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 23, true, true}, /* val2 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 24, true, true}, /* val3 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 25, true, true}, /* val4 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 26, true, true}, /* val5 */ - {HW_FENCE_IPC_CLIENT_ID_APPS, 27, true, true}, /* val6 */ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ #endif /* CONFIG_DEBUG_FS */ }; -int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id) +/** + * struct hw_fence_clients_ipc_map_v2 - Table makes the 'client to signal' mapping, which is + * used by the hw fence driver to trigger ipc signal when hw fence is already + * signaled. + * This version is for targets that support dpu client id and IPC v2. 
+ * + * Note that the index of this struct must match the enum hw_fence_client_id + */ +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] = { + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */ + {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true},/* ctl1 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true},/* ctl2 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true},/* ctl3 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */ + {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */ +#if IS_ENABLED(CONFIG_DEBUG_FS) + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true},/* val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true},/* val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true},/* val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true},/* val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ +#endif /* CONFIG_DEBUG_FS */ +}; + +int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) return -EINVAL; - return drv_data->ipc_clients_table[client_id].ipc_client_id; + return drv_data->ipc_clients_table[client_id].ipc_client_id_virt; +} + +int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id) +{ + if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + return -EINVAL; + + return drv_data->ipc_clients_table[client_id].ipc_client_id_phys; } int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) @@ -114,36 +153,53 @@ bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int clie } /** - * _get_ipc_client_name() - Returns ipc client name, used for debugging. + * _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging. */ -static inline char *_get_ipc_client_name(u32 client_id) +static inline char *_get_ipc_phys_client_name(u32 client_id) { switch (client_id) { - case HW_FENCE_IPC_CLIENT_ID_APPS: - return "APPS"; - case HW_FENCE_IPC_CLIENT_ID_GPU: - return "GPU"; - case HW_FENCE_IPC_CLIENT_ID_DPU: - return "DPU"; + case HW_FENCE_IPC_CLIENT_ID_APPS_PID: + return "APPS_PID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_PID: + return "GPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_PID: + return "DPU_PID"; } - return "UNKNOWN"; + return "UNKNOWN_PID"; +} + +/** + * _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging. 
+ */ +static inline char *_get_ipc_virt_client_name(u32 client_id) +{ + switch (client_id) { + case HW_FENCE_IPC_CLIENT_ID_APPS_VID: + return "APPS_VID"; + case HW_FENCE_IPC_CLIENT_ID_GPU_VID: + return "GPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_DPU_VID: + return "DPU_VID"; + } + + return "UNKNOWN_VID"; } void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, - u32 tx_client_id, u32 rx_client_id, u32 signal_id) + u32 tx_client_pid, u32 rx_client_vid, u32 signal_id) { void __iomem *ptr; u32 val; /* Send signal */ ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id, - tx_client_id); - val = (rx_client_id << 16) | signal_id; + tx_client_pid); + val = (rx_client_vid << 16) | signal_id; HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n", - _get_ipc_client_name(tx_client_id), tx_client_id, - _get_ipc_client_name(rx_client_id), rx_client_id, + _get_ipc_phys_client_name(tx_client_pid), tx_client_pid, + _get_ipc_virt_client_name(rx_client_vid), rx_client_vid, signal_id, val, ptr); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); @@ -162,22 +218,32 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 { switch (hwrev) { case HW_FENCE_IPCC_HW_REV_100: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA; drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n"); break; case HW_FENCE_IPCC_HW_REV_110: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO; drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n"); break; case HW_FENCE_IPCC_HW_REV_170: - drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA; + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA; drv_data->ipc_clients_table = hw_fence_clients_ipc_map; - HWFNC_DBG_INIT("ipcc protocol_id: Kailua\n"); + HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n"); + break; + case HW_FENCE_IPCC_HW_REV_203: + drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; + drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; + drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ + drv_data->ipc_clients_table = hw_fence_clients_ipc_map_v2; + HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); break; default: return -1; @@ -190,13 +256,25 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) { void __iomem *ptr; u32 val; + int ret; HWFNC_DBG_H("enable ipc +\n"); - /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ - val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, - HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, HW_FENCE_IPC_CLIENT_ID_APPS)); - HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + /** + * Attempt to read the ipc version from dt, if not available, then attempt + * to read from the registers. 
+ */ + ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val); + if (ret || !val) { + /* if no device tree prop, attempt to get the version from the registers*/ + HWFNC_DBG_H("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); + + /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ + val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, + HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, + HW_FENCE_IPC_CLIENT_ID_APPS_VID)); + HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + } if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { HWFNC_ERR("ipcc protocol id not supported\n"); @@ -206,14 +284,14 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) /* Enable compute l1 (protocol_id = 2) */ val = 0x00000000; ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_APPS); + drv_data->ipcc_client_pid); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); /* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */ val = 0x000080000; ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_APPS); + drv_data->ipcc_client_pid); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr); writel_relaxed(val, ptr); @@ -226,6 +304,7 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) { struct hw_fence_client_ipc_map *hw_fence_client; + bool protocol_enabled = false; void __iomem *ptr; u32 val; int i; @@ -239,31 +318,41 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem); - /* - * Enable compute l1 (protocol_id = 2) for dpu (25) - * Sets bit(1) to clear when RECV_ID is read - */ - val = 0x00000001; - ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id, - HW_FENCE_IPC_CLIENT_ID_DPU); - HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); - writel_relaxed(val, ptr); - HWFNC_DBG_H("Initialize dpu signals\n"); /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) { hw_fence_client = &drv_data->ipc_clients_table[i]; /* skip any client that is not a dpu client */ - if (hw_fence_client->ipc_client_id != HW_FENCE_IPC_CLIENT_ID_DPU) + if (hw_fence_client->ipc_client_id_virt != HW_FENCE_IPC_CLIENT_ID_DPU_VID) continue; + if (!protocol_enabled) { + /* + * First DPU client will enable the protocol for dpu, e.g. compute l1 + * (protocol_id = 2) or fencing protocol, depending on the target, for the + * dpu client (vid = 25, pid = 9). 
+ * Sets bit(1) to clear when RECV_ID is read + */ + val = 0x00000001; + ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, + drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); + HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); + writel_relaxed(val, ptr); + + protocol_enabled = true; + } + /* Enable signals for dpu client */ - HWFNC_DBG_H("dpu:%d client:%d signal:%d\n", hw_fence_client->ipc_client_id, i, + HWFNC_DBG_H("dpu client:%d vid:%d pid:%d signal:%d\n", i, + hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys, hw_fence_client->ipc_signal_id); - val = 0x000080000 | (hw_fence_client->ipc_signal_id & 0xFFFF); + + /* Enable input apps-signal for dpu */ + val = (HW_FENCE_IPC_CLIENT_ID_APPS_VID << 16) | + (hw_fence_client->ipc_signal_id & 0xFFFF); ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, - drv_data->protocol_id, HW_FENCE_IPC_CLIENT_ID_DPU); + drv_data->protocol_id, hw_fence_client->ipc_client_id_phys); HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr); writel_relaxed(val, ptr); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f820a824e5..c37374ade4 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1047,8 +1047,8 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u64 flags, u32 error) { - u32 tx_client_id = drv_data->ipcc_client_id; - u32 rx_client_id = hw_fence_client->ipc_client_id; + u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ + u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ HWFNC_DBG_H("We must signal the client now! 
hfence hash:%llu\n", hash); diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 3ccd2dd7de..431bf658ed 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -546,6 +546,7 @@ static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long ar static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg) { + struct msm_hw_fence_client *hw_fence_client; struct hw_fence_sync_signal_data data; int ret, tx_client, rx_client, signal_id; @@ -556,6 +557,12 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar return -EINVAL; } + hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle; + if (!hw_fence_client) { + HWFNC_ERR("invalid client handle\n"); + return -EINVAL; + } + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; @@ -569,8 +576,8 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar if (signal_id < 0) return -EINVAL; - tx_client = HW_FENCE_IPC_CLIENT_ID_APPS; - rx_client = HW_FENCE_IPC_CLIENT_ID_APPS; + tx_client = hw_fence_client->ipc_client_vid; + rx_client = hw_fence_client->ipc_client_pid; ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); if (ret) { HWFNC_ERR("hw fence trigger signal has failed\n"); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 8eb520527f..8b8dfb0a59 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -54,10 +54,14 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, mutex_unlock(&hw_fence_drv_data->clients_register_lock); hw_fence_client->client_id = client_id; - hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id); + hw_fence_client->ipc_client_vid = + hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id); + hw_fence_client->ipc_client_pid = + hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id); - if (hw_fence_client->ipc_client_id <= 0) { - HWFNC_ERR("Failed to find client:%d ipc id\n", client_id); + if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) { + HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id, + hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); ret = -EINVAL; goto error; } @@ -91,9 +95,9 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, if (ret) goto error; - HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc_client_id:%d\n", + HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n", hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, - hw_fence_client->ipc_client_id); + hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); #if IS_ENABLED(CONFIG_DEBUG_FS) init_waitqueue_head(&hw_fence_client->wait_queue); @@ -338,8 +342,9 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro } EXPORT_SYMBOL(msm_hw_fence_update_txq); +/* tx client has to be the physical, rx client virtual id*/ int msm_hw_fence_trigger_signal(void *client_handle, - u32 tx_client_id, u32 rx_client_id, + u32 tx_client_pid, u32 rx_client_vid, u32 signal_id) { struct msm_hw_fence_client *hw_fence_client; @@ -355,8 +360,8 @@ int msm_hw_fence_trigger_signal(void *client_handle, hw_fence_client = (struct msm_hw_fence_client *)client_handle; HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id); - 
hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_id, - rx_client_id, signal_id); + hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid, + rx_client_vid, signal_id); return 0; } From 05689a41c3f3958ce66b0f2be1d43bddf1f71874 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Thu, 11 Aug 2022 22:10:45 -0700 Subject: [PATCH 36/77] mm-drivers: hw_fence: add inter-vm try lock Add support for inter-vm try-lock between hlos and vm. Change-Id: Iab9087acf82a4a746e9d43a736724ce2e7196237 Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_debug.h | 4 +++ hw_fence/include/hw_fence_drv_utils.h | 3 +- hw_fence/src/hw_fence_drv_priv.c | 41 +++++++++++----------- hw_fence/src/hw_fence_drv_utils.c | 50 ++++++++++++++++++++------- 4 files changed, 64 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index 4f22b94664..bfb654e603 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -18,6 +18,7 @@ enum hw_fence_drv_prio { HW_FENCE_QUEUE = 0x000010, /* Queue logs */ HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */ HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */ + HW_FENCE_LOCK = 0x000080, /* Lock-related messages */ HW_FENCE_PRINTK = 0x010000, }; @@ -56,6 +57,9 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_DBG_IRQ(fmt, ...) \ dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__) +#define HWFNC_DBG_LOCK(fmt, ...) \ + dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__) + #define HWFNC_WARN(fmt, ...) \ pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 092bb625cf..2ef6df0fe9 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -22,10 +22,11 @@ enum hw_fence_mem_reserve { /** * global_atomic_store() - Inter-processor lock + * @drv_data: hw fence driver data * @lock: memory to lock * @val: if true, api locks the memory, if false it unlocks the memory */ -void global_atomic_store(uint64_t *lock, bool val); +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val); /** * hw_fence_utils_init_virq() - Initialilze doorbell (i.e. 
vIRQ) for SVM to HLOS signaling diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index c37374ade4..74f7171817 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -13,7 +13,7 @@ #include "hw_fence_drv_debug.h" /* Global atomic lock */ -#define GLOBAL_ATOMIC_STORE(lock, val) global_atomic_store(lock, val) +#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { @@ -277,7 +277,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); /* lock the client rx queue to update */ - GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */ } /* Make sure data is ready before read */ @@ -348,7 +348,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, exit: if (lock_client) - GLOBAL_ATOMIC_STORE(&drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */ return ret; } @@ -882,7 +882,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d break; } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* compare to either find a free fence or find an allocated fence */ if (compare_fnc(hw_fence, context, seqno)) { @@ -907,7 +907,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d /* ctx & seqno must be unique creating a hw-fence */ HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n", context, seqno); - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); break; } /* compare can fail if we have a collision, we will linearly resolve it */ @@ -915,7 +915,7 @@ struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *d context, seqno); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* Increment step for the next loop */ step++; @@ -1090,7 +1090,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data } /* lock the child while we clean it up from the parent join-fence */ - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ for (j = hw_fence_child->parents_cnt; j > 0; j--) { if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) { @@ -1110,7 +1110,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data wmb(); } } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } /* destroy join fence */ @@ -1141,9 +1141,9 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } /* update this as waiting client of the join-fence */ - GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ join_fence->wait_client_mask |= BIT(hw_fence_client->client_id); - GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ /* Iterate through fences of the array */ for (i = 0; i < array->num_fences; i++) { @@ -1173,18 +1173,18 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, 
goto error_array; } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */ if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) { /* child fence is already signaled */ - GLOBAL_ATOMIC_STORE(&join_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ if (--join_fence->pending_child_cnt == 0) signal_join_fence = true; /* update memory for the table update */ wmb(); - GLOBAL_ATOMIC_STORE(&join_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */ } else { /* child fence is not signaled */ @@ -1201,7 +1201,8 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); ret = -EINVAL; goto error_array; } @@ -1212,7 +1213,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); } - GLOBAL_ATOMIC_STORE(&hw_fence_child->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } /* all fences were signaled, signal client now */ @@ -1254,7 +1255,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, return -EINVAL; } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ /* register client in the hw fence */ hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); @@ -1264,6 +1265,8 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ + /* if hw fence already signaled, signal the client */ if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) @@ -1271,8 +1274,6 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ - return 0; } @@ -1325,7 +1326,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, int ret = 0; int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET; - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */ if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) { HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n", hw_fence_client->client_id, hw_fence->ctx_id, @@ -1335,7 +1336,7 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, /* update memory for the table update */ wmb(); } - GLOBAL_ATOMIC_STORE(&hw_fence->lock, 0); /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */ if (hw_fence->fence_allocator == hw_fence_client->client_id) { diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 5d791de1ad..36d8494e1d 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -17,40 +17,64 @@ static void _lock(uint64_t *wait) { - /* WFE Wait */ #if defined(__aarch64__) - __asm__("SEVL\n\t" + __asm__( + // Sequence to wait for lock to be free (i.e. 
zero) "PRFM PSTL1KEEP, [%x[i_lock]]\n\t" "1:\n\t" - "WFE\n\t" "LDAXR W5, [%x[i_lock]]\n\t" "CBNZ W5, 1b\n\t" - "STXR W5, W0, [%x[i_lock]]\n\t" - "CBNZ W5, 1b\n" + // Sequence to set PVM BIT0 + "LDR W7, =0x1\n\t" // Load BIT0 (0x1) into W7 + "STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1) + "CBNZ W5, 1b\n\t" // If cannot set it, goto 1 : : [i_lock] "r" (wait) : "memory"); #endif } -static void _unlock(uint64_t *lock) +static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) { - /* Signal Client */ + uint64_t lock_val; + #if defined(__aarch64__) - __asm__("STLR WZR, [%x[i_out]]\n\t" - "SEV\n" + __asm__( + // Sequence to clear PVM BIT0 + "2:\n\t" + "LDAXR W5, [%x[i_out]]\n\t" // Atomic Fetch Lock + "AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t" // AND to clear BIT0 (lock &= ~0x1)) + "STXR W5, W6, [%x[i_out]]\n\t" // Store exclusive result + "CBNZ W5, 2b\n\t" // If cannot store exclusive, goto 2 : : [i_out] "r" (lock) : "memory"); #endif + mb(); /* Make sure the memory is updated */ + + lock_val = *lock; /* Read the lock value */ + HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val); + if (lock_val & 0x2) { /* check if SVM BIT1 is set*/ + /* + * SVM is in WFI state, since SVM acquire bit is set + * Trigger IRQ to Wake-Up SVM Client + */ + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d\n", lock_val); + hw_fence_ipcc_trigger_signal(drv_data, + drv_data->ipcc_client_pid, + drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */ + } } -void global_atomic_store(uint64_t *lock, bool val) +void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val) { - if (val) + if (val) { + preempt_disable(); _lock(lock); - else - _unlock(lock); + } else { + _unlock(drv_data, lock); + preempt_enable(); + } } /* From f5cc2eb42f5542a0e6d69f2564b59573e9294041 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Fri, 21 Oct 2022 18:46:26 -0700 Subject: [PATCH 37/77] mm-drivers: hw_fence: add check to avoid empty join hw fence Current driver creates a join hw-fence from a fence-array, adds its waiting client to it, and then it decides if signal the hw-fence depending in the current state of all the child hw-fences from the fence array. However, if by any reason the fence-array gets all its children cleared within it (which can happen for spec-fences failures), hw-fence driver logic won't signal the new created join-fence. This can lead to the creation of an empty or incomplete join-fence that the waiting-client will be waiting-for, but won't be signaled. Add a check to make sure that if above scenario is ever presented, the register for wait API catches this issue and fails to register for wait in this invalid fence. 
Change-Id: If3c69405d2a3adfefd12f447257c2560b839d238 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_priv.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 74f7171817..4fcfdc4131 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1072,9 +1072,16 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data int idx, j; u64 hash = 0; + if (!array->fences) + goto destroy_fence; + /* cleanup the child-fences from the parent join-fence */ for (idx = iteration; idx >= 0; idx--) { child_fence = array->fences[idx]; + if (!child_fence) { + HWFNC_ERR("invalid child fence idx:%d\n", idx); + continue; + } hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context, child_fence->seqno, &hash); @@ -1113,6 +1120,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } +destroy_fence: /* destroy join fence */ _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, false); @@ -1230,6 +1238,15 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, */ _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, false); + } else if (!array->num_fences) { + /* + * if we didn't signal the join-fence and the number of fences is not set in + * the fence-array, then fail here, otherwise driver would create a join-fence + * with no-childs that won't be signaled at all or an incomplete join-fence + */ + HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n", + array->base.context, array->base.seqno); + goto error_array; } return ret; From b87b258b9e9d53cc84af592c5aaf97a819dc9e12 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Wed, 26 Oct 2022 12:08:28 -0700 Subject: [PATCH 38/77] mm-drivers: hw_fence: add debug refcount to trylock Add a debugfs node to query the number of times the inter-vm try-lock has to wake up the SVM.
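The counter below follows the usual debugfs pattern: a plain u64 kept in driver data, exposed through debugfs_create_u64() and incremented at the event of interest (here, every time the unlock path has to wake the SVM). A generic sketch of that pattern with illustrative names only, not the driver's code:

#include <linux/debugfs.h>
#include <linux/types.h>

static u64 example_wake_cnt;

/* Expose the counter under an existing debugfs directory; 0600 matches the
 * driver's other nodes. Once debugfs is mounted, the value can be read from
 * user space (e.g. with cat) from the corresponding file. */
static void example_debugfs_init(struct dentry *root)
{
	debugfs_create_u64("example_wake_cnt", 0600, root, &example_wake_cnt);
}

/* Call at the instrumented event. */
static void example_count_wakeup(void)
{
	example_wake_cnt++;
}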
Change-Id: Ic1f88319f502e652902be0d45792768cf5c5154e Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 3 +++ hw_fence/src/hw_fence_drv_debug.c | 2 ++ hw_fence/src/hw_fence_drv_utils.c | 6 +++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 14a302871d..b9165fe666 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -191,6 +191,7 @@ struct msm_hw_fence_mem_data { * @create_hw_fences: boolean to continuosly create hw-fences within debugfs * @clients_list: list of debug clients registered * @clients_list_lock: lock to synchronize access to the clients list + * @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock */ struct msm_hw_fence_dbg_data { struct dentry *root; @@ -204,6 +205,8 @@ struct msm_hw_fence_dbg_data { struct list_head clients_list; struct mutex clients_list_lock; + + u64 lock_wake_cnt; }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1db824278..314bf27a18 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1003,6 +1003,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data, &hw_fence_dump_queues_fops); debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); + debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root, + &drv_data->debugfs_data.lock_wake_cnt); return 0; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 36d8494e1d..c8eab917dd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -59,7 +59,11 @@ static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock) * SVM is in WFI state, since SVM acquire bit is set * Trigger IRQ to Wake-Up SVM Client */ - HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d\n", lock_val); +#if IS_ENABLED(CONFIG_DEBUG_FS) + drv_data->debugfs_data.lock_wake_cnt++; + HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%d cnt:%llu\n", lock_val, + drv_data->debugfs_data.lock_wake_cnt); +#endif hw_fence_ipcc_trigger_signal(drv_data, drv_data->ipcc_client_pid, drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */ From 66c1c4f019d1a8bcafa2825c4a1ed24d572ffe69 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 17 Aug 2022 15:05:02 -0700 Subject: [PATCH 39/77] mm-drivers: hw_fence: update new APIs for synx compat support Add new APIs that take a client-id and hw-fence handles as parameters, to support synx compatibility.
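As a usage sketch of the v2 wait API added below (hypothetical caller, error handling trimmed): the waiting client passes an optional handles array and gets back, per fence, the hw-fence handle (the hash into the global fences table); that handle can later be fed to handle-based calls such as msm_hw_fence_destroy_with_handle() for fences this client allocated. The function name and surrounding code are illustrative only, and the msm_hw_fence client API header exported by this driver is assumed to be included:

static int example_register_for_wait(void *client_handle,
		struct dma_fence **fences, u32 num_fences, u64 *handles)
{
	/*
	 * handles must hold num_fences entries; client_data_list is NULL
	 * since no per-fence client data is passed here. On success,
	 * handles[i] identifies the hw-fence backing fences[i].
	 */
	return msm_hw_fence_wait_update_v2(client_handle, fences, handles,
			NULL, num_fences, true);
}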
Change-Id: I5dae0845f8eb2c6c05cc2605d8fc93935c780901 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 8 ++-- hw_fence/src/hw_fence_drv_debug.c | 5 +- hw_fence/src/hw_fence_drv_priv.c | 57 ++++++++++++++++------ hw_fence/src/hw_fence_ioctl.c | 2 +- hw_fence/src/msm_hw_fence.c | 70 +++++++++++++++++++++++++--- 5 files changed, 117 insertions(+), 25 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 14a302871d..7b83538670 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -422,11 +422,13 @@ int hw_fence_create(struct hw_fence_driver_data *drv_data, int hw_fence_destroy(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno); +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash); int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence_array *array); + struct dma_fence_array *array, u64 *hash_join_fence); int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence); + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash); int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u32 error, int queue_type); @@ -435,7 +437,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno); + u64 seqno, u64 *hash); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index a1db824278..9959db5bbc 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -345,7 +345,7 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /**********************************************/ /* use same context and seqno that src client used to create fence */ ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, - seqno); + seqno, &hash); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; @@ -861,7 +861,8 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, /* wait on the fence array */ fence_array_fence = &fence_array->base; - msm_hw_fence_wait_update(client_info_dst->client_handle, &fence_array_fence, 1, 1); + msm_hw_fence_wait_update_v2(client_info_dst->client_handle, &fence_array_fence, NULL, NULL, + 1, 1); signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id; if (signal_id < 0) { diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 74f7171817..dd9fa3c348 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -982,6 +982,37 @@ int hw_fence_destroy(struct hw_fence_driver_data *drv_data, return ret; } +int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash) +{ + u32 client_id = hw_fence_client->client_id; + struct 
msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl; + struct msm_hw_fence *hw_fence = NULL; + int ret = 0; + + hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash); + if (!hw_fence) { + HWFNC_ERR("bad hw fence hash:%llu client:%lu\n", hash, client_id); + return -EINVAL; + } + + if (hw_fence->fence_allocator != client_id) { + HWFNC_ERR("client:%lu cannot destroy fence hash:%llu fence_allocator:%lu\n", + client_id, hash, hw_fence->fence_allocator); + return -EINVAL; + } + + /* remove hw fence from table*/ + if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id, + hw_fence->seq_id)) { + HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu hash:%llu\n", + client_id, hw_fence->ctx_id, hw_fence->seq_id, hash); + ret = -EINVAL; + } + + return ret; +} + static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, u64 *hash, bool create) @@ -1119,13 +1150,14 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data } int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array) + struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, + u64 *hash_join_fence) { struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; struct dma_fence *child_fence; bool signal_join_fence = false; - u64 hash_join_fence, hash; + u64 hash; int i, ret = 0; /* @@ -1134,7 +1166,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, * join_fence->pending_child_count = array->num_fences */ join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array, - &hash_join_fence, true); + hash_join_fence, true); if (!join_fence) { HWFNC_ERR("cannot alloc hw fence for join fence array\n"); return -EINVAL; @@ -1208,7 +1240,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, } hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] = - hash_join_fence; + *hash_join_fence; /* update memory for the table update */ wmb(); @@ -1220,7 +1252,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* @@ -1228,7 +1260,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, * we can delete it now. This can happen when all the fences that * are part of the join-fence are already signaled. 
*/ - _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence, + _hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence, false); } @@ -1236,20 +1268,19 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, error_array: _cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence, - hash_join_fence); + *hash_join_fence); return -EINVAL; } int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno) + u64 seqno, u64 *hash) { struct msm_hw_fence *hw_fence; - u64 hash; /* find the hw fence within the table */ - hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, &hash); + hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); if (!hw_fence) { HWFNC_ERR("Cannot find fence!\n"); return -EINVAL; @@ -1271,7 +1302,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, 0); } return 0; @@ -1279,7 +1310,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence *fence) + struct dma_fence *fence, u64 *hash) { int ret = 0; @@ -1294,7 +1325,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, - fence->seqno); + fence->seqno, hash); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 431bf658ed..72566126c6 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -536,7 +536,7 @@ static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long ar return -EINVAL; } - ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1); + ret = msm_hw_fence_wait_update_v2(obj->client_handle, &fence, NULL, NULL, num_fences, 1); /* Decrement the refcount that hw_sync_get_fence increments */ dma_fence_put(fence); diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 8b8dfb0a59..ee34367a54 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -238,8 +238,41 @@ int msm_hw_fence_destroy(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_destroy); -int msm_hw_fence_wait_update(void *client_handle, - struct dma_fence **fence_list, u32 num_fences, bool create) +int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret; + + if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid data\n"); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); + return -EINVAL; + } + + HWFNC_DBG_H("+\n"); + + /* Destroy the HW Fence, i.e. 
remove entry in the Global Table for the Fence */ + ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle); + if (ret) { + HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle, + hw_fence_client->client_id); + return ret; + } + + HWFNC_DBG_H("-\n"); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_destroy_with_handle); + +int msm_hw_fence_wait_update_v2(void *client_handle, + struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences, + bool create) { struct msm_hw_fence_client *hw_fence_client; struct dma_fence_array *array; @@ -262,30 +295,43 @@ int msm_hw_fence_wait_update(void *client_handle, /* Process all the list of fences */ for (i = 0; i < num_fences; i++) { struct dma_fence *fence = fence_list[i]; + u64 hash; /* Process a Fence-Array */ array = to_dma_fence_array(fence); if (array) { ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, - array); + array, &hash); if (ret) { - HWFNC_ERR("Failed to create FenceArray\n"); + HWFNC_ERR("Failed to process FenceArray\n"); return ret; } } else { /* Process individual Fence */ - ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence); + ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence, + &hash); if (ret) { - HWFNC_ERR("Failed to create Fence\n"); + HWFNC_ERR("Failed to process Fence\n"); return ret; } } + + if (handles) + handles[i] = hash; } HWFNC_DBG_H("-\n"); return 0; } +EXPORT_SYMBOL(msm_hw_fence_wait_update_v2); + +int msm_hw_fence_wait_update(void *client_handle, + struct dma_fence **fence_list, u32 num_fences, bool create) +{ + return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences, + create); +} EXPORT_SYMBOL(msm_hw_fence_wait_update); int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) @@ -316,6 +362,18 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) } EXPORT_SYMBOL(msm_hw_fence_reset_client); +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags) +{ + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id:%d\n", client_id); + return -EINVAL; + } + + return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id], + reset_flags); +} +EXPORT_SYMBOL(msm_hw_fence_reset_client_by_id); + int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error) { struct msm_hw_fence_client *hw_fence_client; From b09b4f0720bd5bb1eb90acf802537b6be99a5f13 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 2 Aug 2022 11:19:55 -0700 Subject: [PATCH 40/77] mm-drivers: hw_fence: add support for 64-bit client_data Add support of the option to pass a 64-bit client_data value to the hw fence driver when a client registers as a waiting client for a hardware fence. Then during fence signaling, this client_data is returned to the client via the RxQ. If no client_data is passed to the driver for the hw fence, then a default value of zero is registered as the client_data. 
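A sketch of how a waiting client might use the new parameter (names are illustrative, not driver code): the cookie handed in per fence is returned in the client_data field of the RxQ payload when that fence signals. Per the checks added below, only clients with an entry in the hw_fence_client_data_id enum may pass non-zero client_data; for other clients the call fails with -EINVAL:

static int example_wait_with_cookies(void *client_handle,
		struct dma_fence **fences, u64 *cookies, u32 num_fences,
		u64 *handles)
{
	/* cookies[i] comes back in msm_hw_fence_queue_payload.client_data for
	 * fences[i]; passing NULL instead keeps the default value of zero. */
	return msm_hw_fence_wait_update_v2(client_handle, fences, handles,
			cookies, num_fences, true);
}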
Change-Id: I34cf3e50413639d53cbfa8251c98b9ff1d3cbf4a Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 34 ++++++++++-- hw_fence/src/hw_fence_drv_debug.c | 13 ++--- hw_fence/src/hw_fence_drv_priv.c | 78 ++++++++++++++++++++++++---- hw_fence/src/msm_hw_fence.c | 18 +++++-- 4 files changed, 119 insertions(+), 24 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 7b83538670..91bb4adbb5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -121,6 +121,27 @@ enum hw_fence_loopback_id { #define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) +/** + * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional + * parameter passed from the waiting client and returned + * to it upon fence signaling + * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client. + * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client. + * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client. + * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0. + * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1. + * @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an + * invalid hw_fence_client_data_id + */ +enum hw_fence_client_data_id { + HW_FENCE_CLIENT_DATA_ID_CTX0, + HW_FENCE_CLIENT_DATA_ID_IPE, + HW_FENCE_CLIENT_DATA_ID_VPU, + HW_FENCE_CLIENT_DATA_ID_VAL0, + HW_FENCE_CLIENT_DATA_ID_VAL1, + HW_FENCE_MAX_CLIENTS_WITH_DATA, +}; + /** * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues. * @va_queue: pointer to the virtual address of the queue elements @@ -387,6 +408,8 @@ struct msm_hw_fence_queue_payload { * @fence_trigger_time: debug info with the trigger time timestamp * @fence_wait_time: debug info with the register-for-wait timestamp * @debug_refcount: refcount used for debugging + * @client_data: array of data optionally passed from and returned to clients waiting on the fence + * during fence signaling */ struct msm_hw_fence { u32 valid; @@ -405,6 +428,7 @@ struct msm_hw_fence { u64 fence_trigger_time; u64 fence_wait_time; u64 debug_refcount; + u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA]; }; int hw_fence_init(struct hw_fence_driver_data *drv_data); @@ -426,20 +450,22 @@ int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 hash); int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence_array *array, u64 *hash_join_fence); + struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data); int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, - struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash); + struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash, + u64 client_data); int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, - u64 flags, u32 error, int queue_type); + u64 flags, u64 client_data, u32 error, int queue_type); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, - u64 seqno, u64 *hash); + u64 seqno, u64 *hash, u64 
client_data); struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id); #endif /* __HW_FENCE_DRV_INTERNAL_H */ diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 9959db5bbc..f54e8dfc26 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -338,14 +338,14 @@ static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file, /* Write to Tx queue */ hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash, - 0, 0, HW_FENCE_TX_QUEUE - 1); // no flags and no error + 0, 0, 0, HW_FENCE_TX_QUEUE - 1); /* no flags and no error */ /**********************************************/ /***** DST CLIENT - REGISTER WAIT CLIENT ******/ /**********************************************/ /* use same context and seqno that src client used to create fence */ ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context, - seqno, &hash); + seqno, &hash, 0); if (ret) { HWFNC_ERR("failed to register for wait\n"); return -EINVAL; @@ -558,7 +558,7 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user struct hw_fence_driver_data *drv_data; struct msm_hw_fence_queue *rx_queue; struct msm_hw_fence_queue *tx_queue; - u64 hash, ctx_id, seqno, timestamp, flags; + u64 hash, ctx_id, seqno, timestamp, flags, client_data; u32 *read_ptr, error; int client_id, i; struct msm_hw_fence_queue_payload *read_ptr_payload; @@ -595,12 +595,13 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user seqno = readq_relaxed(&read_ptr_payload->seqno); hash = readq_relaxed(&read_ptr_payload->hash); flags = readq_relaxed(&read_ptr_payload->flags); + client_data = readq_relaxed(&read_ptr_payload->client_data); error = readl_relaxed(&read_ptr_payload->error); timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", - i, hash, ctx_id, seqno, flags, error, timestamp); + HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", + i, hash, ctx_id, seqno, flags, client_data, error, timestamp); } HWFNC_DBG_L("-------TX QUEUE------\n"); @@ -855,7 +856,7 @@ static ssize_t hw_fence_dbg_create_join_fence(struct file *file, /* Write to Tx queue */ hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context, - hw_fence_dbg_seqno + i, hash, 0, 0, + hw_fence_dbg_seqno + i, hash, 0, 0, 0, HW_FENCE_TX_QUEUE - 1); } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index dd9fa3c348..b1011967ec 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -208,6 +208,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, payload->seqno = readq_relaxed(&read_ptr_payload->seqno); payload->hash = readq_relaxed(&read_ptr_payload->hash); payload->flags = readq_relaxed(&read_ptr_payload->flags); + payload->client_data = readq_relaxed(&read_ptr_payload->client_data); payload->error = readl_relaxed(&read_ptr_payload->error); /* update the read index */ @@ -226,7 +227,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, */ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 
ctxt_id, u64 seqno, u64 hash, - u64 flags, u32 error, int queue_type) + u64 flags, u64 client_data, u32 error, int queue_type) { struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; @@ -332,6 +333,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, writeq_relaxed(seqno, &write_ptr_payload->seqno); writeq_relaxed(hash, &write_ptr_payload->hash); writeq_relaxed(flags, &write_ptr_payload->flags); + writeq_relaxed(client_data, &write_ptr_payload->client_data); writel_relaxed(error, &write_ptr_payload->error); timestamp = hw_fence_get_qtime(drv_data); writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo); @@ -726,6 +728,8 @@ static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence) for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++) hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE; + + memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data)); } /* This function must be called with the hw fence lock */ @@ -1076,7 +1080,7 @@ struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data, static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, - u64 flags, u32 error) + u64 flags, u64 client_data, u32 error) { u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */ u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */ @@ -1086,7 +1090,7 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, /* Write to Rx queue */ if (hw_fence_client->update_rxq) hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, - hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1); + hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); /* Signal the hw fence now */ if (hw_fence_client->send_ipc) @@ -1151,7 +1155,7 @@ static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array, - u64 *hash_join_fence) + u64 *hash_join_fence, u64 client_data) { struct msm_hw_fence *join_fence; struct msm_hw_fence *hw_fence_child; @@ -1159,6 +1163,16 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, bool signal_join_fence = false; u64 hash; int i, ret = 0; + enum hw_fence_client_data_id data_id; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + client_data, hw_fence_client->client_id); + return -EINVAL; + } + } /* * Create join fence from the join-fences table, @@ -1248,11 +1262,15 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */ } + if (client_data) + join_fence->client_data[data_id] = client_data; + /* all fences were signaled, signal client now */ if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0, + client_data); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* @@ -1275,9 +1293,19 @@ error_array: int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client 
*hw_fence_client, u64 context, - u64 seqno, u64 *hash) + u64 seqno, u64 *hash, u64 client_data) { struct msm_hw_fence *hw_fence; + enum hw_fence_client_data_id data_id; + + if (client_data) { + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + client_data, hw_fence_client->client_id); + return -EINVAL; + } + } /* find the hw fence within the table */ hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash); @@ -1292,6 +1320,8 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id); hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data); hw_fence->debug_refcount++; + if (client_data) + hw_fence->client_data[data_id] = client_data; /* update memory for the table update */ wmb(); @@ -1302,7 +1332,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) { if (fence != NULL) set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags); - _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, 0); + _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0); } return 0; @@ -1310,7 +1340,7 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, - struct dma_fence *fence, u64 *hash) + struct dma_fence *fence, u64 *hash, u64 client_data) { int ret = 0; @@ -1325,7 +1355,7 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, } ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context, - fence->seqno, hash); + fence->seqno, hash, client_data); if (ret) HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id); @@ -1336,16 +1366,22 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hw_fence, u64 hash, int error) { enum hw_fence_client_id wait_client_id; + enum hw_fence_client_data_id data_id; struct msm_hw_fence_client *hw_fence_wait_client; + u64 client_data = 0; /* signal with an error all the waiting clients for this fence */ for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; + data_id = hw_fence_get_client_data_id(wait_client_id); + + if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) + client_data = hw_fence->client_data[data_id]; if (hw_fence_wait_client) _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, error); + hash, 0, client_data, error); } } } @@ -1389,3 +1425,25 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, skip_destroy: return ret; } + +enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id) +{ + enum hw_fence_client_data_id data_id; + + switch (client_id) { + case HW_FENCE_CLIENT_ID_CTX0: + data_id = HW_FENCE_CLIENT_DATA_ID_CTX0; + break; + case HW_FENCE_CLIENT_ID_VAL0: + data_id = HW_FENCE_CLIENT_DATA_ID_VAL0; + break; + case HW_FENCE_CLIENT_ID_VAL1: + data_id = HW_FENCE_CLIENT_DATA_ID_VAL1; + break; + default: + data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; + break; + } + + return data_id; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 
ee34367a54..02fe414f19 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -277,6 +277,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, struct msm_hw_fence_client *hw_fence_client; struct dma_fence_array *array; int i, ret = 0; + enum hw_fence_client_data_id data_id; if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) { HWFNC_ERR("Invalid data\n"); @@ -289,19 +290,28 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } hw_fence_client = (struct msm_hw_fence_client *)client_handle; + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { + HWFNC_ERR("Populating non-NULL client_data_list with unsupported client id:%d\n", + hw_fence_client->client_id); + return -EINVAL; + } HWFNC_DBG_H("+\n"); /* Process all the list of fences */ for (i = 0; i < num_fences; i++) { struct dma_fence *fence = fence_list[i]; - u64 hash; + u64 hash, client_data = 0; + + if (client_data_list) + client_data = client_data_list[i]; /* Process a Fence-Array */ array = to_dma_fence_array(fence); if (array) { ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client, - array, &hash); + array, &hash, client_data); if (ret) { HWFNC_ERR("Failed to process FenceArray\n"); return ret; @@ -309,7 +319,7 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } else { /* Process individual Fence */ ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence, - &hash); + &hash, client_data); if (ret) { HWFNC_ERR("Failed to process Fence\n"); return ret; @@ -394,7 +404,7 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro hw_fence_update_queue(hw_fence_drv_data, hw_fence_client, hw_fence_drv_data->hw_fences_tbl[handle].ctx_id, hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle, - flags, error, HW_FENCE_TX_QUEUE - 1); + flags, 0, error, HW_FENCE_TX_QUEUE - 1); return 0; } From b2efa8bc8bb0e2cdaab0585f39dfd8c30efc9aa0 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 17:15:03 -0700 Subject: [PATCH 41/77] mm-drivers: hw_fence: add dtsi-based allocation of client queues Update hw fence driver to support configurable parameters for each client type, which can be set up through device-tree. This allows configuring number of queues (e.g. only Tx Queue or both Rx and Tx Queues), number of entries per client queue, and number of sub-clients for each client-type. 
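The per-type configuration described above is read from a three-cell device-tree property named qcom,hw-fence-client-type-<name> (see _parse_client_queue_dt_props_indv() in the diff below), holding <clients_num queues_num queue_entries>. A hypothetical fragment for the hw-fence device node, with example values only, keeping both queues for DPU while trimming the validation clients to a single Tx-only queue:

	qcom,hw-fence-client-type-dpu = <6 2 128>;
	qcom,hw-fence-client-type-val = <2 1 64>;

If the property is absent for a client type, the driver falls back to the defaults in the hw_fence_client_types table and to the driver's common queue-entries count.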
Change-Id: I2d8f84ff2b7eb5322f9ca661cfd8f6a291db7b38 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 28 +++- hw_fence/src/hw_fence_drv_priv.c | 21 ++- hw_fence/src/hw_fence_drv_utils.c | 198 ++++++++++++++++++++++++--- hw_fence/src/msm_hw_fence.c | 8 ++ 4 files changed, 221 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index aa93a6131e..29f1fb00c9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -35,8 +35,8 @@ #define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES)) -#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ - (HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES)) +#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \ + (HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num)) /* * Max Payload size is the bigest size of the message that we can have in the CTRL queue @@ -230,6 +230,22 @@ struct msm_hw_fence_dbg_data { u64 lock_wake_cnt; }; +/** + * struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client. + * + * @queues_num: number of client queues + * @queue_entries: number of queue entries per client queue + * @mem_size: size of memory allocated for client queues + * @start_offset: start offset of client queue memory region, from beginning of carved-out memory + * allocation for hw fence driver + */ +struct hw_fence_client_queue_size_desc { + u32 queues_num; + u32 queue_entries; + u32 mem_size; + u32 start_offset; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -240,8 +256,7 @@ struct msm_hw_fence_dbg_data { * @hw_fence_queue_entries: total number of entries that can be available in the queue * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq - * @hw_fence_client_queue_size: size of the client queue for the payload - * @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq + * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table * @client_lock_tbl: pointer to the per-client locks table @@ -257,6 +272,7 @@ struct msm_hw_fence_dbg_data { * @peer_name: peer name for this carved-out memory * @rm_nb: hyp resource manager notifier * @memparcel: memparcel for the allocated memory + * @used_mem_size: total memory size of global table, lock region, and ctrl and client queues * @db_label: doorbell label * @rx_dbl: handle to the Rx doorbell * @debugfs_data: debugfs info @@ -291,8 +307,7 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - u32 hw_fence_client_queue_size; - u32 hw_fence_mem_clients_queues_size; + struct hw_fence_client_queue_size_desc hw_fence_client_queue_size[HW_FENCE_CLIENT_MAX]; /* HW Fences Table VA */ struct msm_hw_fence *hw_fences_tbl; @@ -316,6 +331,7 @@ struct hw_fence_driver_data { u32 peer_name; struct notifier_block rm_nb; u32 memparcel; + u32 used_mem_size; /* doorbell */ u32 db_label; diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 89dbc666a6..7dc3b69c56 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ 
b/hw_fence/src/hw_fence_drv_priv.c @@ -46,8 +46,14 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE; - queue_size = drv_data->hw_fence_client_queue_size; + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id: %d\n", client_id); + return -EINVAL; + } + + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num); + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * + drv_data->hw_fence_client_queue_size[client_id].queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: @@ -244,8 +250,10 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u64 timestamp; int ret = 0; - if (queue_type >= HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("Invalid queue type:%s\n", queue_type); + if (queue_type >= + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) { + HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type, + hw_fence_client->client_id); return -EINVAL; } @@ -526,7 +534,8 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, /* Init client queues */ ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, &hw_fence_client->mem_descriptor, hw_fence_client->queues, - HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id); + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num, + hw_fence_client->client_id); if (ret) { HWFNC_ERR("Failure to init the queue for client:%d\n", hw_fence_client->client_id); @@ -549,7 +558,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, /* * Initialize IPCC Signals for this client * - * NOTE: Fore each Client HW-Core, the client drivers might be the ones making + * NOTE: For each Client HW-Core, the client drivers might be the ones making * it's own initialization (in case that any hw-sequence must be enforced), * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index c8eab917dd..a9a391982e 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -15,6 +15,73 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +/** + * MAX_CLIENT_QUEUE_MEM_SIZE: + * Maximum memory size for client queues of a hw fence client. + */ +#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 + +/** + * HW_FENCE_MAX_CLIENT_TYPE: + * Total number of client types (GFX, DPU, VAL) + */ +#define HW_FENCE_MAX_CLIENT_TYPE 3 + +/* Maximum number of clients for each client type */ +#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 +#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 + +/** + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. + * + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. 
+ * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @mem_size: size of memory allocated for client queue(s) per client + */ +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; + u32 queues_num; + u32 queue_entries; + u32 mem_size; +}; + +/** + * struct hw_fence_client_types - Table describing all supported client types, used to parse + * device-tree properties related to client queue size. + * + * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and + * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be read + * from device-tree. + * + * If a value for queue entries is not parsed for the client type, then the default number of client + * queue entries (parsed from device-tree) is used. + * + * Notes: + * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. + * 2. Each HW Fence client ID must be described by one of the client types in this table. + * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. + * 4. HW_FENCE_MAX_CLIENT_TYPE must be incremented for new client types. + */ +struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { + {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, + HW_FENCE_CLIENT_QUEUES, 0, 0}, +}; + static void _lock(uint64_t *wait) { #if defined(__aarch64__) @@ -399,6 +466,11 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -ENXIO; } drv_data->size = resource_size(&drv_data->res); + if (drv_data->size < drv_data->used_mem_size) { + HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n", + drv_data->size, drv_data->used_mem_size); + return -ENOMEM; + } HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n", drv_data->io_mem_base, drv_data->res.start, @@ -469,12 +541,17 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, goto exit; } - start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + - HW_FENCE_MEM_LOCKS_SIZE + - drv_data->hw_fence_mem_fences_table_size) + - ((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size); - *size = drv_data->hw_fence_mem_clients_queues_size; + start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; + *size = drv_data->hw_fence_client_queue_size[client_id].mem_size; + /* + * If this error occurs when client should be valid, check that support for this + * client has been configured in device-tree properties. 
+ */ + if (!*size) { + HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id); + ret = -EINVAL; + } break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); @@ -501,6 +578,95 @@ exit: return ret; } +static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + char name[31]; + u32 tmp[3]; + u32 queue_size; + int ret; + + /* parse client queue property from device-tree */ + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3); + if (ret) { + HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name, + ret); + desc->queue_entries = drv_data->hw_fence_queue_entries; + } else { + desc->clients_num = tmp[0]; + desc->queues_num = tmp[1]; + desc->queue_entries = tmp[2]; + } + + if (desc->clients_num > desc->max_clients_num || !desc->queues_num || + desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) { + HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries); + return -EINVAL; + } + + /* compute mem_size */ + if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { + HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n", + desc->name, desc->queue_entries); + return -EINVAL; + } + + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; + if (queue_size >= ((U32_MAX & PAGE_MASK) - + HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) { + HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n", + desc->name, queue_size); + return -EINVAL; + } + + desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + + (queue_size * desc->queues_num)); + + if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { + HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n", + desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); + return -EINVAL; + } + + HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu\n", desc->name, + desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size); + + return 0; +} + +static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) +{ + struct hw_fence_client_type_desc *desc; + int i, j, ret; + u32 start_offset; + + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE + drv_data->hw_fence_mem_fences_table_size); + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; + ret = _parse_client_queue_dt_props_indv(drv_data, desc); + if (ret) { + HWFNC_ERR("failed to initialize %s client queue size properties\n", + desc->name); + return ret; + } + + /* initialize client queue size desc for each client */ + for (j = 0; j < desc->clients_num; j++) { + drv_data->hw_fence_client_queue_size[desc->init_id + j] = + (struct hw_fence_client_queue_size_desc) + {desc->queues_num, desc->queue_entries, desc->mem_size, + start_offset}; + start_offset += desc->mem_size; + } + } + drv_data->used_mem_size = start_offset; + + return 0; +} + int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) { int ret; @@ -549,29 +715,17 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) /* clients queues init */ - if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { - HWFNC_ERR("queue 
entries:%lu will overflow client queue size\n", - drv_data->hw_fence_queue_entries); + ret = _parse_client_queue_dt_props(drv_data); + if (ret) { + HWFNC_ERR("failed to parse client queue properties\n"); return -EINVAL; } - drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * - drv_data->hw_fence_queue_entries; - - if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) - - HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("queue size:%lu will overflow client queue mem size\n", - drv_data->hw_fence_client_queue_size); - return -EINVAL; - } - drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE + - (HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size)); HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); - HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\b", - drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size, - drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size); + HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b", + drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); return 0; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 02fe414f19..5085592def 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -74,6 +74,14 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); + if (hw_fence_client->update_rxq && + hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num < + HW_FENCE_CLIENT_QUEUES) { + HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id); + ret = -EINVAL; + goto error; + } + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); /* Alloc Client HFI Headers and Queues */ From 368ae729919b911174c38f214b2d399d20d236d5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 17:15:03 -0700 Subject: [PATCH 42/77] mm-drivers: hw_fence: add support for ipe, vpu, and ife clients Update hw fence driver to support new clients with large number of possible sub-clients, which can be configured in device-tree. Add client queues support for ipe, vpu, and ife clients. 
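As a sketch of the intended device-tree usage (the property name and three-cell layout follow _parse_client_queue_dt_props_indv(); the client instance and values below are illustrative only, not mandated by this change), a single ife0 sub-client with one Tx-only queue of 128 entries could be enabled with:

    qcom,hw-fence-client-type-ife0 = <1 1 128>; /* clients_num queues_num queue_entries */

Client types that are absent from device-tree keep the default clients_num of zero from the hw_fence_client_types table, so no client queue memory is reserved for them.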
Change-Id: I6e274819c1c154af3ea977d1d09e419d86f6fe8e Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 2 +- hw_fence/include/hw_fence_drv_priv.h | 20 ++-- hw_fence/include/hw_fence_drv_utils.h | 17 ++++ hw_fence/src/hw_fence_drv_priv.c | 22 +++- hw_fence/src/hw_fence_drv_utils.c | 140 ++++++++++++++++++++++++-- hw_fence/src/hw_fence_ioctl.c | 8 +- hw_fence/src/msm_hw_fence.c | 43 ++++++-- 7 files changed, 218 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index bfb654e603..a1d66e0cdd 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -85,7 +85,7 @@ struct hw_fence_out_clients_map { * The index of this struct must match the enum hw_fence_client_id */ static const struct hw_fence_out_clients_map - dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = { + dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */ diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 29f1fb00c9..2645fbc638 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -48,8 +48,8 @@ #define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE #define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload)) -/* Locks area for all the clients */ -#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1)) +/* Locks area for all clients with RxQ */ +#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num) #define HW_FENCE_TX_QUEUE 1 #define HW_FENCE_RX_QUEUE 2 @@ -165,7 +165,9 @@ enum payload_type { /** * struct msm_hw_fence_client - Structure holding the per-Client allocated resources. - * @client_id: id of the client + * @client_id: internal client_id used within HW fence driver; index into the clients struct + * @client_id_ext: external client_id, equal to client_id except for clients with configurable + * number of sub-clients (e.g. 
ife clients) * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @ipc_signal_id: id of the signal to be triggered for this client @@ -178,6 +180,7 @@ enum payload_type { */ struct msm_hw_fence_client { enum hw_fence_client_id client_id; + enum hw_fence_client_id client_id_ext; struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int ipc_signal_id; @@ -257,6 +260,8 @@ struct hw_fence_client_queue_size_desc { * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client + * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree) + * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table * @client_lock_tbl: pointer to the per-client locks table @@ -290,7 +295,7 @@ struct hw_fence_client_queue_size_desc { * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids * @clients_register_lock: lock to synchronize clients registration and deregistration - * @msm_hw_fence_client: table with the handles of the registered clients + * @clients: table with the handles of the registered clients; size is equal to clients_num * @vm_ready: flag to indicate if vm has been initialized * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized */ @@ -307,7 +312,10 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - struct hw_fence_client_queue_size_desc hw_fence_client_queue_size[HW_FENCE_CLIENT_MAX]; + struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size; + struct hw_fence_client_type_desc *hw_fence_client_types; + u32 rxq_clients_num; + u32 clients_num; /* HW Fences Table VA */ struct msm_hw_fence *hw_fences_tbl; @@ -366,7 +374,7 @@ struct hw_fence_driver_data { struct mutex clients_register_lock; /* table with registered client handles */ - struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX]; + struct msm_hw_fence_client **clients; bool vm_ready; #ifdef HW_DPU_IPCC diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 2ef6df0fe9..ac8b504a52 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -111,4 +111,21 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags); +/** + * hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver + * from the client_id used externally + * + * Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX, + * otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if + * provided with client IDs for such clients when support for those clients is not configured in + * device-tree. 
+ * + * @drv_data: hw fence driver data + * @client_id: external client_id to get internal client_id for + * + * Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX + */ +enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, + enum hw_fence_client_id client_id); + #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 7dc3b69c56..ce7546de69 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -46,7 +46,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id >= drv_data->clients_num) { HWFNC_ERR("Invalid client_id: %d\n", client_id); return -EINVAL; } @@ -563,7 +563,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. */ - switch (hw_fence_client->client_id) { + switch ((int)hw_fence_client->client_id) { case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; @@ -596,6 +596,16 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, } #endif /* HW_DPU_IPCC */ break; + case HW_FENCE_CLIENT_ID_IPE: + /* nothing to initialize for IPE client */ + break; + case HW_FENCE_CLIENT_ID_VPU: + /* nothing to initialize for VPU client */ + break; + case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: + /* nothing to initialize for IFE clients */ + break; default: HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id); ret = -EINVAL; @@ -1397,7 +1407,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, u64 client_data = 0; /* signal with an error all the waiting clients for this fence */ - for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) { + for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; data_id = hw_fence_get_client_data_id(wait_client_id); @@ -1466,6 +1476,12 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id case HW_FENCE_CLIENT_ID_VAL1: data_id = HW_FENCE_CLIENT_DATA_ID_VAL1; break; + case HW_FENCE_CLIENT_ID_IPE: + data_id = HW_FENCE_CLIENT_DATA_ID_IPE; + break; + case HW_FENCE_CLIENT_ID_VPU: + data_id = HW_FENCE_CLIENT_DATA_ID_VPU; + break; default: data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA; break; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a9a391982e..e1857bb962 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -22,15 +22,49 @@ #define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 /** - * HW_FENCE_MAX_CLIENT_TYPE: - * Total number of client types (GFX, DPU, VAL) + * HW_FENCE_MAX_CLIENT_TYPE_STATIC: + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) */ -#define HW_FENCE_MAX_CLIENT_TYPE 3 +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 + +/** + * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: + * Maximum number of client types with configurable number of sub-clients (e.g. 
IFE) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 + +/** + * HW_FENCE_MAX_CLIENT_TYPE: + * Total number of client types with and without configurable number of sub-clients + */ +#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + +/** + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: + * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients + */ +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 + +/** + * HW_FENCE_MIN_RXQ_CLIENTS: + * Minimum number of static hw fence clients with rxq + */ +#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6 + +/** + * HW_FENCE_MIN_RXQ_CLIENT_TYPE: + * Minimum number of static hw fence client types with rxq (GFX, DPU, VAL) + */ +#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3 /* Maximum number of clients for each client type */ #define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 #define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 +#define HW_FENCE_CLIENT_TYPE_MAX_IPE 1 +#define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 /** * struct hw_fence_client_type_desc - Structure holding client type properties, including static @@ -71,7 +105,8 @@ struct hw_fence_client_type_desc { * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. * 2. Each HW Fence client ID must be described by one of the client types in this table. * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. - * 4. HW_FENCE_MAX_CLIENT_TYPE must be incremented for new client types. + * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must + * be incremented as appropriate for new client types. 
*/ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, @@ -80,6 +115,18 @@ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] HW_FENCE_CLIENT_QUEUES, 0, 0}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, + HW_FENCE_CLIENT_QUEUES, 0, 0}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, }; static void _lock(uint64_t *wait) @@ -527,15 +574,16 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, case HW_FENCE_MEM_RESERVE_LOCKS_REGION: /* Locks region starts at the end of the ctrl queues */ start_offset = drv_data->hw_fence_mem_ctrl_queues_size; - *size = HW_FENCE_MEM_LOCKS_SIZE; + *size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); break; case HW_FENCE_MEM_RESERVE_TABLE: /* HW Fence table starts at the end of the Locks region */ - start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE; + start_offset = drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num); *size = drv_data->hw_fence_mem_fences_table_size; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id >= drv_data->clients_num) { HWFNC_ERR("unexpected client_id:%d\n", client_id); ret = -EINVAL; goto exit; @@ -641,9 +689,10 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) struct hw_fence_client_type_desc *desc; int i, j, ret; u32 start_offset; + size_t size; + int configurable_clients_num = 0; - start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + - HW_FENCE_MEM_LOCKS_SIZE + drv_data->hw_fence_mem_fences_table_size); + drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS; for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { desc = &hw_fence_client_types[i]; ret = _parse_client_queue_dt_props_indv(drv_data, desc); @@ -653,12 +702,43 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) return ret; } - /* initialize client queue size desc for each client */ + if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE && + desc->queues_num == HW_FENCE_CLIENT_QUEUES) + drv_data->rxq_clients_num += desc->clients_num; + + if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC) + configurable_clients_num += desc->clients_num; + } + + /* store client type descriptors for configurable client indexing logic */ + drv_data->hw_fence_client_types = hw_fence_client_types; + + /* clients and size desc are allocated for all static clients regardless of device-tree */ + drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num; + + /* allocate memory for client queue size descriptors 
*/ + size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc); + drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL); + if (!drv_data->hw_fence_client_queue_size) + return -ENOMEM; + + /* initialize client queue size desc for each client */ + start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size + + HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) + + drv_data->hw_fence_mem_fences_table_size); + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) { + desc = &hw_fence_client_types[i]; for (j = 0; j < desc->clients_num; j++) { - drv_data->hw_fence_client_queue_size[desc->init_id + j] = + enum hw_fence_client_id client_id_ext = desc->init_id + j; + enum hw_fence_client_id client_id = + hw_fence_utils_get_client_id_priv(drv_data, client_id_ext); + + drv_data->hw_fence_client_queue_size[client_id] = (struct hw_fence_client_queue_size_desc) {desc->queues_num, desc->queue_entries, desc->mem_size, start_offset}; + HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", + desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; } } @@ -670,6 +750,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) { int ret; + size_t size; u32 val = 0; ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val); @@ -721,11 +802,20 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data) return -EINVAL; } + /* allocate clients */ + + size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *); + drv_data->clients = kzalloc(size, GFP_KERNEL); + if (!drv_data->clients) + return -ENOMEM; + HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b", drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size, drv_data->hw_fence_queue_entries); HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b", drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size); + HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num, + drv_data->used_mem_size); return 0; } @@ -837,3 +927,31 @@ int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) return 0; } + +enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, + enum hw_fence_client_id client_id) +{ + int i, client_type, offset; + enum hw_fence_client_id client_id_priv; + + if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX) + return client_id; + + /* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */ + client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + + (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) / + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) % + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT; + + /* invalid client id out of range of supported configurable sub-clients */ + if (offset >= drv_data->hw_fence_client_types[client_type].clients_num) + return HW_FENCE_CLIENT_MAX; + + client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset; + + for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++) + client_id_priv += drv_data->hw_fence_client_types[i].clients_num; + + return client_id_priv; +} diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 72566126c6..02942dc3ea 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -132,9 +132,9 @@ static bool _is_valid_client(struct 
hw_sync_obj *obj) if (!obj) return false; - if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id >= HW_FENCE_CLIENT_MAX) { + if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id, - HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return false; } @@ -151,9 +151,9 @@ static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg) if (!obj) return -EINVAL; - if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) { + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id, - HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return -EINVAL; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 5085592def..30eaf25c70 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -17,24 +17,33 @@ struct hw_fence_driver_data *hw_fence_drv_data; static bool hw_fence_driver_enable; -void *msm_hw_fence_register(enum hw_fence_client_id client_id, +void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) { struct msm_hw_fence_client *hw_fence_client; + enum hw_fence_client_id client_id; int ret; - HWFNC_DBG_H("++ client_id:%d\n", client_id); + HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { HWFNC_ERR("hw fence driver not ready\n"); return ERR_PTR(-EAGAIN); } - if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) { - HWFNC_ERR("Invalid params: %d client_id:%d\n", - !mem_descriptor, client_id); + if (!mem_descriptor || client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: %d client_id_ext:%d\n", + !mem_descriptor, client_id_ext); return ERR_PTR(-EINVAL); } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + if (client_id >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n", + client_id, client_id_ext); + return ERR_PTR(-EINVAL); + } + /* Alloc client handle */ hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL); if (!hw_fence_client) @@ -54,6 +63,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id, mutex_unlock(&hw_fence_drv_data->clients_register_lock); hw_fence_client->client_id = client_id; + hw_fence_client->client_id_ext = client_id_ext; hw_fence_client->ipc_client_vid = hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id); hw_fence_client->ipc_client_pid = @@ -132,7 +142,7 @@ int msm_hw_fence_deregister(void *client_handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); return -EINVAL; } @@ -257,7 +267,7 @@ int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle) } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) { + if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) { HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id); return -EINVAL; } @@ -305,6 +315,12 @@ int msm_hw_fence_wait_update_v2(void *client_handle, return -EINVAL; } + if 
(hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) { + HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext); + return -EINVAL; + } + HWFNC_DBG_H("+\n"); /* Process all the list of fences */ @@ -380,10 +396,19 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) } EXPORT_SYMBOL(msm_hw_fence_reset_client); -int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags) +int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags) { + enum hw_fence_client_id client_id; + + if (client_id_ext >= HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext); + return -EINVAL; + } + + client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext); + if (client_id >= HW_FENCE_CLIENT_MAX) { - HWFNC_ERR("Invalid client_id:%d\n", client_id); + HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext); return -EINVAL; } From 965d398c0635af4ab59fac971fba9a662abfe016 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 19 Oct 2022 15:17:59 -0700 Subject: [PATCH 43/77] mm-drivers: hw_fence: update ipc for ipe, vpu, ife clients Update ipc configurations to support IPE, VPU, and IFE clients in hw fence driver. Add support for IPE and VPU clients on kalama, and add support for all clients on pineaple. Change-Id: Iee577118284a02bd5b368ca206e88ed75eaa95b3 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 20 ++++ hw_fence/include/hw_fence_drv_utils.h | 42 ++++++++ hw_fence/src/hw_fence_drv_ipc.c | 147 +++++++++++++++++++++++--- hw_fence/src/hw_fence_drv_utils.c | 36 ------- 4 files changed, 196 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index e905ea8ed6..07b7aa754c 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -9,12 +9,32 @@ /* ipc clients virtual client-id */ #define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8 #define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9 +#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11 +#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12 #define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135 /* ipc clients physical client-id */ #define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3 #define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4 +#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5 +#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8 #define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9 +#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11 +#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12 +#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13 +#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14 +#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15 +#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16 +#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 +#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index ac8b504a52..6d9cd9627d 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -6,6 +6,24 @@ 
#ifndef __HW_FENCE_DRV_UTILS_H #define __HW_FENCE_DRV_UTILS_H +/** + * HW_FENCE_MAX_CLIENT_TYPE_STATIC: + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 + +/** + * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: + * Maximum number of client types with configurable number of sub-clients (e.g. IFE) + */ +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 + +/** + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: + * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients + */ +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 + /** * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues. @@ -20,6 +38,30 @@ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CLIENT_QUEUE }; +/** + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. + * + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. + * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @mem_size: size of memory allocated for client queue(s) per client + */ +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; + u32 queues_num; + u32 queue_entries; + u32 mem_size; +}; + /** * global_atomic_store() - Inter-processor lock * @drv_data: hw fence driver data diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a3cccfbf31..7e46e08f53 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -9,6 +9,13 @@ #include "hw_fence_drv_ipc.h" #include "hw_fence_drv_debug.h" +/* + * Max size of base table with ipc mappings, with one mapping per client type with configurable + * number of subclients + */ +#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \ + HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) + /** * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger. * @ipc_client_id_virt: virtual ipc client id for the hw-fence client. @@ -36,7 +43,7 @@ struct hw_fence_client_ipc_map { * To change to a loopback signal instead of GMU, change ctx0 row to use: * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
*/ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ @@ -64,7 +71,7 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M * * Note that the index of this struct must match the enum hw_fence_client_id */ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */ @@ -81,7 +88,17 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ +#else + {0, 0, 0, false, false}, /* val0 */ + {0, 0, 0, false, false}, /* val1 */ + {0, 0, 0, false, false}, /* val2 */ + {0, 0, 0, false, false}, /* val3 */ + {0, 0, 0, false, false}, /* val4 */ + {0, 0, 0, false, false}, /* val5 */ + {0, 0, 0, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */ }; /** @@ -90,9 +107,12 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = { * signaled. * This version is for targets that support dpu client id and IPC v2. * - * Note that the index of this struct must match the enum hw_fence_client_id + * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less + * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX. + * For clients with configurable sub-clients, the index of this struct matches + * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC). 
*/ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] = { +struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = { {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */ {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */ @@ -109,12 +129,30 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ +#else + {0, 0, 0, false, false}, /* val0 */ + {0, 0, 0, false, false}, /* val1 */ + {0, 0, 0, false, false}, /* val2 */ + {0, 0, 0, false, false}, /* val3 */ + {0, 0, 0, false, false}, /* val4 */ + {0, 0, 0, false, false}, /* val5 */ + {0, 0, 0, false, false}, /* val6 */ #endif /* CONFIG_DEBUG_FS */ + {HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */ + {HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */ + {HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/ + {HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/ + {HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/ + {HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/ + {HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/ + {HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/ + {HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/ + {HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/ }; int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_client_id_virt; @@ -122,7 +160,7 @@ int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_client_id_phys; @@ -130,7 +168,7 @@ int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) + if (!drv_data || client_id >= drv_data->clients_num) return -EINVAL; return drv_data->ipc_clients_table[client_id].ipc_signal_id; @@ -138,8 +176,8 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id) { - if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) - return -EINVAL; + if (!drv_data || client_id >= drv_data->clients_num) + return false; return drv_data->ipc_clients_table[client_id].update_rxq; } @@ -147,7 +185,7 @@ bool 
hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id) { if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX) - return -EINVAL; + return false; return drv_data->ipc_clients_table[client_id].send_ipc; } @@ -164,6 +202,26 @@ static inline char *_get_ipc_phys_client_name(u32 client_id) return "GPU_PID"; case HW_FENCE_IPC_CLIENT_ID_DPU_PID: return "DPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_PID: + return "IPE_PID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_PID: + return "VPU_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_PID: + return "IFE0_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_PID: + return "IFE1_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_PID: + return "IFE2_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_PID: + return "IFE3_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_PID: + return "IFE4_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_PID: + return "IFE5_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_PID: + return "IFE6_PID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_PID: + return "IFE7_PID"; } return "UNKNOWN_PID"; @@ -181,6 +239,26 @@ static inline char *_get_ipc_virt_client_name(u32 client_id) return "GPU_VID"; case HW_FENCE_IPC_CLIENT_ID_DPU_VID: return "DPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IPE_VID: + return "IPE_VID"; + case HW_FENCE_IPC_CLIENT_ID_VPU_VID: + return "VPU_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE0_VID: + return "IFE0_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE1_VID: + return "IFE1_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE2_VID: + return "IFE2_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE3_VID: + return "IFE3_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE4_VID: + return "IFE4_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE5_VID: + return "IFE5_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE6_VID: + return "IFE6_VID"; + case HW_FENCE_IPC_CLIENT_ID_IFE7_VID: + return "IFE7_VID"; } return "UNKNOWN_VID"; @@ -208,6 +286,46 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, wmb(); } +static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_ipc_map *base_table) +{ + int i, j, map_idx; + size_t size; + + size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map); + drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL); + + if (!drv_data->ipc_clients_table) + return -ENOMEM; + + /* copy mappings for static hw fence clients */ + size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map); + memcpy(drv_data->ipc_clients_table, base_table, size); + + /* initialize mappings for ipc clients with configurable number of hw fence clients */ + map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX; + for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) { + int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i; + int clients_num = drv_data->hw_fence_client_types[client_type].clients_num; + + for (j = 0; j < clients_num; j++) { + /* this should never happen if drv_data->clients_num is correct */ + if (map_idx >= drv_data->clients_num) { + HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n", + drv_data->hw_fence_client_types[client_type].name, + clients_num, drv_data->clients_num); + return -EINVAL; + } + drv_data->ipc_clients_table[map_idx] = + base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i]; + drv_data->ipc_clients_table[map_idx].ipc_signal_id = j; + map_idx++; + } + } + + return 0; +} + /** * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data, * according to the ipcc hw revision. 
@@ -216,6 +334,8 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, */ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev) { + int ret = 0; + switch (hwrev) { case HW_FENCE_IPCC_HW_REV_100: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; @@ -242,14 +362,15 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_v2; + ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, + hw_fence_clients_ipc_map_v2); HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); break; default: return -1; } - return 0; + return ret; } int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) @@ -320,7 +441,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("Initialize dpu signals\n"); /* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */ - for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) { + for (i = 0; i < drv_data->clients_num; i++) { hw_fence_client = &drv_data->ipc_clients_table[i]; /* skip any client that is not a dpu client */ diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index e1857bb962..d530950785 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -21,18 +21,6 @@ */ #define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000 -/** - * HW_FENCE_MAX_CLIENT_TYPE_STATIC: - * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) - */ -#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 - -/** - * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: - * Maximum number of client types with configurable number of sub-clients (e.g. IFE) - */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 - /** * HW_FENCE_MAX_CLIENT_TYPE: * Total number of client types with and without configurable number of sub-clients @@ -66,30 +54,6 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 -/** - * struct hw_fence_client_type_desc - Structure holding client type properties, including static - * properties and client queue properties read from device-tree. - * - * @name: name of client type, used to parse properties from device-tree - * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. - * HW_FENCE_CLIENT_ID_CTL0 for DPU clients - * @max_clients_num: maximum number of clients of given client type - * @clients_num: number of clients of given client type - * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or - * two (for both Tx and Rx Queues) - * @queue_entries: number of entries per client queue of given client type - * @mem_size: size of memory allocated for client queue(s) per client - */ -struct hw_fence_client_type_desc { - char *name; - enum hw_fence_client_id init_id; - u32 max_clients_num; - u32 clients_num; - u32 queues_num; - u32 queue_entries; - u32 mem_size; -}; - /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
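As an illustration of the table expansion above (a two-sub-client ife0 configuration is hypothetical, not part of this change): had device-tree enabled two ife0 sub-clients, _hw_fence_ipcc_init_map_with_configurable_clients() would copy the single ife0 row of hw_fence_clients_ipc_map_v2 into two consecutive entries of drv_data->ipc_clients_table, both keeping HW_FENCE_IPC_CLIENT_ID_IFE0_VID/HW_FENCE_IPC_CLIENT_ID_IFE0_PID while ipc_signal_id is overwritten with 0 and 1, so each sub-client triggers its own IPCC signal.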
From f4afac60ba7a802d5221d05368d63ef922d03422 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 17 Oct 2022 13:11:26 -0700 Subject: [PATCH 44/77] mm-drivers: hw_fence: update txq to use separate software wr ptr Some hw fence driver clients require the ability to call the 'msm_hw_fence_update_txq' API to update the queue payload without updating the 'write_index' member within the hfi header. These clients also need to receive the index at which the payload is written within the queue. This change adds support for this requirement by adding a device-tree property to configure this behavior for each client. The 'tx_wm' member within the hfi header is used to track in software the place where the payloads are within the queue for clients that skip the update to the 'write_index' member. Change-Id: I2881fa49bef4e49691eb6049830f9dc8dc8fa425 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 6 +++ hw_fence/include/hw_fence_drv_utils.h | 16 +++++++ hw_fence/src/hw_fence_drv_priv.c | 18 +++++--- hw_fence/src/hw_fence_drv_utils.c | 60 +++++++++++++++++---------- hw_fence/src/msm_hw_fence.c | 2 + 5 files changed, 75 insertions(+), 27 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 2645fbc638..a59b48f2e5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -175,6 +175,8 @@ enum payload_type { * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -188,6 +190,7 @@ struct msm_hw_fence_client { int ipc_client_pid; bool update_rxq; bool send_ipc; + bool skip_txq_wr_idx; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; @@ -241,12 +244,15 @@ struct msm_hw_fence_dbg_data { * @mem_size: size of memory allocated for client queues * @start_offset: start offset of client queue memory region, from beginning of carved-out memory * allocation for hw fence driver + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead */ struct hw_fence_client_queue_size_desc { u32 queues_num; u32 queue_entries; u32 mem_size; u32 start_offset; + bool skip_txq_wr_idx; }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 6d9cd9627d..756f07b2bf 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -51,6 +51,8 @@ enum hw_fence_mem_reserve { * two (for both Tx and Rx Queues) * @queue_entries: number of entries per client queue of given client type * @mem_size: size of memory allocated for client queue(s) per client + * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence + * driver and hfi_header->tx_wm is updated instead */ struct hw_fence_client_type_desc { char *name; @@ -60,6 +62,7 @@ struct hw_fence_client_type_desc { u32 queues_num; u32 queue_entries; u32 mem_size; + bool skip_txq_wr_idx; }; /** @@ -170,4 +173,17 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, enum 
hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id); +/** + * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index + * is not updated in hw fence driver. Instead, + * hfi_header->tx_wm tracks where payload is written within + * the queue. + * + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise + */ +bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id); + #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ce7546de69..21fe8822a0 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -248,6 +248,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, bool lock_client = false; u32 lock_idx; u64 timestamp; + u32 *wr_ptr; int ret = 0; if (queue_type >= @@ -269,6 +270,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, return -EINVAL; } + /* if skipping update txq wr_index, then use hfi_header->tx_wm instead */ + if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx) + wr_ptr = &hfi_header->tx_wm; + else + wr_ptr = &hfi_header->write_index; + /* * We need to lock the client if there is an Rx Queue update, since that * is the only time when HW Fence driver can have a race condition updating @@ -294,11 +301,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, /* Get read and write index */ read_idx = readl_relaxed(&hfi_header->read_index); - write_idx = readl_relaxed(&hfi_header->write_index); + write_idx = readl_relaxed(wr_ptr); - HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n", - hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, - read_idx, write_idx, queue, queue_type); + HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", + hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, + read_idx, write_idx, queue, queue_type, + hw_fence_client->skip_txq_wr_idx ? "true" : "false"); /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : @@ -351,7 +359,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, wmb(); /* update the write index */ - writel_relaxed(to_write_idx, &hfi_header->write_index); + writel_relaxed(to_write_idx, wr_ptr); /* update memory for the index */ wmb(); diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index d530950785..2ae198fbfd 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -58,9 +58,9 @@ * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. * - * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and - * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be read - * from device-tree. + * The fields name, init_id, and max_clients_num are constants. Default values for clients_num, + * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num, + * queue_entries, and skip_txq_wr_idx can be read from device-tree. 
* * If a value for queue entries is not parsed for the client type, then the default number of client * queue entries (parsed from device-tree) is used. @@ -68,29 +68,30 @@ * Notes: * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'. * 2. Each HW Fence client ID must be described by one of the client types in this table. - * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num. + * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and + * skip_txq_wr_idx. * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must * be incremented as appropriate for new client types. */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0}, + HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, }; static void _lock(uint64_t *wait) @@ -594,13 +595,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da struct hw_fence_client_type_desc *desc) { char name[31]; - u32 tmp[3]; + u32 tmp[4]; u32 queue_size; int ret; /* parse client queue property from device-tree */ snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); - ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4); if (ret) { HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name, ret); @@ -609,6 +610,12 @@ static 
int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da desc->clients_num = tmp[0]; desc->queues_num = tmp[1]; desc->queue_entries = tmp[2]; + + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]); + return -EINVAL; + } + desc->skip_txq_wr_idx = tmp[3]; } if (desc->clients_num > desc->max_clients_num || !desc->queues_num || @@ -642,8 +649,9 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da return -EINVAL; } - HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu\n", desc->name, - desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size); + HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n", + desc->name, desc->clients_num, desc->queues_num, desc->queue_entries, + desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false"); return 0; } @@ -700,7 +708,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) drv_data->hw_fence_client_queue_size[client_id] = (struct hw_fence_client_queue_size_desc) {desc->queues_num, desc->queue_entries, desc->mem_size, - start_offset}; + start_offset, desc->skip_txq_wr_idx}; HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; @@ -919,3 +927,11 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver return client_id_priv; } + +bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num) + return false; + + return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx; +} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 30eaf25c70..e81a4dd457 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -93,6 +93,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); + hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, + client_id); /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, From e0bf897e1fcf09b79da3af6ed41a461fefa9f2f4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 24 Oct 2022 12:05:09 -0700 Subject: [PATCH 45/77] mm-drivers: hw_fence: update ipc protocol to fence for pineapple Starting pineapple, fence protocol is used for hw fence driver. 
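Concretely, for targets using HW_FENCE_IPCC_HW_REV_203, _hw_fence_ipcc_hwrev_init() now programs drv_data->protocol_id with HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE rather than HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE.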
Change-Id: I87435128c22aeb338dfcda38f0196e04dc9eb70b Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_ipc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 7e46e08f53..48317cafa8 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -361,7 +361,7 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 case HW_FENCE_IPCC_HW_REV_203: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */ + drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */ ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data, hw_fence_clients_ipc_map_v2); HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n"); From fbde79b1181e3634b3689bb74873e24237213290 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 16 Nov 2022 16:41:49 -0800 Subject: [PATCH 46/77] mm-drivers: hw-fence: extend ioctl support to create fence_array Current HW Fence validation framework limits the creation of a HW Fence- array to fences from a single parent client. This change adds support to hw_fence_create_array IOCTL so a HW fence-array can be created from fences of different clients. Change-Id: I6ce801f51747fcab503fc23c1ae981b107d4f315 Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 102 ++++++++++++---------------------- 1 file changed, 34 insertions(+), 68 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 02942dc3ea..379dbf971d 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -71,16 +71,16 @@ struct hw_fence_sync_create_data { /** * struct hw_fence_array_sync_create_data - data used in creating multiple fences. - * @seqno: array of sequence numbers used to create fences. - * @num_fences: number of fences to be created. - * @fence: return the fd of the new sync_file with the created fence. - * @hash: array of fence hash + * @seqno: sequence number used to create fence array. + * @num_fences: number of fence fds received. + * @fences: array of fence fds. + * @fence_array_fd: fd of fence array. 
*/ struct hw_fence_array_sync_create_data { - u64 seqno[HW_FENCE_ARRAY_SIZE]; + u64 seqno; int num_fences; - __s32 fence; - u64 hash[HW_FENCE_ARRAY_SIZE]; + u64 fences[HW_FENCE_ARRAY_SIZE]; + __s32 fence_array_fd; }; /** @@ -343,16 +343,22 @@ static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long a return 0; } +static void _put_child_fences(int i, struct dma_fence **fences) +{ + int fence_idx; + + for (fence_idx = i; fence_idx >= 0 ; fence_idx--) + dma_fence_put(fences[i]); +} + static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg) { struct dma_fence_array *fence_array; struct hw_fence_array_sync_create_data data; struct dma_fence **fences = NULL; - struct msm_hw_fence_create_params params; struct sync_file *sync_file; - spinlock_t **fence_lock = NULL; int num_fences, i, fd, ret; - u64 hash; + struct hw_dma_fence *fence; if (!_is_valid_client(obj)) { return -EINVAL; @@ -370,80 +376,43 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l num_fences, HW_FENCE_ARRAY_SIZE); return -EINVAL; } - fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL); - if (!fence_lock) - return -ENOMEM; fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); if (!fences) { - kfree(fence_lock); return -ENOMEM; } - /* - * Create the array of dma fences - * This API takes seqno[num_fences] as the seqno for the fence-array - * and from 0 to (num_fences - 1) for the fences in the array. - */ for (i = 0; i < num_fences; i++) { - struct hw_dma_fence *dma_fence; - - fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!fence_lock[i]) { - _cleanup_fences(i, fences, fence_lock); - return -ENOMEM; + fd = data.fences[i]; + if (fd <= 0) { + kfree(fences); + return -EINVAL; } - - dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL); - if (!dma_fence) { - _cleanup_fences(i, fences, fence_lock); - return -ENOMEM; + fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); + if (!fence) { + _put_child_fences(i-1, fences); + kfree(fences); + return -EINVAL; } - fences[i] = &dma_fence->base; - - spin_lock_init(fence_lock[i]); - dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i], - obj->context, data.seqno[i]); + fences[i] = &fence->base; } /* create the fence array from array of dma fences */ - fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0); + fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0); if (!fence_array) { HWFNC_ERR("Error creating fence_array\n"); - _cleanup_fences(num_fences - 1, fences, fence_lock); + /* decrease the refcount incremented for each child fences */ + for (i = 0; i < num_fences; i++) + dma_fence_put(fences[i]); + kfree(fences); return -EINVAL; } - /* create hw fences */ - for (i = 0; i < num_fences; i++) { - params.fence = fences[i]; - params.handle = &hash; - - ret = msm_hw_fence_create(obj->client_handle, ¶ms); - if (ret) { - HWFNC_ERR("Error creating HW fence\n"); - dma_fence_put(&fence_array->base); - /* - * free array of pointers, no need to call kfree in 'fences', - * since that is released from the fence-array release api - */ - kfree(fence_lock); - kfree(fence_array); - return -EINVAL; - } - - /* keep handle in dma_fence, to destroy hw-fence during release */ - to_hw_dma_fence(fences[i])->client_handle = obj->client_handle; - data.hash[i] = hash; - } - /* create fd */ fd = get_unused_fd_flags(0); - if (fd < 0) { + if (fd <= 0) { HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id); 
dma_fence_put(&fence_array->base); - kfree(fence_lock); - kfree(fence_array); return fd; } @@ -451,7 +420,6 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l if (sync_file == NULL) { HWFNC_ERR("couldn't create fence fd, %d\n", fd); dma_fence_put(&fence_array->base); - kfree(fence_lock); kfree(fence_array); ret = -EINVAL; goto exit; @@ -460,12 +428,10 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l /* Decrement the refcount that sync_file_create increments */ dma_fence_put(&fence_array->base); - data.fence = fd; + data.fence_array_fd = fd; if (copy_to_user((void __user *)arg, &data, sizeof(data))) { fput(sync_file->file); dma_fence_put(&fence_array->base); - kfree(fence_lock); - kfree(fence_array); ret = -EFAULT; goto exit; } @@ -492,7 +458,7 @@ static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; - fd = data.fence; + fd = data.fence_array_fd; fence = (struct dma_fence *)_hw_sync_get_fence(fd); if (!fence) { HWFNC_ERR("Invalid fence fd: %d\n", fd); From bb0f9e965f406d00229703a1b93dc150f3280843 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 30 Nov 2022 09:06:24 -0800 Subject: [PATCH 47/77] mm-drivers: hw_fence: share hw fence driver mem pool always When hw fencing is disabled via kernel command line argument, allow probing of hw fence driver and perform memory sharing during probe. This ensures that the carved out memory region for hw fences is always shared with hypervisor regardless of hw-fencing feature enablement. Change-Id: I7723fd61860e0d6b8dc374a054c8519d98d700a6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e81a4dd457..dcbe4cd80c 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -500,15 +500,26 @@ static int msm_hw_fence_probe_init(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, hw_fence_drv_data); hw_fence_drv_data->dev = &pdev->dev; - /* Initialize HW Fence Driver resources */ - rc = hw_fence_init(hw_fence_drv_data); - if (rc) - goto error; + if (hw_fence_driver_enable) { + /* Initialize HW Fence Driver resources */ + rc = hw_fence_init(hw_fence_drv_data); + if (rc) + goto error; - mutex_init(&hw_fence_drv_data->clients_register_lock); + mutex_init(&hw_fence_drv_data->clients_register_lock); - /* set ready ealue so clients can register */ - hw_fence_drv_data->resources_ready = true; + /* set ready value so clients can register */ + hw_fence_drv_data->resources_ready = true; + } else { + /* Allocate hw fence driver mem pool and share it with HYP */ + rc = hw_fence_utils_alloc_mem(hw_fence_drv_data); + if (rc) { + HWFNC_ERR("failed to alloc base memory\n"); + goto error; + } + + HWFNC_DBG_INFO("hw fence driver not enabled\n"); + } HWFNC_DBG_H("-\n"); @@ -534,11 +545,6 @@ static int msm_hw_fence_probe(struct platform_device *pdev) return -EINVAL; } - if (!hw_fence_driver_enable) { - HWFNC_DBG_INFO("hw fence driver not enabled\n"); - return -EOPNOTSUPP; - } - if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence")) rc = msm_hw_fence_probe_init(pdev); if (rc) From 13b4e1270c82e1b6c002782a9084a649298055c2 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 14 Nov 2022 11:50:04 -0800 Subject: [PATCH 48/77] mm-drivers: hw_fence: reset queues during client reset This change make sure that the write_idx and read_idx of the client hfi queues are reset during the call to msm_hw_fence_reset_client. Change-Id: Iaf94865ddf78ed8e19de509e3ee6176d03c5301c Signed-off-by: Ingrid Gallardo --- hw_fence/include/hw_fence_drv_priv.h | 2 ++ hw_fence/src/hw_fence_drv_priv.c | 43 ++++++++++++++++++++++++++++ hw_fence/src/msm_hw_fence.c | 4 ++- 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index a59b48f2e5..139d9288da 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -473,6 +473,8 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client); void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client); +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client); int hw_fence_create(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 21fe8822a0..ed7ce14bda 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1430,6 +1430,49 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, } } +void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + u32 rd_idx, wr_idx, lock_idx; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + hfi_header = queue->va_header; + + /* For the client TxQ: set the read-index same as last write that was done by the client */ + mb(); /* make sure data is ready before read */ + wr_idx = 
readl_relaxed(&hfi_header->write_index); + writel_relaxed(wr_idx, &hfi_header->read_index); + wmb(); /* make sure data is updated after write the index*/ + + /* For the client RxQ: set the write-index same as last read done by the client */ + if (hw_fence_client->update_rxq) { + lock_idx = hw_fence_client->client_id - 1; + + if (lock_idx >= drv_data->client_lock_tbl_cnt) { + HWFNC_ERR("cannot reset rxq, lock for client id:%d exceed max:%d\n", + hw_fence_client->client_id, drv_data->client_lock_tbl_cnt); + return; + } + HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx); + + /* lock the client rx queue to update */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); + + queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1]; + hfi_header = queue->va_header; + + mb(); /* make sure data is ready before read */ + rd_idx = readl_relaxed(&hfi_header->read_index); + writel_relaxed(rd_idx, &hfi_header->write_index); + wmb(); /* make sure data is updated after write the index */ + + /* unlock */ + GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + } +} + int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index e81a4dd457..6e1bde5d53 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -389,11 +389,13 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags) hw_fence_client = (struct msm_hw_fence_client *)client_handle; hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl; - HWFNC_DBG_L("reset fences for client:%d\n", hw_fence_client->client_id); + HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id); for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++) hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client, &hw_fences_tbl[i], i, reset_flags); + hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client); + return 0; } EXPORT_SYMBOL(msm_hw_fence_reset_client); From 4f59f5ce0294f07672dba504c244a102065a8a72 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 10 Jan 2023 15:10:31 -0800 Subject: [PATCH 49/77] mm-drivers: hw_fence: switch to qcom_scm_assign_mem from hyp_assign_phys Switch to upstream friendly qcom_scm_assign_mem from hyp_assign_phys. Change-Id: I01c6b93698fea094cf89926f3168466ba14061bc Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 2ae198fbfd..5ce438a921 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -339,18 +340,19 @@ int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data) static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, gh_vmid_t self, gh_vmid_t peer) { - u32 src_vmlist[1] = {self}; - int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC}; - int dst_vmlist[2] = {self, peer}; - int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE}; + struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}}; + struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE}, + {peer, PERM_READ | PERM_WRITE}}; + int srcvmids = BIT(src_vmlist[0].vmid); + int dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); struct gh_acl_desc *acl; struct gh_sgl_desc *sgl; int ret; - ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), - src_vmlist, 1, dst_vmlist, dst_perms, 2); + ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, + dst_vmlist, ARRAY_SIZE(dst_vmlist)); if (ret) { - HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n", + HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); return ret; } @@ -379,9 +381,8 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); /* Attempt to give resource back to HLOS */ - hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res), - dst_vmlist, 2, - src_vmlist, src_perms, 1); + qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), + &dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist)); ret = -EPROBE_DEFER; } From 1db686776229c73c2fe7158e4b5643c268b24a9d Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 5 Jan 2023 16:05:37 -0800 Subject: [PATCH 50/77] mm-drivers: hw_fence: change memory mapping of hwfence shared memory Currently, carved-out memory region is mapped as IO. Change mapping to normal memory. Change-Id: I1eca1067e30e2a6e39969c003dcce9ea0f9c47fd Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 2ae198fbfd..c2d598ad27 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -471,7 +471,7 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data) return -EINVAL; } - drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start, + drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start, resource_size(&drv_data->res)); if (!drv_data->io_mem_base) { HWFNC_ERR("ioremap failed!\n"); From 0219a76630381851656354bdf9640eb0b8f29829 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 24 Jan 2023 11:02:46 -0800 Subject: [PATCH 51/77] mm-drivers: hw_fence: silently fail registration when feature disabled Current hw-fencing feature is disabled by default through kernel command line argument, therefore it is expected that clients receive an error when trying to register a client while feature is disabled. 
This change silence any print error messages during the clients registration when feature is disabled. Change-Id: Ie57adb52a975f9541e485039a582407cf21c11cd Signed-off-by: Ingrid Gallardo --- hw_fence/src/msm_hw_fence.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index dcbe4cd80c..424c84662b 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -24,6 +24,9 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, enum hw_fence_client_id client_id; int ret; + if (!hw_fence_driver_enable) + return ERR_PTR(-ENODEV); + HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext); if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { From e443a11c833a826142d45035fd4bb4656fbfc45a Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 9 Feb 2023 11:46:49 -0800 Subject: [PATCH 52/77] mm-drivers: hw_fence: fix ioctl support for ipcc signaling Currently, the ioctl to trigger ipcc signals uses the client virtual id as the tx client and the client physical id as the rx client. This should be reversed to correctly perform ipcc signaling. Change-Id: I61e7ec0e4bfd63f2d7e1cd1dd4e62dd4f6a82143 Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 379dbf971d..7c5b141faf 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -542,8 +542,8 @@ static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long ar if (signal_id < 0) return -EINVAL; - tx_client = hw_fence_client->ipc_client_vid; - rx_client = hw_fence_client->ipc_client_pid; + tx_client = hw_fence_client->ipc_client_pid; + rx_client = hw_fence_client->ipc_client_vid; ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id); if (ret) { HWFNC_ERR("hw fence trigger signal has failed\n"); From 90268c94ab312403ba64a443df9040c525290954 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 10 Feb 2023 12:16:09 -0800 Subject: [PATCH 53/77] mm-driver: hw_fence: resolve compilation failure with data types Latest update to qcom_scm_assign_mem API changed input data types. Change data types in HW Fence Driver to ensure compatibility with newest API. 
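A minimal sketch of the resulting call on kernels >= 6.1, where
qcom_scm_assign_mem() expects the source-VM bitmask as a u64 (the version check
added below keeps older kernels on unsigned int); the permission macros and the
struct qcom_scm_vmperm initializers are the ones this driver already uses:

	u64 srcvmids = BIT(self);
	struct qcom_scm_vmperm dst_vmlist[] = {
		{ self, PERM_READ | PERM_WRITE },
		{ peer, PERM_READ | PERM_WRITE },
	};
	int ret;

	ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res),
		&srcvmids, dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret)
		HWFNC_ERR("qcom_scm_assign_mem failed err=%d\n", ret);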
Change-Id: Ia25bb9e129cf67ec99e18c60407ac997cf0d6e3f Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 33ab483f6b..fa407134fb 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -343,12 +344,17 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}}; struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE}, {peer, PERM_READ | PERM_WRITE}}; - int srcvmids = BIT(src_vmlist[0].vmid); - int dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + u64 srcvmids, dstvmids; +#else + unsigned int srcvmids, dstvmids; +#endif struct gh_acl_desc *acl; struct gh_sgl_desc *sgl; int ret; + srcvmids = BIT(src_vmlist[0].vmid); + dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid); ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids, dst_vmlist, ARRAY_SIZE(dst_vmlist)); if (ret) { From 4e5524c85fdf61b768b54646ac161b84074e5b44 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 9 Jan 2023 15:22:04 -0800 Subject: [PATCH 54/77] mm-drivers: hw_fence: trigger signal for validation signaled fences When validation clients register to wait on already signaled fences, the hw fence driver must signal the client wait and wake up waiting validation clients. Change-Id: I3e0f7abfbb055d8e5fbb5afd5fc8b88991c95aee Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 4 +-- hw_fence/src/hw_fence_drv_ipc.c | 44 ++++++++++++++-------------- hw_fence/src/hw_fence_drv_priv.c | 8 ++++- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 139d9288da..9ed047208b 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_INTERNAL_H @@ -108,7 +108,7 @@ enum hw_fence_loopback_id { HW_FENCE_LOOPBACK_DPU_CTL_5, HW_FENCE_LOOPBACK_GFX_CTX_0, #if IS_ENABLED(CONFIG_DEBUG_FS) - HW_FENCE_LOOPBACK_VAL_0, + HW_FENCE_LOOPBACK_VAL_0 = HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_LOOPBACK_VAL_1, HW_FENCE_LOOPBACK_VAL_2, HW_FENCE_LOOPBACK_VAL_3, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index 48317cafa8..a2289fc8ee 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -53,13 +53,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ #endif /* CONFIG_DEBUG_FS */ }; @@ -81,13 +81,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ #else {0, 0, 0, false, false}, /* val0 */ {0, 0, 0, false, false}, /* val1 */ @@ -122,13 +122,13 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] {HW_FENCE_IPC_CLIENT_ID_DPU_VID, 
HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */ {HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */ #if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true},/* val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true},/* val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true},/* val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true},/* val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/ + {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/ #else {0, 0, 0, false, false}, /* val0 */ {0, 0, 0, false, false}, /* val1 */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ed7ce14bda..f69fd408e6 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1123,6 +1123,12 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, if (hw_fence_client->send_ipc) hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, hw_fence_client->ipc_signal_id); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 + && hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6) + process_validation_client_loopback(drv_data, hw_fence_client->client_id); +#endif /* CONFIG_DEBUG_FS */ } static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data, From 5eec9ba76cc7e41de27d40e3422f5a6184e1c9d6 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 3 Mar 2023 15:38:06 -0800 Subject: [PATCH 55/77] mm-drivers: hw_fence: add header file for translation to synx api Add header file for synx translation layer in hwfence driver. Change-Id: Ie0ec426292cda180159d8572a3ace474804d3af5 Signed-off-by: Grace An --- .../include/msm_hw_fence_synx_translation.h | 220 ++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 hw_fence/include/msm_hw_fence_synx_translation.h diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h new file mode 100644 index 0000000000..1235d7639e --- /dev/null +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __MSM_HW_FENCE_SYNX_TRANSLATION_H +#define __MSM_HW_FENCE_SYNX_TRANSLATION_H + +#include + +#define SYNX_HW_FENCE_CLIENT_START 1024 +#define SYNX_HW_FENCE_CLIENT_END 4096 +#define SYNX_MAX_SIGNAL_PER_CLIENT 64 + +extern bool hw_fence_driver_enable; + +/** + * enum synx_client_id : Unique identifier of the supported clients + * + * @SYNX_CLIENT_HW_FENCE_GFX_CTX0 : HW Fence GFX Client 0 + * @SYNX_CLIENT_HW_FENCE_IPE_CTX0 : HW Fence IPE Client 0 + * @SYNX_CLIENT_HW_FENCE_VID_CTX0 : HW Fence Video Client 0 + * @SYNX_CLIENT_HW_FENCE_DPU0_CTL0 : HW Fence DPU0 Client 0 + * @SYNX_CLIENT_HW_FENCE_DPU1_CTL0 : HW Fence DPU1 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE0_CTX0 : HW Fence IFE0 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE1_CTX0 : HW Fence IFE1 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE2_CTX0 : HW Fence IFE2 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE3_CTX0 : HW Fence IFE3 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE4_CTX0 : HW Fence IFE4 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE5_CTX0 : HW Fence IFE5 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE6_CTX0 : HW Fence IFE6 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE7_CTX0 : HW Fence IFE7 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE8_CTX0 : HW Fence IFE8 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE9_CTX0 : HW Fence IFE9 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE10_CTX0 : HW Fence IFE10 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE11_CTX0 : HW Fence IFE11 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE12_CTX0 : HW Fence IFE12 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE13_CTX0 : HW Fence IFE13 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE14_CTX0 : HW Fence IFE14 Client 0 + * @SYNX_CLIENT_HW_FENCE_IFE15_CTX0 : HW Fence IFE15 Client 0 + */ +enum synx_hwfence_client_id { + SYNX_CLIENT_HW_FENCE_GFX_CTX0 = SYNX_HW_FENCE_CLIENT_START, + SYNX_CLIENT_HW_FENCE_IPE_CTX0 = SYNX_CLIENT_HW_FENCE_GFX_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_VID_CTX0 = SYNX_CLIENT_HW_FENCE_IPE_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_DPU0_CTL0 = SYNX_CLIENT_HW_FENCE_VID_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_DPU1_CTL0 = SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE0_CTX0 = SYNX_CLIENT_HW_FENCE_DPU1_CTL0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE1_CTX0 = SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE2_CTX0 = SYNX_CLIENT_HW_FENCE_IFE1_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE3_CTX0 = SYNX_CLIENT_HW_FENCE_IFE2_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE4_CTX0 = SYNX_CLIENT_HW_FENCE_IFE3_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE5_CTX0 = SYNX_CLIENT_HW_FENCE_IFE4_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE6_CTX0 = SYNX_CLIENT_HW_FENCE_IFE5_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE7_CTX0 = SYNX_CLIENT_HW_FENCE_IFE6_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE8_CTX0 = SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE9_CTX0 = SYNX_CLIENT_HW_FENCE_IFE8_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE10_CTX0 = SYNX_CLIENT_HW_FENCE_IFE9_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE11_CTX0 = SYNX_CLIENT_HW_FENCE_IFE10_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE12_CTX0 = SYNX_CLIENT_HW_FENCE_IFE11_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE13_CTX0 = SYNX_CLIENT_HW_FENCE_IFE12_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE14_CTX0 = 
SYNX_CLIENT_HW_FENCE_IFE13_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_IFE15_CTX0 = SYNX_CLIENT_HW_FENCE_IFE14_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT, + SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, +}; + +#if IS_ENABLED(CONFIG_QTI_HW_FENCE) +/** + * synx_hwfence_initialize - Initializes a new client session + * + * @param params : Pointer to session init params + * + * @return Client session pointer on success. NULL or error in case of failure. + */ +struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params); + +/** + * synx_hwfence_uninitialize - Destroys the client session + * + * @param session : Session ptr (returned from synx_initialize) + * + * @return Status of operation. SYNX_SUCCESS in case of success. + */ +int synx_hwfence_uninitialize(struct synx_session *session); + +/** + * synx_hwfence_create - Creates a synx object + * + * Creates a new synx obj and returns the handle to client. + * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to create params + * + * @return Status of operation. SYNX_SUCCESS in case of success. + * -SYNX_INVALID will be returned if params were invalid. + * -SYNX_NOMEM will be returned if the kernel can't allocate space for + * synx object. + */ +int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params); + +/** + * synx_hwfence_release - Release the synx object + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle to be destroyed + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_hwfence_release(struct synx_session *session, u32 h_synx); + +/** + * synx_hwfence_signal - Signals a synx object with the status argument. + * + * This function will signal the synx object referenced by h_synx + * and invoke any external binding synx objs. + * The status parameter will indicate whether the entity + * performing the signaling wants to convey an error case or a success case. + * + * @param session : Session ptr (returned from synx_initialize) + * @param h_synx : Synx object handle + * @param status : Status of signaling. + * Clients can send custom signaling status + * beyond SYNX_STATE_SIGNALED_MAX. + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status); + +/** + * synx_hwfence_recover - Recover any possible handle leaks + * + * Function should be called on HW hang/reset to + * recover the Synx handles shared. This cleans up + * Synx handles held by the rest HW, and avoids + * potential resource leaks. + * + * Function does not destroy the session, but only + * recover synx handles belonging to the session. + * Synx session would still be active and clients + * need to destroy the session explicitly through + * synx_uninitialize API. + * + * @param id : Client ID of core to recover + * + * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise. + */ +int synx_hwfence_recover(enum synx_client_id id); + +/** + * synx_hwfence_import - Imports (looks up) synx object from given handle/fence + * + * Import subscribes the client session for notification on signal + * of handles/fences. 
+ * + * @param session : Session ptr (returned from synx_initialize) + * @param params : Pointer to import params + * + * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state + */ +int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params); + +#else /* CONFIG_QTI_HW_FENCE */ +static inline struct synx_session *synx_hwfence_initialize( + struct synx_initialization_params *params) +{ + return ERR_PTR(-SYNX_INVALID); +} + +static inline int synx_hwfence_uninitialize(struct synx_session *session) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_create(struct synx_session *session, + struct synx_create_params *params) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_release(struct synx_session *session, u32 h_synx) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_signal(struct synx_session *session, u32 h_synx, + enum synx_signal_status status) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_recover(enum synx_client_id id) +{ + return -SYNX_INVALID; +} + +static inline int synx_hwfence_import(struct synx_session *session, + struct synx_import_params *params) +{ + return -SYNX_INVALID; +} + +#endif /* CONFIG_QTI_HW_FENCE */ +#endif /* __MSM_HW_FENCE_SYNX_TRANSLATION_H */ From 41b11c1d09b93c0b8e4687dd44a99468552f2489 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 8 Dec 2022 15:40:49 -0800 Subject: [PATCH 56/77] mm-drivers: hw_fence: add implementation of translation to synx API Add implementation to translate the msm_hw_fence API into synx API. Change-Id: I5d0b7afcc297a4e3c8ec4ed9867831b5d2dfc3af Signed-off-by: Grace An --- hw_fence/Kbuild | 7 +- hw_fence/src/msm_hw_fence.c | 2 +- hw_fence/src/msm_hw_fence_synx_translation.c | 332 +++++++++++++++++++ 3 files changed, 338 insertions(+), 3 deletions(-) create mode 100644 hw_fence/src/msm_hw_fence_synx_translation.c diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 2cf74d291b..55334e8b65 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -3,7 +3,9 @@ KDIR := $(TOP)/kernel_platform/msm-kernel include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ - -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ ifdef CONFIG_QTI_HW_FENCE obj-m += msm_hw_fence.o @@ -12,7 +14,8 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_priv.o \ src/hw_fence_drv_utils.o \ src/hw_fence_drv_debug.o \ - src/hw_fence_drv_ipc.o + src/hw_fence_drv_ipc.o \ + src/msm_hw_fence_synx_translation.o msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c5531727f9..9904d934e6 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -15,7 +15,7 @@ #include "hw_fence_drv_ipc.h" struct hw_fence_driver_data *hw_fence_drv_data; -static bool hw_fence_driver_enable; +bool hw_fence_driver_enable; void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, struct msm_hw_fence_mem_addr *mem_descriptor) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c new file mode 100644 index 0000000000..f35bfcd488 --- /dev/null +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. 
All rights reserved. + */ + +#include +#include +#include +#include "msm_hw_fence_synx_translation.h" +#include "hw_fence_drv_priv.h" +#include "hw_fence_drv_debug.h" + +/** + * MAX_SUPPORTED_DPU0: + * Maximum number of dpu clients supported + */ +#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0) + +static int to_synx_status(int hw_fence_status_code) +{ + int synx_status_code; + + switch (hw_fence_status_code) { + case 0: + synx_status_code = SYNX_SUCCESS; + break; + case -ENOMEM: + synx_status_code = -SYNX_NOMEM; + break; + case -EPERM: + synx_status_code = -SYNX_NOPERM; + break; + case -ETIMEDOUT: + synx_status_code = -SYNX_TIMEOUT; + break; + case -EALREADY: + synx_status_code = -SYNX_ALREADY; + break; + case -ENOENT: + synx_status_code = -SYNX_NOENT; + break; + case -EINVAL: + synx_status_code = -SYNX_INVALID; + break; + case -EBUSY: + synx_status_code = -SYNX_BUSY; + break; + default: + synx_status_code = hw_fence_status_code; + break; + } + + return synx_status_code; +} + +static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id) +{ + enum hw_fence_client_id hw_fence_client_id; + + switch ((int)synx_client_id) { + case SYNX_CLIENT_HW_FENCE_GFX_CTX0: + hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0; + break; + case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 + + HW_FENCE_CLIENT_ID_IPE; + break; + case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 + + HW_FENCE_CLIENT_ID_VPU; + break; + case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + + HW_FENCE_CLIENT_ID_CTL0; + break; + case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE7_CTX0 + + SYNX_MAX_SIGNAL_PER_CLIENT - 1: + hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 + + HW_FENCE_CLIENT_ID_IFE0; + break; + default: + HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id); + hw_fence_client_id = HW_FENCE_CLIENT_MAX; + break; + } + + return hw_fence_client_id; +} + +static bool is_hw_fence_client(enum synx_client_id synx_client_id) +{ + return synx_client_id >= SYNX_HW_FENCE_CLIENT_START + && synx_client_id < SYNX_HW_FENCE_CLIENT_END; +} + +struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params) +{ + struct synx_session *session = NULL; + enum hw_fence_client_id client_id; + void *client_handle; + + if (!hw_fence_driver_enable) + return ERR_PTR(-SYNX_INVALID); + + if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->ptr)) { + HWFNC_ERR("invalid params:0x%pK params->ptr:0x%pK\n", params, + IS_ERR_OR_NULL(params) ? 
NULL : params->ptr); + return ERR_PTR(-SYNX_INVALID); + } + + client_id = _get_hw_fence_client_id(params->id); + if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) { + HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id); + return ERR_PTR(-SYNX_INVALID); + } + + session = kzalloc(sizeof(struct synx_session), GFP_KERNEL); + if (!session) + return ERR_PTR(-SYNX_NOMEM); + + client_handle = msm_hw_fence_register(client_id, + (struct msm_hw_fence_mem_addr *)params->ptr); + if (IS_ERR_OR_NULL(client_handle)) { + kfree(session); + HWFNC_ERR("failed to initialize synx_id:%d ret:%d\n", params->id, + PTR_ERR(client_handle)); + return ERR_PTR(to_synx_status(PTR_ERR(client_handle))); + } + session->client = client_handle; + session->type = params->id; + HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id); + + return session; +} +EXPORT_SYMBOL(synx_hwfence_initialize); + +int synx_hwfence_uninitialize(struct synx_session *session) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_deregister(session->client); + if (ret) + HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret); + else + kfree(session); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_uninitialize); + +int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params) +{ + int ret = 0; + struct msm_hw_fence_create_params hwfence_params; + u64 handle; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || + IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type, params); + return -SYNX_INVALID; + } + + if (IS_ERR_OR_NULL(params->h_synx) || (params->flags != SYNX_CREATE_DMA_FENCE) || + IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n", + session->type, params->h_synx, params->flags, params->fence); + return -SYNX_INVALID; + } + + hwfence_params.fence = params->fence; + hwfence_params.handle = &handle; + ret = msm_hw_fence_create(session->client, &hwfence_params); + if (ret) { + HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type, + params->fence, params->flags, ret); + return to_synx_status(ret); + } + if (handle > U32_MAX) { + HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type, + handle); + msm_hw_fence_destroy_with_handle(session->client, handle); + return -SYNX_INVALID; + } + *params->h_synx = handle; + + return SYNX_SUCCESS; +} +EXPORT_SYMBOL(synx_hwfence_create); + +int synx_hwfence_release(struct synx_session *session, u32 h_synx) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_destroy_with_handle(session->client, h_synx); + if (ret) + HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type, + h_synx, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_release); + +int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session, + IS_ERR_OR_NULL(session) ? -1 : session->type); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_update_txq(session->client, h_synx, 0, (u32)status); + if (ret) + HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n", + session->type, h_synx, status, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_signal); + +int synx_hwfence_recover(enum synx_client_id id) +{ + int ret; + + if (!is_hw_fence_client(id)) { + HWFNC_ERR("invalid synx_id:%d\n", id); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id), + MSM_HW_FENCE_RESET_WITHOUT_DESTROY); + if (ret) + HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret); + + return to_synx_status(ret); +} +EXPORT_SYMBOL(synx_hwfence_recover); + +static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params) +{ + u64 handle; + int ret; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || + IS_ERR_OR_NULL(params->new_h_synx) || + (params->flags != SYNX_IMPORT_DMA_FENCE) || IS_ERR_OR_NULL(params->fence)) { + HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", + client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, + IS_ERR_OR_NULL(params) ? 0 : params->flags, + IS_ERR_OR_NULL(params) ? NULL : params->fence); + return -SYNX_INVALID; + } + + ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, true); + if (ret) { + HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence, + params->flags, ret); + return to_synx_status(ret); + } + if (handle > U32_MAX) { + HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle); + msm_hw_fence_wait_update_v2(client, (struct dma_fence **)¶ms->fence, &handle, + NULL, 1, false); + return -SYNX_INVALID; + } + *params->new_h_synx = handle; + + return SYNX_SUCCESS; +} + +static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params) +{ + int i, ret; + + if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) { + HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client, + params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences); + return -SYNX_INVALID; + } + + for (i = 0; i < params->num_fences; i++) { + ret = synx_hwfence_import_indv(client, ¶ms->list[i]); + if (ret) { + HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i, + params->list[i].fence, ret); + return ret; + } + } + + return SYNX_SUCCESS; +} + +int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params) +{ + int ret; + + if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) + || IS_ERR_OR_NULL(params)) { + HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session, + IS_ERR_OR_NULL(session) ? 
-1 : session->type, params); + return -SYNX_INVALID; + } + + if (params->type == SYNX_IMPORT_ARR_PARAMS) + ret = synx_hwfence_import_arr(session->client, ¶ms->arr); + else + ret = synx_hwfence_import_indv(session->client, ¶ms->indv); + + if (ret) + HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type, + (params->type == SYNX_IMPORT_ARR_PARAMS) ? "arr" : "indv", ret); + + return ret; +} +EXPORT_SYMBOL(synx_hwfence_import); From 88f51cfe0423457f728049cd7c09c939b9e5734c Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Feb 2023 10:26:37 -0800 Subject: [PATCH 57/77] mm-drivers: hw_fence: add support for multiple ipe and vpu clients Add support for signal-based reservation of hw fence client ids for ipe and vpu clients. Change-Id: I4e4a835424756c6e5fa8d5c2d340dfadc4d11541 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 10 ++++++---- hw_fence/include/hw_fence_drv_utils.h | 12 ++++++------ hw_fence/src/hw_fence_drv_priv.c | 28 ++++++++++++++------------- hw_fence/src/hw_fence_drv_utils.c | 10 ++-------- hw_fence/src/msm_hw_fence.c | 6 +++--- 5 files changed, 32 insertions(+), 34 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 9ed047208b..d17643c4d6 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -124,10 +124,12 @@ enum hw_fence_loopback_id { /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned - * to it upon fence signaling - * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client. - * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client. - * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client. + * to it upon fence signaling. Only the first HW Fence + * Client for non-VAL clients (e.g. GFX, IPE, VPU) have + * client_data. + * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0. + * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client 0. + * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client 0. * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0. * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1. * @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 756f07b2bf..ae711d8869 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_UTILS_H @@ -8,21 +8,21 @@ /** * HW_FENCE_MAX_CLIENT_TYPE_STATIC: - * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU) + * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL) */ -#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5 +#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3 /** * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE: - * Maximum number of client types with configurable number of sub-clients (e.g. IFE) + * Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE) */ -#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8 +#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 10 /** * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: * Maximum number of static clients, i.e. 
clients without configurable numbers of sub-clients */ -#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 +#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE /** * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory. diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f69fd408e6..833bc1b077 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -568,10 +568,10 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, * * NOTE: For each Client HW-Core, the client drivers might be the ones making * it's own initialization (in case that any hw-sequence must be enforced), - * however, if that is not the case, any per-client ipcc init to enable the + * however, if that is not the case, any per-client ipcc init to enable the * signaling, can go here. */ - switch ((int)hw_fence_client->client_id) { + switch ((int)hw_fence_client->client_id_ext) { case HW_FENCE_CLIENT_ID_CTX0: /* nothing to initialize for gpu client */ break; @@ -594,8 +594,8 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTL5: #ifdef HW_DPU_IPCC /* initialize ipcc signals for dpu clients */ - HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n", - hw_fence_client->client_id, drv_data->ipcc_dpu_initialized); + HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n", + hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); if (!drv_data->ipcc_dpu_initialized) { drv_data->ipcc_dpu_initialized = true; @@ -604,10 +604,12 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, } #endif /* HW_DPU_IPCC */ break; - case HW_FENCE_CLIENT_ID_IPE: + case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for IPE client */ break; - case HW_FENCE_CLIENT_ID_VPU: + case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU + + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: /* nothing to initialize for VPU client */ break; case HW_FENCE_CLIENT_ID_IFE0 ... 
HW_FENCE_CLIENT_ID_IFE7 + @@ -615,7 +617,7 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, /* nothing to initialize for IFE clients */ break; default: - HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id); + HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext); ret = -EINVAL; break; } @@ -1207,10 +1209,10 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, enum hw_fence_client_data_id data_id; if (client_data) { - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", - client_data, hw_fence_client->client_id); + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", + client_data, hw_fence_client->client_id_ext); return -EINVAL; } } @@ -1349,9 +1351,9 @@ int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, enum hw_fence_client_data_id data_id; if (client_data) { - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n", + HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n", client_data, hw_fence_client->client_id); return -EINVAL; } @@ -1424,7 +1426,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; - data_id = hw_fence_get_client_data_id(wait_client_id); + data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext); if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) client_data = hw_fence->client_data[data_id]; diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index fa407134fb..162e962dbc 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -30,12 +30,6 @@ #define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \ HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE) -/** - * HW_FENCE_MAX_STATIC_CLIENTS_INDEX: - * Maximum number of static clients, i.e. 
clients without configurable numbers of sub-clients - */ -#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0 - /** * HW_FENCE_MIN_RXQ_CLIENTS: * Minimum number of static hw fence clients with rxq @@ -52,8 +46,8 @@ #define HW_FENCE_CLIENT_TYPE_MAX_GPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_DPU 6 #define HW_FENCE_CLIENT_TYPE_MAX_VAL 7 -#define HW_FENCE_CLIENT_TYPE_MAX_IPE 1 -#define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 +#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32 +#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 /** diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index c5531727f9..f1d1b3be2f 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -313,10 +313,10 @@ int msm_hw_fence_wait_update_v2(void *client_handle, } hw_fence_client = (struct msm_hw_fence_client *)client_handle; - data_id = hw_fence_get_client_data_id(hw_fence_client->client_id); + data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext); if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) { - HWFNC_ERR("Populating non-NULL client_data_list with unsupported client id:%d\n", - hw_fence_client->client_id); + HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n", + hw_fence_client->client_id_ext); return -EINVAL; } From cf8ab93a12abcefefc94190b1c9ca9c990e6fd22 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 28 Feb 2023 17:22:07 -0800 Subject: [PATCH 58/77] mm-drivers: hw_fence: fix edge cases for hwfence ioctls Clear doorbell mask for val client loopbacks. Create up to full number of possible fences for create_fence_array. Wait full amount of time for fence in ioctl. Change-Id: Ic0f2553f345932511fa9669b5383d8bfdaa23459 Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_debug.c | 3 ++- hw_fence/src/hw_fence_drv_utils.c | 12 ++++++------ hw_fence/src/hw_fence_ioctl.c | 18 ++++++++++++++---- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 44083141a9..c3c409a3ab 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -907,6 +907,7 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, if (!drv_data->clients[client_id]) { mutex_unlock(&drv_data->clients_register_lock); + HWFNC_ERR("Processing workaround for unregistered val client:%d\n", client_id); return -EINVAL; } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index fa407134fb..a6feec9108 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -56,6 +56,12 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 1 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 +/* + * Each bit in this mask represents each of the loopback clients supported in + * the enum hw_fence_loopback_id + */ +#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff + /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
@@ -162,12 +168,6 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -/* - * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_loopback_id - */ -#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f - static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, int client_id) { diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 7c5b141faf..456732d0db 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,8 @@ .name = #ioctl \ } +#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0)) + /** * struct hw_sync_obj - per client hw sync object. * @context: context id used to create fences. @@ -371,7 +374,7 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l return -EFAULT; num_fences = data.num_fences; - if (num_fences >= HW_FENCE_ARRAY_SIZE) { + if (num_fences > HW_FENCE_ARRAY_SIZE) { HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n", num_fences, HW_FENCE_ARRAY_SIZE); return -EINVAL; @@ -559,6 +562,7 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) struct msm_hw_fence_queue_payload payload; struct hw_fence_sync_wait_data data; struct dma_fence *fence; + ktime_t cur_ktime, exp_ktime; int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ if (!_is_valid_client(obj)) @@ -582,9 +586,15 @@ static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg) return -EINVAL; } - ret = wait_event_timeout(hw_fence_client->wait_queue, - atomic_read(&hw_fence_client->val_signal) > 0, - msecs_to_jiffies(data.timeout_ms)); + exp_ktime = ktime_add_ms(ktime_get(), data.timeout_ms); + do { + ret = wait_event_timeout(hw_fence_client->wait_queue, + atomic_read(&hw_fence_client->val_signal) > 0, + msecs_to_jiffies(data.timeout_ms)); + cur_ktime = ktime_get(); + } while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) && + ktime_compare_safe(exp_ktime, cur_ktime) > 0); + if (!ret) { HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms); /* Decrement the refcount that hw_sync_get_fence increments */ From e59a1e44645e426550c098a68948a79182aea232 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 14 Mar 2023 11:35:19 -0700 Subject: [PATCH 59/77] mm-drivers: hw_fence: avoid compiling synx translation layer on kalama Avoid compiling synx translation layer on kalama target where synx driver is not available. 
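Illustrative sketch (not part of any patch in this series): the bounded re-wait pattern introduced by the hw_sync_ioctl_fence_wait hunk above can be read as the following standalone helper. The helper name and return convention are invented for illustration; the ktime_compare_safe macro and the wait_event_timeout/ktime calls are the same ones used in that hunk.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait.h>

/* overflow-safe ktime comparison, as defined in hw_fence_ioctl.c above */
#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))

/*
 * Keep re-arming wait_event_timeout() until the signal is observed or the
 * absolute deadline (timeout_ms measured from the first call) expires, so a
 * spurious early wakeup does not shorten the total wait time.
 */
static int wait_signal_bounded(wait_queue_head_t *wq, atomic_t *signal, u32 timeout_ms)
{
	ktime_t cur_ktime, exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
	long ret;

	do {
		ret = wait_event_timeout(*wq, atomic_read(signal) > 0,
					 msecs_to_jiffies(timeout_ms));
		cur_ktime = ktime_get();
	} while (atomic_read(signal) <= 0 && ret == 0 &&
		 ktime_compare_safe(exp_ktime, cur_ktime) > 0);

	return ret ? 0 : -ETIMEDOUT;
}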
Change-Id: I0a4f8c291fc3843065e75f536b4e16a246ea69d4 Signed-off-by: Grace An --- hw_fence/Kbuild | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/hw_fence/Kbuild b/hw_fence/Kbuild index 55334e8b65..3bcd693da7 100644 --- a/hw_fence/Kbuild +++ b/hw_fence/Kbuild @@ -3,9 +3,7 @@ KDIR := $(TOP)/kernel_platform/msm-kernel include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \ - -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ \ - -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ - -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ + -I$(MSM_HW_FENCE_ROOT)hw_fence/include/ ifdef CONFIG_QTI_HW_FENCE obj-m += msm_hw_fence.o @@ -14,8 +12,13 @@ msm_hw_fence-y := src/msm_hw_fence.o \ src/hw_fence_drv_priv.o \ src/hw_fence_drv_utils.o \ src/hw_fence_drv_debug.o \ - src/hw_fence_drv_ipc.o \ - src/msm_hw_fence_synx_translation.o + src/hw_fence_drv_ipc.o + +ifneq ($(CONFIG_ARCH_KALAMA), y) +LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \ + -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/ +msm_hw_fence-y += src/msm_hw_fence_synx_translation.o +endif msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o From 2348b032738c2e0498ee283e419ff42b38e9b773 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 13 Dec 2022 11:49:57 -0800 Subject: [PATCH 60/77] mm-drivers: hw_fence: add device-tree configurable queue padding Add device-tree configurable padding in bytes before and after queue header(s). This enables support for 32-byte aligned queue write_idx, which is a requirement to satisfy hardware constraints by some clients. Change-Id: Icfd6bb385c825a8629974c72522efdc3cbfe3303 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 45 +++++++-- hw_fence/include/hw_fence_drv_utils.h | 37 ++------ hw_fence/src/hw_fence_drv_priv.c | 43 +++++---- hw_fence/src/hw_fence_drv_utils.c | 130 ++++++++++++++++++-------- hw_fence/src/msm_hw_fence.c | 20 ++-- 5 files changed, 178 insertions(+), 97 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index d17643c4d6..2e03faba41 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -172,6 +172,7 @@ enum payload_type { * number of sub-clients (e.g. ife clients) * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor + * @queues_num: number of client queues * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client @@ -187,6 +188,7 @@ struct msm_hw_fence_client { enum hw_fence_client_id client_id_ext; struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; + int queues_num; int ipc_signal_id; int ipc_client_vid; int ipc_client_pid; @@ -239,24 +241,48 @@ struct msm_hw_fence_dbg_data { }; /** - * struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client. + * struct hw_fence_client_type_desc - Structure holding client type properties, including static + * properties and client queue properties read from device-tree. 
* - * @queues_num: number of client queues - * @queue_entries: number of queue entries per client queue - * @mem_size: size of memory allocated for client queues - * @start_offset: start offset of client queue memory region, from beginning of carved-out memory - * allocation for hw fence driver + * @name: name of client type, used to parse properties from device-tree + * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. + * HW_FENCE_CLIENT_ID_CTL0 for DPU clients + * @max_clients_num: maximum number of clients of given client type + * @clients_num: number of clients of given client type + * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or + * two (for both Tx and Rx Queues) + * @queue_entries: number of entries per client queue of given client type + * @start_padding: size of padding between queue table header and first queue header in bytes + * @end_padding: size of padding between queue header(s) and first queue payload in bytes + * @mem_size: size of memory allocated for client queue(s) per client in bytes * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence * driver and hfi_header->tx_wm is updated instead */ -struct hw_fence_client_queue_size_desc { +struct hw_fence_client_type_desc { + char *name; + enum hw_fence_client_id init_id; + u32 max_clients_num; + u32 clients_num; u32 queues_num; u32 queue_entries; + u32 start_padding; + u32 end_padding; u32 mem_size; - u32 start_offset; bool skip_txq_wr_idx; }; +/** + * struct hw_fence_client_queue_desc - Structure holding client queue properties for a client. + * + * @type: pointer to client queue properties of client type + * @start_offset: start offset of client queue memory region, from beginning of carved-out memory + * allocation for hw fence driver + */ +struct hw_fence_client_queue_desc { + struct hw_fence_client_type_desc *type; + u32 start_offset; +}; + /** * struct hw_fence_driver_data - Structure holding internal hw-fence driver data * @@ -268,6 +294,7 @@ struct hw_fence_client_queue_size_desc { * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client + * @hw_fence_client_types: descriptors of properties for each hw fence client type * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree) * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table @@ -320,7 +347,7 @@ struct hw_fence_driver_data { u32 hw_fence_ctrl_queue_size; u32 hw_fence_mem_ctrl_queues_size; /* client queues */ - struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size; + struct hw_fence_client_queue_desc *hw_fence_client_queue_size; struct hw_fence_client_type_desc *hw_fence_client_types; u32 rxq_clients_num; u32 clients_num; diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index ae711d8869..454b5b570d 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -38,33 +38,6 @@ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CLIENT_QUEUE }; -/** - * struct hw_fence_client_type_desc - Structure holding client type properties, including static - * properties and client queue properties read from device-tree. 
- * - * @name: name of client type, used to parse properties from device-tree - * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g. - * HW_FENCE_CLIENT_ID_CTL0 for DPU clients - * @max_clients_num: maximum number of clients of given client type - * @clients_num: number of clients of given client type - * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or - * two (for both Tx and Rx Queues) - * @queue_entries: number of entries per client queue of given client type - * @mem_size: size of memory allocated for client queue(s) per client - * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence - * driver and hfi_header->tx_wm is updated instead - */ -struct hw_fence_client_type_desc { - char *name; - enum hw_fence_client_id init_id; - u32 max_clients_num; - u32 clients_num; - u32 queues_num; - u32 queue_entries; - u32 mem_size; - bool skip_txq_wr_idx; -}; - /** * global_atomic_store() - Inter-processor lock * @drv_data: hw fence driver data @@ -173,6 +146,16 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id); +/** + * hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id. + * + * @drv_data: driver data + * @client_id: hw fence driver client id + * + * Returns: number of client queues + */ +int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id); + /** * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index * is not updated in hw fence driver. Instead, diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 833bc1b077..ea931f1510 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -32,10 +32,12 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence_hfi_queue_table_header *hfi_table_header; struct msm_hw_fence_hfi_queue_header *hfi_queue_header; + struct hw_fence_client_type_desc *desc; void *ptr, *qptr; phys_addr_t phys, qphys; u32 size, start_queue_offset; int headers_size, queue_size, payload_size; + int start_padding = 0, end_padding = 0; int i, ret = 0; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); @@ -46,14 +48,19 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= drv_data->clients_num) { - HWFNC_ERR("Invalid client_id: %d\n", client_id); + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id, + drv_data->clients_num); return -EINVAL; } - headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num); - queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * - drv_data->hw_fence_client_queue_size[client_id].queue_entries; + desc = drv_data->hw_fence_client_queue_size[client_id].type; + start_padding = desc->start_padding; + end_padding = desc->end_padding; + headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding + + end_padding; + queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; break; default: @@ -75,16 +82,15 @@ static int init_hw_fences_queues(struct 
hw_fence_driver_data *drv_data, mem_descriptor->size = size; /* bytes */ mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */ - HWFNC_DBG_INIT("Initialize headers\n"); + HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n", + headers_size, start_padding, end_padding); /* Initialize headers info within hfi memory */ hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr; hfi_table_header->version = 0; hfi_table_header->size = size; /* bytes */ /* Offset, from the Base Address, where the first queue header starts */ - hfi_table_header->qhdr0_offset = - sizeof(struct msm_hw_fence_hfi_queue_table_header); - hfi_table_header->qhdr_size = - sizeof(struct msm_hw_fence_hfi_queue_header); + hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding; + hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE; hfi_table_header->num_q = queues_num; /* number of queues */ hfi_table_header->num_active_q = queues_num; @@ -96,7 +102,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, */ HWFNC_DBG_INIT("Initialize queues\n"); hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *) - ((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE); + ((char *)ptr + hfi_table_header->qhdr0_offset); for (i = 0; i < queues_num; i++) { HWFNC_DBG_INIT("init queue[%d]\n", i); @@ -251,10 +257,9 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, u32 *wr_ptr; int ret = 0; - if (queue_type >= - drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) { - HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type, - hw_fence_client->client_id); + if (queue_type >= hw_fence_client->queues_num) { + HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type, + hw_fence_client->client_id, hw_fence_client->queues_num); return -EINVAL; } @@ -539,10 +544,16 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data, { int ret; + if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) { + HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n", + hw_fence_client->client_id); + return -EINVAL; + } + /* Init client queues */ ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, &hw_fence_client->mem_descriptor, hw_fence_client->queues, - drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num, + drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num, hw_fence_client->client_id); if (ret) { HWFNC_ERR("Failure to init the queue for client:%d\n", diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9293e52cb3..e2dc4b04ca 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -77,23 +77,23 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - 
HW_FENCE_CLIENT_QUEUES, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, false}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, }; static void _lock(uint64_t *wait) @@ -549,23 +549,16 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, *size = drv_data->hw_fence_mem_fences_table_size; break; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: - if (client_id >= drv_data->clients_num) { - HWFNC_ERR("unexpected client_id:%d\n", client_id); + if (client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id, + drv_data->clients_num); ret = -EINVAL; goto exit; } start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; - *size = drv_data->hw_fence_client_queue_size[client_id].mem_size; - - /* - * If this error occurs when client should be valid, check that support for this - * client has been configured in device-tree properties. 
- */ - if (!*size) { - HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id); - ret = -EINVAL; - } + *size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size; break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); @@ -592,6 +585,49 @@ exit: return ret; } +static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, + struct hw_fence_client_type_desc *desc) +{ + char name[40]; + u32 tmp[2]; + int ret; + + snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name); + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2); + + /* extra dt props not set */ + if (ret) + return 0; + + desc->start_padding = tmp[0]; + desc->end_padding = tmp[1]; + + if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || + (desc->start_padding + desc->end_padding) % sizeof(u64)) { + HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n", + desc->name, desc->start_padding, desc->end_padding); + return -EINVAL; + } + + if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { + HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding); + return -EINVAL; + } + + if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - + desc->start_padding) { + HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n", + desc->name, desc->queues_num, desc->start_padding, desc->end_padding); + return -EINVAL; + } + + HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name, + desc->start_padding, desc->end_padding); + + return 0; +} + static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { @@ -600,7 +636,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da u32 queue_size; int ret; - /* parse client queue property from device-tree */ + /* parse client queue properties from device-tree */ snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name); ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4); if (ret) { @@ -626,6 +662,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da return -EINVAL; } + /* parse extra client queue properties from device-tree */ + ret = _parse_client_queue_dt_props_extra(drv_data, desc); + if (ret) { + HWFNC_ERR("%s failed to parse extra dt props\n", desc->name); + return -EINVAL; + } + /* compute mem_size */ if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) { HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n", @@ -635,17 +678,18 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; if (queue_size >= ((U32_MAX & PAGE_MASK) - - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) { - HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n", - desc->name, queue_size); + (HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + + desc->start_padding + desc->end_padding)) / desc->queues_num) { + HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n", + desc->name, queue_size, desc->start_padding, desc->end_padding); return -EINVAL; } desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) + - (queue_size * 
desc->queues_num)); + (queue_size * desc->queues_num) + desc->start_padding + desc->end_padding); if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) { - HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n", + HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n", desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE); return -EINVAL; } @@ -690,7 +734,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num; /* allocate memory for client queue size descriptors */ - size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc); + size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc); drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL); if (!drv_data->hw_fence_client_queue_size) return -ENOMEM; @@ -707,9 +751,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data) hw_fence_utils_get_client_id_priv(drv_data, client_id_ext); drv_data->hw_fence_client_queue_size[client_id] = - (struct hw_fence_client_queue_size_desc) - {desc->queues_num, desc->queue_entries, desc->mem_size, - start_offset, desc->skip_txq_wr_idx}; + (struct hw_fence_client_queue_desc){desc, start_offset}; HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n", desc->name, client_id_ext, client_id, start_offset); start_offset += desc->mem_size; @@ -929,10 +971,24 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver return client_id_priv; } +int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id) +{ + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d queues_num\n", client_id); + return 0; + } + + return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; +} + bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) { - if (!drv_data || client_id >= drv_data->clients_num) + if (!drv_data || client_id >= drv_data->clients_num || + !drv_data->hw_fence_client_queue_size[client_id].type) { + HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id); return false; + } - return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx; + return drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 517a991741..d243b06543 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -87,15 +87,18 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, } hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id); - if (hw_fence_client->update_rxq && - hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num < - HW_FENCE_CLIENT_QUEUES) { - HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id); + hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); + + hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id); + if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq && + hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) { + HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id, + hw_fence_client->queues_num, + hw_fence_client->update_rxq ? 
"true" : "false"); ret = -EINVAL; goto error; } - hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id); hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, client_id); @@ -118,9 +121,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, if (ret) goto error; - HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n", - hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id, - hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid); + HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", + hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num, + hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, + hw_fence_client->ipc_client_pid); #if IS_ENABLED(CONFIG_DEBUG_FS) init_waitqueue_head(&hw_fence_client->wait_queue); From 414d3b480d09eb243261b2a1c4ba49da5957d436 Mon Sep 17 00:00:00 2001 From: Grace An Date: Wed, 11 Jan 2023 16:58:22 -0800 Subject: [PATCH 61/77] mm-drivers: hw_fence: add support for client queue alternate indexing Some clients require that write_index starts from nonzero value and index by payload instead of by dwords. Add support for device-tree configurable properties to control nonzero index start_index and indexing by payload for client tx queue read and write indices. Change-Id: I8942dc2d25a7d1cb0421cabd36c73a404ecd0134 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 14 +++- hw_fence/include/hw_fence_drv_utils.h | 13 --- hw_fence/src/hw_fence_drv_priv.c | 79 ++++++++++++++++-- hw_fence/src/hw_fence_drv_utils.c | 116 +++++++++++++++++--------- hw_fence/src/msm_hw_fence.c | 5 +- 5 files changed, 163 insertions(+), 64 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 2e03faba41..f1786831fb 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -150,12 +150,19 @@ enum hw_fence_client_data_id { * @q_size_bytes: size of the queue * @va_header: pointer to the hfi header virtual address * @pa_queue: physical address of the queue + * @rd_wr_idx_start: start read and write indexes for client queue (zero by default) + * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default) + * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and + * hfi_header->tx_wm is updated instead */ struct msm_hw_fence_queue { void *va_queue; u32 q_size_bytes; void *va_header; phys_addr_t pa_queue; + u32 rd_wr_idx_start; + u32 rd_wr_idx_factor; + bool skip_wr_idx; }; /** @@ -178,8 +185,6 @@ enum payload_type { * @ipc_client_pid: physical id of the ipc client for this hw fence driver client * @update_rxq: bool to indicate if client uses rx-queue * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences - * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence - * driver and hfi_header->tx_wm is updated instead * @wait_queue: wait queue for the validation clients * @val_signal: doorbell flag to signal the validation clients in the wait queue */ @@ -194,7 +199,6 @@ struct msm_hw_fence_client { int ipc_client_pid; bool update_rxq; bool send_ipc; - bool skip_txq_wr_idx; #if IS_ENABLED(CONFIG_DEBUG_FS) wait_queue_head_t wait_queue; atomic_t val_signal; @@ -255,6 +259,8 @@ struct msm_hw_fence_dbg_data { * @start_padding: size of padding 
between queue table header and first queue header in bytes * @end_padding: size of padding between queue header(s) and first queue payload in bytes * @mem_size: size of memory allocated for client queue(s) per client in bytes + * @txq_idx_start: start read and write indexes for client tx queue (zero by default) + * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default) * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence * driver and hfi_header->tx_wm is updated instead */ @@ -268,6 +274,8 @@ struct hw_fence_client_type_desc { u32 start_padding; u32 end_padding; u32 mem_size; + u32 txq_idx_start; + u32 txq_idx_factor; bool skip_txq_wr_idx; }; diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 454b5b570d..6b35962f41 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -156,17 +156,4 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver */ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id); -/** - * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index - * is not updated in hw fence driver. Instead, - * hfi_header->tx_wm tracks where payload is written within - * the queue. - * - * @drv_data: driver data - * @client_id: hw fence driver client id - * - * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise - */ -bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id); - #endif /* __HW_FENCE_DRV_UTILS_H */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index ea931f1510..f47abca728 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -15,6 +15,8 @@ /* Global atomic lock */ #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val) +#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1) + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef HWFENCE_USE_SLEEP_TIMER @@ -35,10 +37,11 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc; void *ptr, *qptr; phys_addr_t phys, qphys; - u32 size, start_queue_offset; + u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1; int headers_size, queue_size, payload_size; int start_padding = 0, end_padding = 0; int i, ret = 0; + bool skip_txq_wr_idx = false; HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id); switch (mem_reserve_id) { @@ -62,6 +65,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, end_padding; queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries; payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD; + txq_idx_start = desc->txq_idx_start; + txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1; + skip_txq_wr_idx = desc->skip_txq_wr_idx; break; default: HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id); @@ -115,7 +121,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, hfi_queue_header->start_addr = qphys; /* Set the queue type (i.e. RX or TX queue) */ - hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE; + hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? 
HW_FENCE_TX_QUEUE : + HW_FENCE_RX_QUEUE; /* Set the size of this header */ hfi_queue_header->queue_size = queue_size; @@ -123,6 +130,20 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, /* Set the payload size */ hfi_queue_header->pkt_size = payload_size; + /* Set write index for clients' tx queues that index from nonzero value */ + if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) { + if (skip_txq_wr_idx) + hfi_queue_header->tx_wm = txq_idx_start; + hfi_queue_header->read_index = txq_idx_start; + hfi_queue_header->write_index = txq_idx_start; + HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id, + skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx", + txq_idx_start); + } + + /* Update memory for hfi_queue_header */ + wmb(); + /* Store Memory info in the Client data */ queues[i].va_queue = qptr; queues[i].pa_queue = qphys; @@ -133,6 +154,18 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, queues[i].q_size_bytes, payload_size); + /* Store additional tx queue rd_wr_idx properties */ + if (IS_HW_FENCE_TX_QUEUE(i)) { + queues[i].rd_wr_idx_start = txq_idx_start; + queues[i].rd_wr_idx_factor = txq_idx_factor; + queues[i].skip_wr_idx = skip_txq_wr_idx; + } else { + queues[i].rd_wr_idx_factor = 1; + } + HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n", + queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor, + queues[i].skip_wr_idx ? "true" : "false"); + /* Next header */ hfi_queue_header++; } @@ -189,6 +222,14 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); @@ -215,6 +256,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, if (to_read_idx >= q_size_u32) to_read_idx = 0; + /* translate to_read_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Read the Client Queue */ payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); payload->seqno = readq_relaxed(&read_ptr_payload->seqno); @@ -275,8 +323,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, return -EINVAL; } - /* if skipping update txq wr_index, then use hfi_header->tx_wm instead */ - if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx) + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) wr_ptr = &hfi_header->tx_wm; else wr_ptr = &hfi_header->write_index; @@ -310,8 +358,15 @@ int 
hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, - read_idx, write_idx, queue, queue_type, - hw_fence_client->skip_txq_wr_idx ? "true" : "false"); + read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false"); + + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : @@ -346,6 +401,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, if (to_write_idx >= q_size_u32) to_write_idx = 0; + /* translate to_write_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Update Client Queue */ writeq_relaxed(payload_size, &write_ptr_payload->size); writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); @@ -1462,8 +1524,12 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* For the client TxQ: set the read-index same as last write that was done by the client */ mb(); /* make sure data is ready before read */ wr_idx = readl_relaxed(&hfi_header->write_index); + if (queue->skip_wr_idx) + hfi_header->tx_wm = wr_idx; writel_relaxed(wr_idx, &hfi_header->read_index); wmb(); /* make sure data is updated after write the index*/ + HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n", + queue->skip_wr_idx ? 
"read_index=tx_wm" : "read_index", wr_idx); /* For the client RxQ: set the write-index same as last read done by the client */ if (hw_fence_client->update_rxq) { @@ -1489,6 +1555,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* unlock */ GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx); } } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index e2dc4b04ca..a42329ecb7 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -77,23 +77,31 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, }; static void _lock(uint64_t *wait) @@ -588,44 +596,87 @@ exit: static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { + u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD 
/ sizeof(u32); char name[40]; - u32 tmp[2]; - int ret; + u32 tmp[4]; + bool idx_by_payload = false; + int count, ret; snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name); - ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2); - /* extra dt props not set */ - if (ret) + /* check if property is present */ + ret = of_property_read_bool(drv_data->dev->of_node, name); + if (!ret) return 0; + count = of_property_count_u32_elems(drv_data->dev->of_node, name); + if (count <= 0 || count > 4) { + HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count); + if (ret) { + HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name, + ret, count); + ret = -EINVAL; + goto exit; + } + desc->start_padding = tmp[0]; - desc->end_padding = tmp[1]; + if (count >= 2) + desc->end_padding = tmp[1]; + if (count >= 3) + desc->txq_idx_start = tmp[2]; + if (count >= 4) { + if (tmp[3] > 1) { + HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]); + ret = -EINVAL; + goto exit; + } + idx_by_payload = tmp[3]; + desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1; + } if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) || (desc->start_padding + desc->end_padding) % sizeof(u64)) { HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n", desc->name, desc->start_padding, desc->end_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) { HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) - desc->start_padding) { HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n", desc->name, desc->queues_num, desc->start_padding, desc->end_padding); - return -EINVAL; + ret = -EINVAL; + goto exit; } - HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name, - desc->start_padding, desc->end_padding); + max_idx_from_zero = idx_by_payload ? desc->queue_entries : + desc->queue_entries * payload_size_u32; + if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) { + HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n", + desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false", + desc->queue_entries); + ret = -EINVAL; + goto exit; + } - return 0; + HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n", + desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start, + idx_by_payload ? 
"true" : "false"); + +exit: + return ret; } static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, @@ -981,14 +1032,3 @@ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int cli return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; } - -bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) -{ - if (!drv_data || client_id >= drv_data->clients_num || - !drv_data->hw_fence_client_queue_size[client_id].type) { - HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id); - return false; - } - - return drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx; -} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index d243b06543..82ee33bdaa 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -99,16 +99,13 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, goto error; } - hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, - client_id); - /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, hw_fence_client, mem_descriptor); if (ret) goto error; - /* Initialize signal for communication withe FenceCTL */ + /* Initialize signal for communication with FenceCTL */ ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); if (ret) goto error; From b39fb22f2dfc0586d296d2604a3b258ca1a76ce7 Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Tue, 17 Jan 2023 14:14:27 -0800 Subject: [PATCH 62/77] mm-drivers: hw_fence: avoid mem share from rm callback Avoid from hw-fence driver call to share carved-out memory between hlos and cpusys vm if the memory has been already shared by the gh_cpusys_vm_mem_access kernel driver. 
Change-Id: I0df0216a6153a8982936885f53bebf7fe83db7e9 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_utils.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a42329ecb7..f39234c361 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "hw_fence_drv_priv.h" @@ -404,8 +405,10 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da { struct gh_rm_notif_vm_status_payload *vm_status_payload; struct hw_fence_driver_data *drv_data; + struct resource res; gh_vmid_t peer_vmid; gh_vmid_t self_vmid; + int ret; drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb); @@ -430,11 +433,25 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da switch (vm_status_payload->vm_status) { case GH_RM_VM_STATUS_READY: - HWFNC_DBG_INIT("init mem\n"); - if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) - HWFNC_ERR("failed to share memory\n"); - else - drv_data->vm_ready = true; + ret = gh_cpusys_vm_get_share_mem_info(&res); + if (ret) { + HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret); + if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) + HWFNC_ERR("failed to share memory\n"); + else + drv_data->vm_ready = true; + } else { + if (drv_data->res.start == res.start && + resource_size(&drv_data->res) == resource_size(&res)) { + drv_data->vm_ready = true; + HWFNC_DBG_INIT("mem_ready: add:0x%x size:%d ret:%d\n", res.start, + resource_size(&res), ret); + } else { + HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n", + res.start, resource_size(&res), drv_data->res.start, + resource_size(&drv_data->res)); + } + } break; case GH_RM_VM_STATUS_RESET: HWFNC_DBG_INIT("reset\n"); From d39f91c9bd64739527d2ffe18c67d81f628c9544 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 15 Dec 2022 09:41:59 -0800 Subject: [PATCH 63/77] mm-drivers: hw_fence: remove deprecated workarounds for no dpu-ipc signals Remove HW Fence driver deprecated support of platforms without dpu-ipc signaling, e.g. targets prior to kalama. Change-Id: I8491a96040b4c3857a32a9bf6092e53479284a64 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_ipc.h | 8 +- hw_fence/include/hw_fence_drv_priv.h | 50 --------- hw_fence/include/hw_fence_drv_utils.h | 9 -- hw_fence/src/hw_fence_drv_debug.c | 8 +- hw_fence/src/hw_fence_drv_ipc.c | 61 +---------- hw_fence/src/hw_fence_drv_priv.c | 11 -- hw_fence/src/hw_fence_drv_utils.c | 139 ++------------------------ 7 files changed, 16 insertions(+), 270 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_ipc.h b/hw_fence/include/hw_fence_drv_ipc.h index 07b7aa754c..93bafd1e93 100644 --- a/hw_fence/include/hw_fence_drv_ipc.h +++ b/hw_fence/include/hw_fence_drv_ipc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __HW_FENCE_DRV_IPC_H @@ -36,14 +36,10 @@ #define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17 #define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2 -#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4 -#define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */ -#define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */ #define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */ #define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */ @@ -73,7 +69,6 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data, */ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); -#ifdef HW_DPU_IPCC /** * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client. * @drv_data: driver data. @@ -81,7 +76,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data); * Return: 0 on success or negative errno (-EINVAL) */ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data); -#endif /* HW_DPU_IPCC */ /** * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index f1786831fb..f934017749 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -14,9 +14,6 @@ #include #include -/* Add define only for platforms that support IPCC in dpu-hw */ -#define HW_DPU_IPCC 1 - /* max u64 to indicate invalid fence */ #define HW_FENCE_INVALID_PARENT_FENCE (~0ULL) @@ -82,45 +79,6 @@ enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_FIND_FENCE }; -/** - * enum hw_fence_loopback_id - Enum with the clients having a loopback signal (i.e AP to AP signal). - * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc. - * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support. - * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0. - * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1. - * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2. - * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3. - * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4. - * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5. - * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6. 
- */ -enum hw_fence_loopback_id { - HW_FENCE_LOOPBACK_DPU_CTL_0, - HW_FENCE_LOOPBACK_DPU_CTL_1, - HW_FENCE_LOOPBACK_DPU_CTL_2, - HW_FENCE_LOOPBACK_DPU_CTL_3, - HW_FENCE_LOOPBACK_DPU_CTL_4, - HW_FENCE_LOOPBACK_DPU_CTL_5, - HW_FENCE_LOOPBACK_GFX_CTX_0, -#if IS_ENABLED(CONFIG_DEBUG_FS) - HW_FENCE_LOOPBACK_VAL_0 = HW_FENCE_CLIENT_ID_VAL0, - HW_FENCE_LOOPBACK_VAL_1, - HW_FENCE_LOOPBACK_VAL_2, - HW_FENCE_LOOPBACK_VAL_3, - HW_FENCE_LOOPBACK_VAL_4, - HW_FENCE_LOOPBACK_VAL_5, - HW_FENCE_LOOPBACK_VAL_6, -#endif /* CONFIG_DEBUG_FS */ - HW_FENCE_LOOPBACK_MAX, -}; - -#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1) - /** * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional * parameter passed from the waiting client and returned @@ -334,8 +292,6 @@ struct hw_fence_client_queue_desc { * @qtime_reg_base: qtimer register base address * @qtime_io_mem: qtimer io mem map * @qtime_size: qtimer io mem map size - * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc) - * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc) * @client_id_mask: bitmask for tracking registered client_ids * @clients_register_lock: lock to synchronize clients registration and deregistration * @clients: table with the handles of the registered clients; size is equal to clients_num @@ -409,10 +365,6 @@ struct hw_fence_driver_data { void __iomem *qtime_io_mem; uint32_t qtime_size; - /* base address for dpu ctl start regs */ - void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS]; - /* synchronize client_ids registration and deregistration */ struct mutex clients_register_lock; @@ -420,10 +372,8 @@ struct hw_fence_driver_data { struct msm_hw_fence_client **clients; bool vm_ready; -#ifdef HW_DPU_IPCC /* state variables */ bool ipcc_dpu_initialized; -#endif /* HW_DPU_IPCC */ }; /** diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 6b35962f41..9063385a23 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -106,15 +106,6 @@ int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data); */ int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data); -/** - * hw_fence_utils_map_ctl_start() - Maps ctl_start registers from dpu hw - * @drv_data: hw fence driver data - * - * Returns zero if success, otherwise returns negative error code. This API is only used - * for simulation purposes in platforms where dpu does not support ipc signal. - */ -int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data); - /** * hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client * @drv_data: hw fence driver data diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index c3c409a3ab..46eb3e4abe 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -112,7 +112,6 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use drv_data->ipcc_client_vid); } -#ifdef HW_DPU_IPCC /** * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core. * @file: file handler. 
@@ -137,7 +136,6 @@ static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = { .open = simple_open, .write = hw_fence_dbg_ipcc_dpu_write, }; -#endif /* HW_DPU_IPCC */ static const struct file_operations hw_fence_dbg_ipcc_fops = { .open = simple_open, @@ -897,9 +895,9 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, { struct msm_hw_fence_client *hw_fence_client; - if (client_id < HW_FENCE_LOOPBACK_VAL_0 || client_id > HW_FENCE_LOOPBACK_VAL_6) { + if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) { HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id, - HW_FENCE_LOOPBACK_VAL_0, HW_FENCE_LOOPBACK_VAL_6); + HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6); return -EINVAL; } @@ -982,10 +980,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data, &hw_fence_dbg_ipcc_fops); -#ifdef HW_DPU_IPCC debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data, &hw_fence_dbg_ipcc_dpu_fops); -#endif /* HW_DPU_IPCC */ debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data, &hw_fence_reset_client_fops); debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data, diff --git a/hw_fence/src/hw_fence_drv_ipc.c b/hw_fence/src/hw_fence_drv_ipc.c index a2289fc8ee..a9b317e87c 100644 --- a/hw_fence/src/hw_fence_drv_ipc.c +++ b/hw_fence/src/hw_fence_drv_ipc.c @@ -32,37 +32,6 @@ struct hw_fence_client_ipc_map { bool send_ipc; }; -/** - * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which - * is used by the hw fence driver to trigger ipc signal when the hw fence is already - * signaled. - * This no_dpu version is for targets that do not support dpu client id - * - * Notes: - * The index of this struct must match the enum hw_fence_client_id. - * To change to a loopback signal instead of GMU, change ctx0 row to use: - * {HW_FENCE_IPC_CLIENT_ID_APPS, 20}. 
- */ -struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = { - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/ - {HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 15, false, true},/*ctl1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 16, false, true},/*ctl2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 17, false, true},/*ctl3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/ -#if IS_ENABLED(CONFIG_DEBUG_FS) - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/ - {HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/ -#endif /* CONFIG_DEBUG_FS */ -}; - /** * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is * used by the hw fence driver to trigger ipc signal when hw fence is already @@ -337,20 +306,6 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 int ret = 0; switch (hwrev) { - case HW_FENCE_IPCC_HW_REV_100: - drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA; - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; - HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n"); - break; - case HW_FENCE_IPCC_HW_REV_110: - drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; - drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO; - drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu; - HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n"); - break; case HW_FENCE_IPCC_HW_REV_170: drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID; @@ -381,20 +336,10 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) HWFNC_DBG_H("enable ipc +\n"); - /** - * Attempt to read the ipc version from dt, if not available, then attempt - * to read from the registers. 
- */ ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val); if (ret || !val) { - /* if no device tree prop, attempt to get the version from the registers*/ - HWFNC_DBG_H("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); - - /* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */ - val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem, - HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, - HW_FENCE_IPC_CLIENT_ID_APPS_VID)); - HWFNC_DBG_INIT("ipcc version:0x%x\n", val); + HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val); + return -EINVAL; } if (_hw_fence_ipcc_hwrev_init(drv_data, val)) { @@ -421,7 +366,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data) return 0; } -#ifdef HW_DPU_IPCC int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) { struct hw_fence_client_ipc_map *hw_fence_client; @@ -482,4 +426,3 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data) return 0; } -#endif /* HW_DPU_IPCC */ diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index f47abca728..e59162b1f5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -567,15 +567,6 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) goto exit; } - /* Map ctl_start registers */ - ret = hw_fence_utils_map_ctl_start(drv_data); - if (ret) { - /* This is not fatal error, since platfoms with dpu-ipc - * won't use this option - */ - HWFNC_WARN("no ctl_start regs, won't trigger the frame\n"); - } - /* Init debugfs */ ret = hw_fence_debug_debugfs_register(drv_data); if (ret) { @@ -665,7 +656,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, case HW_FENCE_CLIENT_ID_CTL3: case HW_FENCE_CLIENT_ID_CTL4: case HW_FENCE_CLIENT_ID_CTL5: -#ifdef HW_DPU_IPCC /* initialize ipcc signals for dpu clients */ HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n", hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized); @@ -675,7 +665,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data, /* Init dpu client ipcc signal */ hw_fence_ipcc_enable_dpu_signaling(drv_data); } -#endif /* HW_DPU_IPCC */ break; case HW_FENCE_CLIENT_ID_IPE ... 
HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1: diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index a42329ecb7..bca3b959c5 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -52,7 +52,7 @@ /* * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_loopback_id + * the enum hw_fence_client_id */ #define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff @@ -170,89 +170,20 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data, - int client_id) -{ - int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */ - void *ctl_start_reg; - u32 val; - - if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) { - HWFNC_ERR("invalid ctl_id:%d\n", ctl_id); - return -EINVAL; - } - - ctl_start_reg = drv_data->ctl_start_ptr[ctl_id]; - if (!ctl_start_reg) { - HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id); - return -EINVAL; - } - - HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id); - - val = 0x1; /* ctl_start trigger */ -#ifdef CTL_START_SIM - HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_start_reg, val, ctl_id); - writel_relaxed(val, ctl_start_reg); -#else - HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id, - ctl_start_reg, val); -#endif - - return 0; -} - -static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data, - int client_id) -{ - int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */ - struct msm_hw_fence_queue_payload payload; - int read = 1; - - HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id); - while (read) { - /* - * 'client_id' is the loopback-client-id, not the hw-fence client_id, - * so use GFX hw-fence client id, to get the client data - */ - read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload, - queue_type); - if (read < 0) { - HWFNC_ERR("unable to read gfx rxq\n"); - break; - } - HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n", - payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error); - } - - return read; -} - static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) { int ret; - HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id); + HWFNC_DBG_H("Processing doorbell client_id:%d\n", client_id); switch (client_id) { - case HW_FENCE_LOOPBACK_DPU_CTL_0: - case HW_FENCE_LOOPBACK_DPU_CTL_1: - case HW_FENCE_LOOPBACK_DPU_CTL_2: - case HW_FENCE_LOOPBACK_DPU_CTL_3: - case HW_FENCE_LOOPBACK_DPU_CTL_4: - case HW_FENCE_LOOPBACK_DPU_CTL_5: - ret = _process_dpu_client_loopback(drv_data, client_id); - break; - case HW_FENCE_LOOPBACK_GFX_CTX_0: - ret = _process_gfx_client_loopback(drv_data, client_id); - break; #if IS_ENABLED(CONFIG_DEBUG_FS) - case HW_FENCE_LOOPBACK_VAL_0: - case HW_FENCE_LOOPBACK_VAL_1: - case HW_FENCE_LOOPBACK_VAL_2: - case HW_FENCE_LOOPBACK_VAL_3: - case HW_FENCE_LOOPBACK_VAL_4: - case HW_FENCE_LOOPBACK_VAL_5: - case HW_FENCE_LOOPBACK_VAL_6: + case HW_FENCE_CLIENT_ID_VAL0: + case HW_FENCE_CLIENT_ID_VAL1: + case HW_FENCE_CLIENT_ID_VAL2: + case HW_FENCE_CLIENT_ID_VAL3: + case HW_FENCE_CLIENT_ID_VAL4: + case HW_FENCE_CLIENT_ID_VAL5: + case HW_FENCE_CLIENT_ID_VAL6: ret = process_validation_client_loopback(drv_data, client_id); break; #endif /* CONFIG_DEBUG_FS */ @@ -266,10 +197,10 @@ 
static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) { - int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0; + int client_id = HW_FENCE_CLIENT_ID_CTL0; u64 mask; - for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) { + for (; client_id <= HW_FENCE_CLIENT_ID_VAL6; client_id++) { mask = 1 << client_id; if (mask & db_flags) { HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags); @@ -946,54 +877,6 @@ int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data) return ret; } -static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id, - void **iomem_ptr, uint32_t *iomem_size) -{ - u32 reg_config[2]; - void __iomem *ptr; - char name[30] = {0}; - int ret; - - snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id); - ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2); - if (ret) - return 0; /* this is an optional property */ - - /* Mmap registers */ - ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]); - if (!ptr) { - HWFNC_ERR("failed to ioremap %s reg\n", name); - return -ENOMEM; - } - - *iomem_ptr = ptr; - *iomem_size = reg_config[1]; - - HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n", - ctl_id, name, reg_config[0], reg_config[1], ptr); - - return 0; -} - -int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data) -{ - u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0; - - for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) { - if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id], - &drv_data->ctl_start_size[ctl_id])) { - HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id); - } else { - if (drv_data->ctl_start_ptr[ctl_id]) - HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n", - ctl_id, drv_data->ctl_start_ptr[ctl_id], - drv_data->ctl_start_size[ctl_id]); - } - } - - return 0; -} - enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data, enum hw_fence_client_id client_id) { From 26eb7e4268341a414042a2493ba53f0a220f8614 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 14 Mar 2023 16:40:55 -0700 Subject: [PATCH 64/77] mm-drivers: hw_fence: remove destroy fence and fence array ioctls Remove ioctls to destroy fence and fence array. Existing implementation fails to close file descriptors properly, and closing fds must be done by caller, not by HW Fence driver. 
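For illustration only (not part of this change): user-space now releases a fence by closing the fd it received from the create ioctl. A minimal sketch, assuming the fd is returned in the .fence field of struct hw_fence_sync_create_data (as the removed destroy path implied) and that the ioctl and struct definitions from hw_fence_ioctl.c are mirrored on the user-space side:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int create_and_release_fence(int hw_sync_fd)
    {
        struct hw_fence_sync_create_data data = {0};

        if (ioctl(hw_sync_fd, HW_SYNC_IOC_CREATE_FENCE, &data) < 0)
            return -1;

        /* ... hand data.fence to the consumer ... */

        /* the caller, not the driver, drops the last reference */
        return close(data.fence);
    }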
Change-Id: I7a84c87475144f3e8a90acf44b7cf8678b6cc2dd Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 70 ----------------------------------- 1 file changed, 70 deletions(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 456732d0db..5fa02ef489 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -22,12 +22,8 @@ #define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long) #define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\ struct hw_fence_sync_create_data) -#define HW_SYNC_IOC_DESTROY_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 13,\ - struct hw_fence_sync_create_data) #define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\ struct hw_fence_array_sync_create_data) -#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 15,\ - struct hw_fence_array_sync_create_data) #define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int) #define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long) #define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int) @@ -317,35 +313,6 @@ exit: return ret; } -static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg) -{ - int fd; - struct hw_dma_fence *fence; - struct hw_fence_sync_create_data data; - - if (!_is_valid_client(obj)) - return -EINVAL; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) - return -EFAULT; - - fd = data.fence; - fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd); - - if (!fence) { - HWFNC_ERR("fence for fd:%d not found\n", fd); - return -EINVAL; - } - - /* Decrement the refcount that hw_sync_get_fence increments */ - dma_fence_put(&fence->base); - - /* To destroy fence */ - dma_fence_put(&fence->base); - - return 0; -} - static void _put_child_fences(int i, struct dma_fence **fences) { int fence_idx; @@ -448,41 +415,6 @@ exit: return ret; } -static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg) -{ - struct dma_fence_array *fence_array; - struct dma_fence *fence; - struct hw_fence_array_sync_create_data data; - int fd; - - if (!_is_valid_client(obj)) - return -EINVAL; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) - return -EFAULT; - - fd = data.fence_array_fd; - fence = (struct dma_fence *)_hw_sync_get_fence(fd); - if (!fence) { - HWFNC_ERR("Invalid fence fd: %d\n", fd); - return -EINVAL; - } - - /* Decrement the refcount that hw_sync_get_fence increments */ - dma_fence_put(fence); - - fence_array = to_dma_fence_array(fence); - if (!fence_array) { - HWFNC_ERR("Invalid fence array fd: %d\n", fd); - return -EINVAL; - } - - /* Destroy fence array */ - dma_fence_put(&fence_array->base); - - return 0; -} - /* * this IOCTL only supports receiving one fence as input-parameter, which can be * either a "dma_fence" or a "dma_fence_array", but eventually we would expand @@ -658,9 +590,7 @@ static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = { HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client), HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client), HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence), - HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence), HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array), - HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array), HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait), HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal), 
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait), From 00044be1d41e08ea1763856b8a51b59df1d988e4 Mon Sep 17 00:00:00 2001 From: Grace An Date: Mon, 20 Mar 2023 16:37:35 -0700 Subject: [PATCH 65/77] mm-drivers: hw_fence: allow synx create and import with more flags Synx clients may specify additional flags (e.g. flags to specify a global fence) when creating or importing hwfence-backed synx objects. Ensure HW Fence Driver support of these flags. Change-Id: I38d94875be09da3506d3939077099c05fa9235f6 Signed-off-by: Grace An --- hw_fence/src/msm_hw_fence_synx_translation.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hw_fence/src/msm_hw_fence_synx_translation.c b/hw_fence/src/msm_hw_fence_synx_translation.c index f35bfcd488..6970eb4d60 100644 --- a/hw_fence/src/msm_hw_fence_synx_translation.c +++ b/hw_fence/src/msm_hw_fence_synx_translation.c @@ -169,7 +169,9 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params return -SYNX_INVALID; } - if (IS_ERR_OR_NULL(params->h_synx) || (params->flags != SYNX_CREATE_DMA_FENCE) || + if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) || + !(params->flags & SYNX_CREATE_DMA_FENCE) || + (params->flags & SYNX_CREATE_CSL_FENCE) || IS_ERR_OR_NULL(params->fence)) { HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n", session->type, params->h_synx, params->flags, params->fence); @@ -259,7 +261,8 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) || - (params->flags != SYNX_IMPORT_DMA_FENCE) || IS_ERR_OR_NULL(params->fence)) { + !(params->flags & SYNX_IMPORT_DMA_FENCE) || + (params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) { HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n", client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx, IS_ERR_OR_NULL(params) ? 0 : params->flags, From fc0379a5784c4ce5ddf8ec1899ccd9290864d01a Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 7 Mar 2023 14:00:49 -0800 Subject: [PATCH 66/77] mm-drivers: hw_fence: Rename RM APIs As we are merging upstream patches, resolve conflicts of namespaces in downstream modules. 
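For illustration only (not part of this change): an equivalent way to keep call sites free of inline version checks is a one-time compatibility alias, sketched here under the assumption that the pre-6.1 gh_rm_* symbols remain available on older kernels:

    #include <linux/version.h>

    /* illustrative alternative only: alias the renamed Gunyah RM symbols once,
     * so individual call sites need no #if blocks */
    #if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
    #define ghd_rm_mem_share gh_rm_mem_share
    #define ghd_rm_get_vmid  gh_rm_get_vmid
    #endif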
Change-Id: I2aa4b0f2cea859cddd2fb537fce7a6908999e7d4 Signed-off-by: Prakruthi Deepak Heragu Signed-off-by: Grace An --- hw_fence/src/hw_fence_drv_utils.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index bca3b959c5..c615adda29 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -314,8 +314,13 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, sgl->sgl_entries[0].ipa_base = drv_data->res.start; sgl->sgl_entries[0].size = resource_size(&drv_data->res); +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, + acl, sgl, NULL, &drv_data->memparcel); +#else ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label, acl, sgl, NULL, &drv_data->memparcel); +#endif if (ret) { HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n", __func__, drv_data->res.start, drv_data->size, ret); @@ -350,11 +355,19 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET) goto end; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid)) + goto end; + + if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) + goto end; +#else if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid)) goto end; if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid)) goto end; +#endif if (peer_vmid != vm_status_payload->vmid) goto end; From cda6ac87d1b793e98add774c83468713b1780834 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 10 Feb 2023 17:26:22 -0800 Subject: [PATCH 67/77] mm-drivers: hw_fence: add support to read hw fence ctl events Add support to read hw fence ctl events through debugfs node from the carved out memory region shared with Fence Controller. 
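For illustration only (not part of this change): the node can be drained from user-space by reading until the driver returns 0, at which point it rewinds its internal cursor. A minimal sketch; the debugfs directory name is an assumption, only the hw_fence_dump_events file name is introduced by this change:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/hw_fence/hw_fence_dump_events",
                      O_RDONLY);

        if (fd < 0)
            return 1;

        /* each read returns a batch of formatted events; 0 means the dump
         * has wrapped back to the oldest entry and is complete */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, (size_t)n, stdout);

        close(fd);
        return 0;
    }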
Change-Id: I508695efcb8c7aa8fab9db2086af1ec1ff0ddd84 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 26 +++++ hw_fence/include/hw_fence_drv_utils.h | 4 +- hw_fence/src/hw_fence_drv_debug.c | 135 ++++++++++++++++++++++++++ hw_fence/src/hw_fence_drv_priv.c | 26 +++++ hw_fence/src/hw_fence_drv_utils.c | 26 +++++ 5 files changed, 216 insertions(+), 1 deletion(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index f934017749..359347b171 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -72,6 +72,12 @@ */ #define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF)) +/** + * HW_FENCE_EVENT_MAX_DATA: + * Maximum data that can be added to the debug event + */ +#define HW_FENCE_EVENT_MAX_DATA 12 + enum hw_fence_lookup_ops { HW_FENCE_LOOKUP_OP_CREATE = 0x1, HW_FENCE_LOOKUP_OP_DESTROY, @@ -265,6 +271,8 @@ struct hw_fence_client_queue_desc { * @clients_num: number of supported hw fence clients (configured based on device-tree) * @hw_fences_tbl: pointer to the hw-fences table * @hw_fences_tbl_cnt: number of elements in the hw-fence table + * @events: start address of hw fence debug events + * @total_events: total number of hw fence debug events supported * @client_lock_tbl: pointer to the per-client locks table * @client_lock_tbl_cnt: number of elements in the locks table * @hw_fences_mem_desc: memory descriptor for the hw-fence table @@ -320,6 +328,10 @@ struct hw_fence_driver_data { struct msm_hw_fence *hw_fences_tbl; u32 hw_fences_tbl_cnt; + /* events */ + struct msm_hw_fence_event *events; + u32 total_events; + /* Table with a Per-Client Lock */ u64 *client_lock_tbl; u32 client_lock_tbl_cnt; @@ -408,6 +420,20 @@ struct msm_hw_fence_queue_payload { u32 reserve; }; +/** + * struct msm_hw_fence_event - hardware fence ctl debug event + * time: qtime when the event is logged + * cpu: cpu id where the event is logged + * data_cnt: count of valid data available in the data field + * data: debug data logged by the event + */ +struct msm_hw_fence_event { + u64 time; + u32 cpu; + u32 data_cnt; + u32 data[HW_FENCE_EVENT_MAX_DATA]; +}; + /** * struct msm_hw_fence - structure holding each hw fence data. * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 9063385a23..43871ee571 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -30,12 +30,14 @@ * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region. * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table. * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues. 
+ * HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events */ enum hw_fence_mem_reserve { HW_FENCE_MEM_RESERVE_CTRL_QUEUE, HW_FENCE_MEM_RESERVE_LOCKS_REGION, HW_FENCE_MEM_RESERVE_TABLE, - HW_FENCE_MEM_RESERVE_CLIENT_QUEUE + HW_FENCE_MEM_RESERVE_CLIENT_QUEUE, + HW_FENCE_MEM_RESERVE_EVENTS_BUFF }; /** diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 46eb3e4abe..28fb37cccc 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -14,6 +14,11 @@ #define HW_FENCE_DEBUG_MAX_LOOPS 200 +/* event dump data includes one "32-bit" element + "|" separator */ +#define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) + +#define HFENCE_EVT_MSG "[%d][cpu:%d][%lu] data[%d]:%s\n" + u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK; /** @@ -540,6 +545,129 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } +static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int len, int max_size, + u32 index) +{ + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + u32 data_cnt; + int i, tmp_len = 0, ret = 0; + + if (!event->time) + return 0; + + memset(&data, 0, sizeof(data)); + if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { + HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n", + index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); + data_cnt = HW_FENCE_EVENT_MAX_DATA; + } else { + data_cnt = event->data_cnt; + } + + for (i = 0; i < data_cnt; i++) + tmp_len += scnprintf(data + tmp_len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - tmp_len, + "%lx|", event->data[i]); + + ret = scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, event->cpu, event->time, + event->data_cnt, data); + + HWFNC_DBG_INFO(HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); + + return ret; +} + +/** + * hw_fence_dbg_dump_events_rd() - debugfs read to dump the fctl events. + * @file: file handler. + * @user_buf: user buffer content for debugfs. + * @user_buf_size: size of the user buffer. + * @ppos: position offset of the user buffer. 
+ */ +static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_buf, + size_t user_buf_size, loff_t *ppos) +{ + struct hw_fence_driver_data *drv_data; + u32 entry_size = sizeof(struct msm_hw_fence_event), max_size = SZ_4K; + char *buf = NULL; + int len = 0; + static u64 start_time; + static int index, start_index; + static bool wraparound; + + if (!file || !file->private_data) { + HWFNC_ERR("unexpected data %d\n", file); + return -EINVAL; + } + drv_data = file->private_data; + + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return -EINVAL; + } + + if (wraparound && index >= start_index) { + HWFNC_DBG_H("no more data index:%d total_events:%d\n", index, + drv_data->total_events); + start_time = 0; + index = 0; + wraparound = false; + return 0; + } + + if (user_buf_size < entry_size) { + HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size, + entry_size); + return -EINVAL; + } + + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* find index of earliest event */ + if (!start_time) { + mb(); /* make sure data is ready before read */ + for (index = 0; index < drv_data->total_events; index++) { + u64 time = drv_data->events[index].time; + + if (time && (!start_time || time < start_time)) { + start_time = time; + start_index = index; + } + } + index = start_index; + HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n", + drv_data->events, start_index, start_time, drv_data->total_events); + } + + HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + while ((!wraparound || index < start_index) && len < (max_size - entry_size)) { + len += _dump_event(&drv_data->events[index], buf, len, max_size, index); + index++; + if (index >= drv_data->total_events) { + index = 0; + wraparound = true; + } + } + HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); + + if (len <= 0 || len > user_buf_size) { + HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size); + len = 0; + goto exit; + } + + if (copy_to_user(user_buf, buf, len)) { + HWFNC_ERR("failed to copy to user!\n"); + len = -EFAULT; + goto exit; + } + *ppos += len; +exit: + kfree(buf); + return len; +} + /** * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. * @file: file handler. 
@@ -955,6 +1083,11 @@ static const struct file_operations hw_fence_dump_queues_fops = { .write = hw_fence_dbg_dump_queues_wr, }; +static const struct file_operations hw_fence_dump_events_fops = { + .open = simple_open, + .read = hw_fence_dbg_dump_events_rd, +}; + static const struct file_operations hw_fence_create_join_fence_fops = { .open = simple_open, .write = hw_fence_dbg_create_join_fence, @@ -1004,6 +1137,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data) debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops); debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root, &drv_data->debugfs_data.lock_wake_cnt); + debugfs_create_file("hw_fence_dump_events", 0600, debugfs_root, drv_data, + &hw_fence_dump_events_fops); return 0; } diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e59162b1f5..8fba461b66 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -502,6 +502,27 @@ static int init_hw_fences_table(struct hw_fence_driver_data *drv_data) return 0; } +static int init_hw_fences_events(struct hw_fence_driver_data *drv_data) +{ + phys_addr_t phys; + void *ptr; + u32 size; + int ret; + + ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr, + &size, 0); + if (ret) { + HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret); + return -ENOMEM; + } + drv_data->events = (struct msm_hw_fence_event *)ptr; + drv_data->total_events = size / sizeof(struct msm_hw_fence_event); + HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events, + drv_data->total_events, sizeof(struct msm_hw_fence_event), size); + + return 0; +} + static int init_ctrl_queue(struct hw_fence_driver_data *drv_data) { struct msm_hw_fence_mem_addr *mem_descriptor; @@ -553,6 +574,11 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data) if (ret) goto exit; + /* Initialize event log */ + ret = init_hw_fences_events(drv_data); + if (ret) + HWFNC_DBG_INFO("Unable to init events\n"); + /* Map ipcc registers */ ret = hw_fence_utils_map_ipcc(drv_data); if (ret) { diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index c615adda29..98033e06ee 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -56,6 +56,12 @@ */ #define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff +/** + * HW_FENCE_MAX_EVENTS: + * Maximum number of HW Fence debug events + */ +#define HW_FENCE_MAX_EVENTS 1000 + /** * struct hw_fence_client_types - Table describing all supported client types, used to parse * device-tree properties related to client queue size. 
@@ -472,6 +478,8 @@ char *_get_mem_reserve_type(enum hw_fence_mem_reserve type) return "HW_FENCE_MEM_RESERVE_TABLE"; case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE"; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF"; } return "Unknown"; @@ -483,6 +491,8 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, { int ret = 0; u32 start_offset = 0; + u32 remaining_size_bytes; + u32 total_events; switch (type) { case HW_FENCE_MEM_RESERVE_CTRL_QUEUE: @@ -512,6 +522,22 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data, start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset; *size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size; break; + case HW_FENCE_MEM_RESERVE_EVENTS_BUFF: + start_offset = drv_data->used_mem_size; + remaining_size_bytes = drv_data->size - start_offset; + if (start_offset >= drv_data->size || + remaining_size_bytes < sizeof(struct msm_hw_fence_event)) { + HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%lu evt_sz:%lu\n", + drv_data->size, start_offset, sizeof(struct msm_hw_fence_event)); + ret = -ENOMEM; + goto exit; + } + + total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event); + if (total_events > HW_FENCE_MAX_EVENTS) + total_events = HW_FENCE_MAX_EVENTS; + *size = total_events * sizeof(struct msm_hw_fence_event); + break; default: HWFNC_ERR("Invalid mem reserve type:%d\n", type); ret = -EINVAL; From 6624a5c4539fc5143987ae4b70163eec26f93fe6 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Wed, 7 Dec 2022 11:19:11 -0800 Subject: [PATCH 68/77] mm-drivers: disp: Add support for Bazel build system - add support to build mm-drivers module using DDK framework for pineapple - add macro that makes it easy to register new modules Change-Id: I704bbe946f4d1053a85bfb122408c201b0f155b2 Signed-off-by: Varsha Suresh --- BUILD.bazel | 36 ++++++++++++++++ mm_module_build.bzl | 103 ++++++++++++++++++++++++++++++++++++++++++++ mm_modules.bzl | 44 +++++++++++++++++++ target.bzl | 16 +++++++ 4 files changed, 199 insertions(+) create mode 100644 BUILD.bazel create mode 100644 mm_module_build.bzl create mode 100644 mm_modules.bzl create mode 100644 target.bzl diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000000..77944804ce --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,36 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") + +package( + default_visibility = [ + "//visibility:public"], +) + +ddk_headers( + name = "mm_drivers_configs", + hdrs = glob([ + "config/*.h"]), + includes = ["config"] +) + +ddk_headers( + name = "hw_fence_headers", + hdrs = glob([ + "hw_fence/include/*.h"]), + includes = ["hw_fence/include"] +) + +ddk_headers( + name = "sync_fence_uapi_headers", + hdrs = glob([ + "sync_fence/include/uapi/sync_fence/*.h", + "sync_fence/include/*.h"]), + includes = ["sync_fence/include"] +) + +ddk_headers( + name = "mm_drivers_headers", + hdrs = [":mm_drivers_configs", ":hw_fence_headers", ":sync_fence_uapi_headers"] +) + +load(":target.bzl", "define_pineapple") +define_pineapple() \ No newline at end of file diff --git a/mm_module_build.bzl b/mm_module_build.bzl new file mode 100644 index 0000000000..dc708705b5 --- /dev/null +++ b/mm_module_build.bzl @@ -0,0 +1,103 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module","ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def 
_register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps): + processed_config_srcs = {} + + for config_src_name in config_srcs: + config_src = config_srcs[config_src_name] + + if type(config_src) == "list": + processed_config_srcs[config_src_name] = {True: config_src} + else: + processed_config_srcs[config_src_name] = config_src + + module = struct( + name = name, + path = path, + srcs = srcs, + config_srcs = processed_config_srcs, + config_option = config_option, + deps = deps, + ) + + module_map[name] = module + +def _get_config_choices(map, options): + choices = [] + for option in map: + choices.extend(map[option].get(option in options,[])) + return choices + +def _get_kernel_build_options(modules, config_options): + all_options = {option: True for option in config_options} + all_options = all_options | {module.config_option: True for module in modules if module.config_option} + return all_options + +def _get_kernel_build_module_srcs(module, options, formatter): + srcs = module.srcs + _get_config_choices(module.config_srcs, options) + print("-",module.name,",",module.config_option,",srcs =",srcs) + module_path = "{}/".format(module.path) if module.path else "" + return ["{}{}".format(module_path, formatter(src)) for src in srcs] + +def _get_kernel_build_module_deps(module, options, formatter): + return [formatter(dep) for dep in module.deps] + +def mm_driver_module_entry(hdrs = []): + module_map = {} + + def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps =[]): + _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps) + return struct( + register = register, + get = module_map.get, + hdrs = hdrs, + module_map = module_map + ) + +def define_target_variant_modules(target, variant, registry, modules, config_options = []): + kernel_build = "{}_{}".format(target, variant) + kernel_build_label = "//msm-kernel:{}".format(kernel_build) + modules = [registry.get(module_name) for module_name in modules] + options = _get_kernel_build_options(modules, config_options) + build_print = lambda message : print("{}: {}".format(kernel_build, message)) + formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target) + headers = ["//msm-kernel:all_headers"] + registry.hdrs + all_module_rules = [] + + for module in modules: + rule_name = "{}_{}".format(kernel_build, module.name) + module_srcs = _get_kernel_build_module_srcs(module, options, formatter) + + if not module_srcs: + continue + + ddk_submodule( + name = rule_name, + srcs = module_srcs, + out = "{}.ko".format(module.name), + deps = headers + _get_kernel_build_module_deps(module, options, formatter), + local_defines = options.keys(), + ) + all_module_rules.append(rule_name) + + ddk_module( + name = "{}_mm_drivers".format(kernel_build), + kernel_build = kernel_build_label, + deps = all_module_rules, + ) + copy_to_dist_dir( + name = "{}_mm_drivers_dist".format(kernel_build), + data = [":{}_mm_drivers".format(kernel_build)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_consolidate_gki_modules(target, registry, modules, config_options = []): + for (targets, variant) in get_all_variants(): + define_target_variant_modules(targets, variant, registry, modules, config_options) \ No newline at end of file diff --git a/mm_modules.bzl b/mm_modules.bzl new file mode 100644 index 
0000000000..ef8b175e79 --- /dev/null +++ b/mm_modules.bzl @@ -0,0 +1,44 @@ +load(":mm_module_build.bzl", "mm_driver_module_entry") + +HW_FENCE_PATH = "hw_fence" +MSM_EXT_DISPLAY_PATH = "msm_ext_display" +SYNC_FENCE_PATH = "sync_fence" + +mm_driver_modules = mm_driver_module_entry([":mm_drivers_headers"]) +module_entry = mm_driver_modules.register + +#--------------- MM-DRIVERS MODULES ------------------ + +module_entry( + name = "hw_fence", + path = HW_FENCE_PATH + "/src", + config_option = "CONFIG_QTI_HW_FENCE", + config_srcs = { + "CONFIG_DEBUG_FS" : [ + "hw_fence_ioctl.c", + ] + }, + srcs = ["hw_fence_drv_debug.c", + "hw_fence_drv_ipc.c", + "hw_fence_drv_priv.c", + "hw_fence_drv_utils.c", + "msm_hw_fence.c", + "msm_hw_fence_synx_translation.c"], + deps =[ + "//vendor/qcom/opensource/synx-kernel:synx_headers" + ] +) + +module_entry( + name = "msm_ext_display", + path = MSM_EXT_DISPLAY_PATH + "/src", + config_option = "CONFIG_MSM_EXT_DISPLAY", + srcs = ["msm_ext_display.c"], +) + +module_entry( + name = "sync_fence", + path = SYNC_FENCE_PATH + "/src", + config_option = "CONFIG_QCOM_SPEC_SYNC", + srcs = ["qcom_sync_file.c"], +) \ No newline at end of file diff --git a/target.bzl b/target.bzl new file mode 100644 index 0000000000..6d63bab3c8 --- /dev/null +++ b/target.bzl @@ -0,0 +1,16 @@ +load(":mm_modules.bzl", "mm_driver_modules") +load(":mm_module_build.bzl", "define_consolidate_gki_modules") + +def define_pineapple(): + define_consolidate_gki_modules( + target = "pineapple", + registry = mm_driver_modules, + modules = [ + "hw_fence", + "msm_ext_display", + "sync_fence", + ], + config_options = [ + "CONFIG_DEBUG_FS", + ], +) \ No newline at end of file From 6db4e6a849902c15e62b72675b201eb2a226756d Mon Sep 17 00:00:00 2001 From: Ingrid Gallardo Date: Mon, 10 Apr 2023 16:34:17 -0700 Subject: [PATCH 69/77] mm-drivers: hw_fence: resolve compilation errors for kalama Fix compilation errors for kalama target, where cpusys vm share memory driver is not present. 
Change-Id: I4f7762ad747490ba166f8e9ae27dd0191de3f021 Signed-off-by: Ingrid Gallardo --- hw_fence/src/hw_fence_drv_utils.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9162cc7eea..9e8629b428 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -10,7 +10,9 @@ #include #include #include +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) #include +#endif #include #include "hw_fence_drv_priv.h" @@ -343,6 +345,15 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data, return ret; } +static int _is_mem_shared(struct resource *res) +{ +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + return gh_cpusys_vm_get_share_mem_info(res); +#else + return -EINVAL; +#endif +} + static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data) { struct gh_rm_notif_vm_status_payload *vm_status_payload; @@ -383,7 +394,7 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da switch (vm_status_payload->vm_status) { case GH_RM_VM_STATUS_READY: - ret = gh_cpusys_vm_get_share_mem_info(&res); + ret = _is_mem_shared(&res); if (ret) { HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret); if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid)) From 2f76940f774c193f8cf52ea69ce0897f28d46fe5 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 11 Apr 2023 13:42:27 -0700 Subject: [PATCH 70/77] mm-drivers: hw_fence: add check for invalid client_id param in ioctl Current implementation allows ioctl to deregister hw-fence client with client_id that does not match hw_sync_obj. This can cause a double-free if user-space deregisters the wrong file descriptor by mistake. Instead, fail the ioctl early if it has these invalid parameters. Change-Id: Ib781be18d2f71c24d6aa4fc08eeba44649da13da Signed-off-by: Grace An --- hw_fence/src/hw_fence_ioctl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hw_fence/src/hw_fence_ioctl.c b/hw_fence/src/hw_fence_ioctl.c index 5fa02ef489..d7eab54fee 100644 --- a/hw_fence/src/hw_fence_ioctl.c +++ b/hw_fence/src/hw_fence_ioctl.c @@ -213,8 +213,13 @@ static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long ar { int client_id = _get_client_id(obj, arg); - if (IS_ERR(&client_id)) + if (IS_ERR(&client_id)) { return client_id; + } else if (client_id != obj->client_id) { + HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n", + obj->client_id, client_id); + return -EINVAL; + } return msm_hw_fence_deregister(obj->client_handle); } From 2a557e339bf966d818de74d55ead4a3b8a65f7b9 Mon Sep 17 00:00:00 2001 From: Grace An Date: Thu, 2 Mar 2023 12:41:51 -0800 Subject: [PATCH 71/77] mm-drivers: hw_fence: add interface to dump debug data Add interfaces to support dumping debug data. These interfaces should be used by drivers in case of a hw-fence error detected. 
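For illustration only (not part of this change): a client driver could hook the new helpers into its own error path. A minimal sketch, assuming the public msm_hw_fence header declares the helpers and dump flags; my_report_hw_fence_error, the client handle and the fence pointer are placeholders, and the helpers are only built when CONFIG_DEBUG_FS is enabled:

    static void my_report_hw_fence_error(void *client_handle,
                                         struct dma_fence *fence)
    {
    #if IS_ENABLED(CONFIG_DEBUG_FS)
        /* dump this client's queues, the hw-fence table and the event log */
        msm_hw_fence_dump_debug_data(client_handle,
                                     MSM_HW_FENCE_DBG_DUMP_QUEUES |
                                     MSM_HW_FENCE_DBG_DUMP_TABLE |
                                     MSM_HW_FENCE_DBG_DUMP_EVENTS, 0);

        /* dump only the hw-fence backing the failing dma_fence */
        msm_hw_fence_dump_fence(client_handle, fence);
    #endif
    }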
Change-Id: Iab46c8e9dea8ffead06f192c8d01182912fffcce Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_debug.h | 12 +- hw_fence/src/hw_fence_drv_debug.c | 276 ++++++++++++++++++-------- hw_fence/src/msm_hw_fence.c | 69 +++++++ 3 files changed, 272 insertions(+), 85 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_debug.h b/hw_fence/include/hw_fence_drv_debug.h index a1d66e0cdd..b6f6f14e19 100644 --- a/hw_fence/include/hw_fence_drv_debug.h +++ b/hw_fence/include/hw_fence_drv_debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HW_FENCE_DRV_DEBUG @@ -60,6 +60,9 @@ extern u32 msm_hw_fence_debug_level; #define HWFNC_DBG_LOCK(fmt, ...) \ dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__) +#define HWFNC_DBG_DUMP(prio, fmt, ...) \ + dprintk(prio, "[hwfence:%s:%d][dbgd]"fmt, __func__, __LINE__, ##__VA_ARGS__) + #define HWFNC_WARN(fmt, ...) \ pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \ __builtin_return_address(0), ##__VA_ARGS__) @@ -70,6 +73,13 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data); int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id); +void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client); +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count); +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data); + extern const struct file_operations hw_sync_debugfs_fops; struct hw_fence_out_clients_map { diff --git a/hw_fence/src/hw_fence_drv_debug.c b/hw_fence/src/hw_fence_drv_debug.c index 28fb37cccc..405d0c0681 100644 --- a/hw_fence/src/hw_fence_drv_debug.c +++ b/hw_fence/src/hw_fence_drv_debug.c @@ -14,6 +14,14 @@ #define HW_FENCE_DEBUG_MAX_LOOPS 200 +#define HFENCE_TBL_MSG \ + "[%d]hfence[%d] v:%d err:%lu ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d" \ + "%s ct:%llu tt:%llu wt:%llu\n" + +/* each hwfence parent includes one "32-bit" element + "," separator */ +#define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9) +#define HW_FENCE_MAX_PARENTS_DUMP (sizeof("parent_list[] ") + HW_FENCE_MAX_PARENTS_SUBLIST_DUMP) + /* event dump data includes one "32-bit" element + "|" separator */ #define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9) @@ -473,29 +481,82 @@ static ssize_t hw_fence_dbg_create_wr(struct file *file, return count; } -#define HFENCE_TBL_MSG \ - "[%d]hfence[%d] v:%d err:%d ctx:%d seqno:%d wait:0x%llx alloc:%d f:0x%lx tt:%llu wt:%llu\n" +static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, + char *parents_dump, u64 hash, u32 count) +{ + char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP]; + u32 parents_cnt; + int i, len = 0; + + if (!hw_fence || !parents_dump) { + HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence, + parents_dump); + return; + } + + memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP); + if (hw_fence->parents_cnt) { + if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) { + HWFNC_ERR("hfence[%d] has invalid parents_cnt:%d greater than max:%d\n", + hash, 
hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS); + parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS; + } else { + parents_cnt = hw_fence->parents_cnt; + } + + memset(sublist, 0, sizeof(sublist)); + for (i = 0; i < parents_cnt; i++) + len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len, + "%lu,", hw_fence->parent_list[i]); + scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist); + } + + HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG, + count, hash, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time); +} + +void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash, + u32 count) +{ + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + return _dump_fence_helper(prio, hw_fence, parents_dump, hash, count); +} static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size, u32 index, u32 cnt) { int ret; + char parents_dump[HW_FENCE_MAX_PARENTS_DUMP]; + + _dump_fence_helper(HW_FENCE_INFO, hw_fence, parents_dump, index, cnt); ret = scnprintf(buf + len, max_size - len, HFENCE_TBL_MSG, - cnt, index, hw_fence->valid, hw_fence->error, - hw_fence->ctx_id, hw_fence->seq_id, - hw_fence->wait_client_mask, hw_fence->fence_allocator, - hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); - - HWFNC_DBG_L(HFENCE_TBL_MSG, - cnt, index, hw_fence->valid, hw_fence->error, - hw_fence->ctx_id, hw_fence->seq_id, - hw_fence->wait_client_mask, hw_fence->fence_allocator, - hw_fence->flags, hw_fence->fence_trigger_time, hw_fence->fence_wait_time); + cnt, index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id, + hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags, + hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time, + hw_fence->fence_trigger_time, hw_fence->fence_wait_time); return ret; } +void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + u32 i, cnt = 0; + struct msm_hw_fence *hw_fence; + + for (i = 0; i < drv_data->hw_fences_tbl_cnt; i++) { + hw_fence = &drv_data->hw_fences_tbl[i]; + if (!hw_fence->valid) + continue; + hw_fence_debug_dump_fence(prio, hw_fence, i, cnt); + cnt++; + } +} + static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index, int max_size) { @@ -545,17 +606,40 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 return len; } -static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int len, int max_size, - u32 index) +static void _find_earliest_event(struct hw_fence_driver_data *drv_data, u32 *start_index, + u64 *start_time) +{ + u32 i; + + if (!start_index || !start_time) { + HWFNC_ERR("invalid params start_index:0x%pK start_time:0x%pK\n", start_index, + start_time); + return; + } + + mb(); /* make sure data is ready before read */ + for (i = 0; i < drv_data->total_events; i++) { + u64 time = drv_data->events[i].time; + + if (time && (!*start_time || time < *start_time)) { + *start_time = time; + *start_index = i; + } + } +} + +static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event *event, + char *data, u32 index) { - char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; u32 data_cnt; - int i, tmp_len = 0, ret = 0; + int i, len = 0; - if (!event->time) - 
return 0; + if (!event || !data) { + HWFNC_ERR("invalid params event:0x%pK data:0x%pK\n", event, data); + return; + } - memset(&data, 0, sizeof(data)); + memset(data, 0, sizeof(char) * HW_FENCE_MAX_DATA_PER_EVENT_DUMP); if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) { HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n", index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA); @@ -565,15 +649,29 @@ static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int l } for (i = 0; i < data_cnt; i++) - tmp_len += scnprintf(data + tmp_len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - tmp_len, + len += scnprintf(data + len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - len, "%lx|", event->data[i]); - ret = scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, event->cpu, event->time, - event->data_cnt, data); + HWFNC_DBG_DUMP(prio, HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); +} - HWFNC_DBG_INFO(HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data); +void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data) +{ + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + u32 start_index; + u64 start_time; + int i; - return ret; + if (!drv_data->events) { + HWFNC_ERR("events not supported\n"); + return; + } + + _find_earliest_event(drv_data, &start_index, &start_time); + for (i = start_index; i < drv_data->total_events && drv_data->events[i].time; i++) + _dump_event(prio, &drv_data->events[i], data, i); + for (i = 0; i < start_index; i++) + _dump_event(prio, &drv_data->events[i], data, i); } /** @@ -626,15 +724,7 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ /* find index of earliest event */ if (!start_time) { - mb(); /* make sure data is ready before read */ - for (index = 0; index < drv_data->total_events; index++) { - u64 time = drv_data->events[index].time; - - if (time && (!start_time || time < start_time)) { - start_time = time; - start_index = index; - } - } + _find_earliest_event(drv_data, &start_index, &start_time); index = start_index; HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n", drv_data->events, start_index, start_time, drv_data->total_events); @@ -642,7 +732,15 @@ static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_ HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data)); while ((!wraparound || index < start_index) && len < (max_size - entry_size)) { - len += _dump_event(&drv_data->events[index], buf, len, max_size, index); + char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP]; + + if (drv_data->events[index].time) { + _dump_event(HW_FENCE_INFO, &drv_data->events[index], data, index); + len += scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, + drv_data->events[index].cpu, drv_data->events[index].time, + drv_data->events[index].data_cnt, data); + } + index++; if (index >= drv_data->total_events) { index = 0; @@ -668,6 +766,63 @@ exit: return len; } +static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client, + int queue_type) +{ + struct msm_hw_fence_queue *queue; + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue_payload *payload; + u64 timestamp; + u32 *read_ptr, queue_entries; + int i; + + queue = &hw_fence_client->queues[queue_type - 1]; + + if ((queue_type > hw_fence_client->queues_num) || !queue || !queue->va_header + || !queue->va_queue) { + HWFNC_ERR("Cannot dump client:%d q_type:%s 
q_ptr:0x%pK q_header:0x%pK q_va:0x%pK\n", + hw_fence_client->client_id, + (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", + queue, queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL); + return; + } + hfi_header = (struct msm_hw_fence_hfi_queue_header *)queue->va_header; + + mb(); /* make sure data is ready before read */ + HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%lu wr_idx:%lu tx_wm:%lu q_size_bytes:%lu\n", + (queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", queue->va_queue, + hfi_header->read_index, hfi_header->write_index, hfi_header->tx_wm, + queue->q_size_bytes); + queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD; + + for (i = 0; i < queue_entries; i++) { + read_ptr = ((u32 *)queue->va_queue + + (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); + payload = (struct msm_hw_fence_queue_payload *)read_ptr; + timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32); + + HWFNC_DBG_DUMP(prio, + "%s[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", + (queue_type == HW_FENCE_TX_QUEUE) ? "tx" : "rx", i, payload->hash, + payload->ctxt_id, payload->seqno, payload->flags, payload->client_data, + payload->error, timestamp); + } +} + +void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio, + struct msm_hw_fence_client *hw_fence_client) +{ + if (!hw_fence_client) { + HWFNC_ERR("Invalid params client:0x%pK\n", hw_fence_client); + return; + } + + HWFNC_DBG_DUMP(prio, "Queues for client %d\n", hw_fence_client->client_id); + if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES) + _dump_queue(prio, hw_fence_client, HW_FENCE_RX_QUEUE); + _dump_queue(prio, hw_fence_client, HW_FENCE_TX_QUEUE); +} + /** * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues. * @file: file handler. 
@@ -682,12 +837,7 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user size_t count, loff_t *ppos) { struct hw_fence_driver_data *drv_data; - struct msm_hw_fence_queue *rx_queue; - struct msm_hw_fence_queue *tx_queue; - u64 hash, ctx_id, seqno, timestamp, flags, client_data; - u32 *read_ptr, error; - int client_id, i; - struct msm_hw_fence_queue_payload *read_ptr_payload; + int client_id; if (!file || !file->private_data) { HWFNC_ERR("unexpected data %d\n", file); @@ -699,53 +849,11 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user if (client_id < 0) return -EINVAL; - if (!drv_data->clients[client_id] || - IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]) || - IS_ERR_OR_NULL(&drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1])) { + if (!drv_data->clients[client_id]) { HWFNC_ERR("client %d not initialized\n", client_id); return -EINVAL; } - - HWFNC_DBG_L("Queues for client %d\n", client_id); - - rx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_RX_QUEUE - 1]; - tx_queue = &drv_data->clients[client_id]->queues[HW_FENCE_TX_QUEUE - 1]; - - HWFNC_DBG_L("-------RX QUEUE------\n"); - for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { - read_ptr = ((u32 *)rx_queue->va_queue + - (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); - read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; - - ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); - seqno = readq_relaxed(&read_ptr_payload->seqno); - hash = readq_relaxed(&read_ptr_payload->hash); - flags = readq_relaxed(&read_ptr_payload->flags); - client_data = readq_relaxed(&read_ptr_payload->client_data); - error = readl_relaxed(&read_ptr_payload->error); - timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | - ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - - HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n", - i, hash, ctx_id, seqno, flags, client_data, error, timestamp); - } - - HWFNC_DBG_L("-------TX QUEUE------\n"); - for (i = 0; i < drv_data->hw_fence_queue_entries; i++) { - read_ptr = ((u32 *)tx_queue->va_queue + - (i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32)))); - read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr; - - ctx_id = readq_relaxed(&read_ptr_payload->ctxt_id); - seqno = readq_relaxed(&read_ptr_payload->seqno); - hash = readq_relaxed(&read_ptr_payload->hash); - flags = readq_relaxed(&read_ptr_payload->flags); - error = readl_relaxed(&read_ptr_payload->error); - timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) | - ((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32); - HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n", - i, hash, ctx_id, seqno, flags, error, timestamp); - } + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, drv_data->clients[client_id]); return count; } diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 82ee33bdaa..f16e74b448 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -476,6 +476,75 @@ int msm_hw_fence_trigger_signal(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_trigger_signal); +#if IS_ENABLED(CONFIG_DEBUG_FS) +int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) +{ + struct msm_hw_fence_client *hw_fence_client; + int client_id; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not 
ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) { + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, hw_fence_client); + + if (dump_clients_mask) + for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++) + if ((dump_clients_mask & (1 << client_id)) && + hw_fence_drv_data->clients[client_id]) + hw_fence_debug_dump_queues(HW_FENCE_PRINTK, + hw_fence_drv_data->clients[client_id]); + } + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE) + hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data); + + if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS) + hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_dump_debug_data); + +int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence *hw_fence; + u64 hash; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle)); + return -EINVAL; + } else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) { + HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%llx\n", + fence->context, fence->seqno, fence->flags); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, fence->context, + fence->seqno, &hash); + if (!hw_fence) { + HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n", + hw_fence_client->client_id, fence, fence->context, fence->seqno); + return -EINVAL; + } + hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_dump_fence); +#endif /* CONFIG_DEBUG_FS */ + /* Function used for simulation purposes only. */ int msm_hw_fence_driver_doorbell_sim(u64 db_mask) { From 8b9d1c6da83fa88a568399476ae217138aa0bad6 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Wed, 15 Mar 2023 16:56:03 -0700 Subject: [PATCH 72/77] mm-drivers: Add LOCAL_MODULE_DDK_BUILD argument A parameter is set to enable the build to be executed with DDK framework. 
Change-Id: Ib98d5a990aa1cfe836d9214111bfef317a4c4fae Signed-off-by: Varsha Suresh --- hw_fence/Android.mk | 1 + msm_ext_display/Android.mk | 1 + sync_fence/Android.mk | 1 + 3 files changed, 3 insertions(+) diff --git a/hw_fence/Android.mk b/hw_fence/Android.mk index bad9f10b96..149702d2d7 100644 --- a/hw_fence/Android.mk +++ b/hw_fence/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM diff --git a/msm_ext_display/Android.mk b/msm_ext_display/Android.mk index 78d659c784..cef996a482 100644 --- a/msm_ext_display/Android.mk +++ b/msm_ext_display/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM diff --git a/sync_fence/Android.mk b/sync_fence/Android.mk index d784b18e9c..f041c70ef4 100644 --- a/sync_fence/Android.mk +++ b/sync_fence/Android.mk @@ -1,4 +1,5 @@ LOCAL_PATH := $(call my-dir) +LOCAL_MODULE_DDK_BUILD := true include $(CLEAR_VARS) # This makefile is only for DLKM From abf0680f4cbe7fc37fa54ddd08ea7cb581bfdc47 Mon Sep 17 00:00:00 2001 From: Grace An Date: Tue, 28 Mar 2023 13:42:17 -0700 Subject: [PATCH 73/77] mm-drivers: hw_fence: add fence error support for clients without rxq Add HW Fence Driver support to notify waiting clients of fence error in HLOS. This is a requirement by clients that do not have Rx Queue. Such clients can register a fence error callback function with data that will be passed back with callback. The fence error callback function is called by HW Fence Driver when: 1. Client registers for a fence already signaled with error. 2. Error is signaled for a fence that the client registered to wait on. Change-Id: I2892333838001bed1152118b947cfe12b1a8dd04 Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 14 ++- hw_fence/include/hw_fence_drv_utils.h | 15 +++ hw_fence/src/hw_fence_drv_priv.c | 64 ++++++----- hw_fence/src/hw_fence_drv_utils.c | 160 ++++++++++++++++++++++---- hw_fence/src/msm_hw_fence.c | 69 +++++++++++ 5 files changed, 273 insertions(+), 49 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 359347b171..ac002c82d9 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -131,9 +131,12 @@ struct msm_hw_fence_queue { /** * enum payload_type - Enum with the queue payload types. 
+ * HW_FENCE_PAYLOAD_TYPE_1: client queue payload + * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id */ enum payload_type { - HW_FENCE_PAYLOAD_TYPE_1 = 1 + HW_FENCE_PAYLOAD_TYPE_1 = 1, + HW_FENCE_PAYLOAD_TYPE_2 }; /** @@ -144,6 +147,10 @@ enum payload_type { * @mem_descriptor: hfi header memory descriptor * @queues: queues descriptor * @queues_num: number of client queues + * @fence_error_cb: function called for waiting clients that need HLOS notification of fence error + * @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to + * client during invocation of callback function + * @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data * @ipc_signal_id: id of the signal to be triggered for this client * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client * @ipc_client_pid: physical id of the ipc client for this hw fence driver client @@ -158,6 +165,9 @@ struct msm_hw_fence_client { struct msm_hw_fence_mem_addr mem_descriptor; struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES]; int queues_num; + msm_hw_fence_error_cb_t fence_error_cb; + void *fence_error_cb_userdata; + struct mutex error_cb_lock; int ipc_signal_id; int ipc_client_vid; int ipc_client_pid; @@ -508,6 +518,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); +int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_queue_payload *payload); int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno, u64 *hash, u64 client_data); diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h index 43871ee571..29c0f343e8 100644 --- a/hw_fence/include/hw_fence_drv_utils.h +++ b/hw_fence/include/hw_fence_drv_utils.h @@ -122,6 +122,21 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash, u32 reset_flags); +/** + * hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client + * + * @hw_fence_client: client, for which fence error callback must be invoked + * @ctxt_id: context id of the hw-fence + * @seqno: sequence number of the hw-fence + * @hash: hash of the hw-fence + * @flags: flags of the hw-fence + * @error: error of the hw-fence + * + * Returns zero if success, otherwise returns negative error code + */ +int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, + u64 seqno, u64 hash, u64 flags, u32 error); + /** * hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver * from the client_id used externally diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8fba461b66..4b71d93498 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -187,15 +187,7 @@ char *_get_queue_type(int queue_type) int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type) { - struct msm_hw_fence_hfi_queue_header *hfi_header; struct msm_hw_fence_queue *queue; - u32 read_idx; - u32 write_idx; - u32 
to_read_idx; - u32 *read_ptr; - u32 payload_size_u32; - u32 q_size_u32; - struct msm_hw_fence_queue_payload *read_ptr_payload; if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) { HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type, @@ -204,6 +196,20 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, } queue = &hw_fence_client->queues[queue_type]; + HWFNC_DBG_Q("read client:%lu queue:0x%pK\n", hw_fence_client->client_id, queue); + + return hw_fence_read_queue_helper(queue, payload); +} + +int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_queue_payload *payload) +{ + struct msm_hw_fence_hfi_queue_header *hfi_header; + u32 read_idx, write_idx, to_read_idx; + u32 *read_ptr; + u32 payload_size_u32, q_size_u32; + struct msm_hw_fence_queue_payload *read_ptr_payload; + hfi_header = queue->va_header; q_size_u32 = (queue->q_size_bytes / sizeof(u32)); @@ -230,13 +236,12 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } - HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", - hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, - read_idx, write_idx, queue); + HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", + &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); if (read_idx == write_idx) { HWFNC_DBG_Q("Nothing to read!\n"); - return 0; + return -EINVAL; } /* Move the pointer where we need to read and cast it */ @@ -264,12 +269,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, } /* Read the Client Queue */ - payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); - payload->seqno = readq_relaxed(&read_ptr_payload->seqno); - payload->hash = readq_relaxed(&read_ptr_payload->hash); - payload->flags = readq_relaxed(&read_ptr_payload->flags); - payload->client_data = readq_relaxed(&read_ptr_payload->client_data); - payload->error = readl_relaxed(&read_ptr_payload->error); + *payload = *read_ptr_payload; /* update the read index */ writel_relaxed(to_read_idx, &hfi_header->read_index); @@ -1204,15 +1204,22 @@ static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data, HWFNC_DBG_H("We must signal the client now! 
hfence hash:%llu\n", hash); - /* Write to Rx queue */ - if (hw_fence_client->update_rxq) - hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, - hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1); + /* Call fence error callback */ + if (error && hw_fence_client->fence_error_cb) { + hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id, + hash, flags, error); + } else { + /* Write to Rx queue */ + if (hw_fence_client->update_rxq) + hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id, + hw_fence->seq_id, hash, flags, client_data, error, + HW_FENCE_RX_QUEUE - 1); - /* Signal the hw fence now */ - if (hw_fence_client->send_ipc) - hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, - hw_fence_client->ipc_signal_id); + /* Signal the hw fence now */ + if (hw_fence_client->send_ipc) + hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id, + hw_fence_client->ipc_signal_id); + } #if IS_ENABLED(CONFIG_DEBUG_FS) if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0 @@ -1355,6 +1362,7 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, /* child fence is already signaled */ GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */ + join_fence->error |= hw_fence_child->error; if (--join_fence->pending_child_cnt == 0) signal_join_fence = true; @@ -1400,8 +1408,8 @@ int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data, if (signal_join_fence) { /* signal the join hw fence */ - _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, 0, - client_data); + _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0, + client_data, join_fence->error); set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags); /* diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index 9e8629b428..f2b34c1c4f 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -53,11 +53,34 @@ #define HW_FENCE_CLIENT_TYPE_MAX_VPU 32 #define HW_FENCE_CLIENT_TYPE_MAX_IFE 32 -/* - * Each bit in this mask represents each of the loopback clients supported in - * the enum hw_fence_client_id +/** + * HW_FENCE_CTRL_QUEUE_DOORBELL: + * Bit set in doorbell flags mask if hw fence driver should read ctrl rx queue */ -#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff +#define HW_FENCE_CTRL_QUEUE_DOORBELL 0 + +/** + * HW_FENCE_DOORBELL_FLAGS_ID_LAST: + * Last doorbell flags id for which HW Fence Driver can receive doorbell + */ +#if IS_ENABLED(CONFIG_DEBUG_FS) +#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CLIENT_ID_VAL6 +#else +#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CTRL_QUEUE_DOORBELL +#endif /* CONFIG_DEBUG_FS */ + +/** + * HW_FENCE_DOORBELL_MASK: + * Each bit in this mask represents possible doorbell flag ids for which hw fence driver can receive + */ +#define HW_FENCE_DOORBELL_MASK \ + GENMASK(HW_FENCE_DOORBELL_FLAGS_ID_LAST, HW_FENCE_CTRL_QUEUE_DOORBELL) + +/** + * HW_FENCE_MAX_ITER_READ: + * Maximum number of iterations when reading queue + */ +#define HW_FENCE_MAX_ITER_READ 100 /** * HW_FENCE_MAX_EVENTS: @@ -179,12 +202,110 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, } } -static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id) +int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, + u64 seqno, u64 hash, u64 flags, u32 error) +{ + struct msm_hw_fence_cb_data cb_data; + 
struct dma_fence fence; + int ret = 0; + + if (IS_ERR_OR_NULL(hw_fence_client)) { + HWFNC_ERR("Invalid client:0x%pK\n", hw_fence_client); + return -EINVAL; + } + + mutex_lock(&hw_fence_client->error_cb_lock); + if (!error || !hw_fence_client->fence_error_cb) { + HWFNC_ERR("Invalid error:%d fence_error_cb:0x%pK\n", error, + hw_fence_client->fence_error_cb); + ret = -EINVAL; + goto exit; + } + + /* initialize cb_data info */ + fence.context = ctxt_id; + fence.seqno = seqno; + fence.flags = flags; + fence.error = error; + cb_data.fence = &fence; + cb_data.data = hw_fence_client->fence_error_cb_userdata; + + HWFNC_DBG_L("invoking cb for client:%d ctx:%llu seq:%llu flags:%llu e:%u data:0x%pK\n", + hw_fence_client->client_id, ctxt_id, seqno, flags, error, + hw_fence_client->fence_error_cb_userdata); + + hw_fence_client->fence_error_cb(hash, error, &cb_data); + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return ret; +} + +static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv_data, + int db_flag_id) +{ + struct msm_hw_fence_client *hw_fence_client; + struct msm_hw_fence_queue_payload payload; + int i, cb_ret, ret = 0, read = 1; + u32 client_id; + + for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) { + read = hw_fence_read_queue_helper(&drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1], + &payload); + if (read < 0) { + HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id); + return read; + } + if (payload.type != HW_FENCE_PAYLOAD_TYPE_2) { + HWFNC_ERR("unsupported payload type in ctrl rxq received:%u expected:%u\n", + payload.type, HW_FENCE_PAYLOAD_TYPE_2); + ret = -EINVAL; + continue; + } + if (payload.client_data < HW_FENCE_CLIENT_ID_CTX0 || + payload.client_data >= drv_data->clients_num) { + HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n", + payload.client_data, HW_FENCE_CLIENT_ID_CTX0, + drv_data->clients_num); + ret = -EINVAL; + continue; + } + + client_id = payload.client_data; + HWFNC_DBG_Q("ctrl rxq rd: it:%d h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n", + i, payload.hash, payload.ctxt_id, payload.seqno, payload.flags, + payload.error, client_id); + + hw_fence_client = drv_data->clients[client_id]; + if (!hw_fence_client) { + HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n", + client_id); + ret = -EINVAL; + continue; + } + + cb_ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload.ctxt_id, + payload.seqno, payload.hash, payload.flags, payload.error); + if (cb_ret) { + HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n", + client_id, payload.ctxt_id, payload.seqno, payload.error); + ret = cb_ret; + } + } + + return ret; +} + +static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_flag_id) { int ret; - HWFNC_DBG_H("Processing doorbell client_id:%d\n", client_id); - switch (client_id) { + HWFNC_DBG_H("Processing doorbell mask id:%d\n", db_flag_id); + switch (db_flag_id) { + case HW_FENCE_CTRL_QUEUE_DOORBELL: + ret = _process_fence_error_client_loopback(drv_data, db_flag_id); + break; #if IS_ENABLED(CONFIG_DEBUG_FS) case HW_FENCE_CLIENT_ID_VAL0: case HW_FENCE_CLIENT_ID_VAL1: @@ -193,11 +314,11 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c case HW_FENCE_CLIENT_ID_VAL4: case HW_FENCE_CLIENT_ID_VAL5: case HW_FENCE_CLIENT_ID_VAL6: - ret = process_validation_client_loopback(drv_data, client_id); + ret = process_validation_client_loopback(drv_data, db_flag_id); break; #endif /* CONFIG_DEBUG_FS */ default: - 
HWFNC_ERR("unknown client:%d\n", client_id); + HWFNC_ERR("unknown mask id:%d\n", db_flag_id); ret = -EINVAL; } @@ -206,22 +327,21 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags) { - int client_id = HW_FENCE_CLIENT_ID_CTL0; + int db_flag_id = HW_FENCE_CTRL_QUEUE_DOORBELL; u64 mask; - for (; client_id <= HW_FENCE_CLIENT_ID_VAL6; client_id++) { - mask = 1 << client_id; + for (; db_flag_id <= HW_FENCE_DOORBELL_FLAGS_ID_LAST; db_flag_id++) { + mask = 1 << db_flag_id; if (mask & db_flags) { - HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags); + HWFNC_DBG_H("db_flag:%d signaled! flags:0x%llx\n", db_flag_id, db_flags); - /* process client */ - if (_process_doorbell_client(drv_data, client_id)) - HWFNC_ERR("Failed to process client:%d\n", client_id); + if (_process_doorbell_id(drv_data, db_flag_id)) + HWFNC_ERR("Failed to process db_flag_id:%d\n", db_flag_id); - /* clear mask for this client and if nothing else pending finish */ + /* clear mask for this flag id if nothing else pending finish */ db_flags = db_flags & ~(mask); - HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", - client_id, db_flags, mask, ~(mask)); + HWFNC_DBG_H("db_flag_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n", + db_flag_id, db_flags, mask, ~(mask)); if (!db_flags) break; } @@ -232,7 +352,7 @@ void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, static void _hw_fence_cb(int irq, void *data) { struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data; - gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK; + gh_dbl_flags_t clear_flags = HW_FENCE_DOORBELL_MASK; int ret; if (!drv_data) diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index f16e74b448..4842ba0cd7 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -118,6 +118,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, if (ret) goto error; + mutex_init(&hw_fence_client->error_cb_lock); + HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n", hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num, hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid, @@ -476,6 +478,73 @@ int msm_hw_fence_trigger_signal(void *client_handle, } EXPORT_SYMBOL(msm_hw_fence_trigger_signal); +int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) { + HWFNC_ERR("Invalid params client:0x%pK cb_func:0x%pK data:0x%pK\n", client_handle, + cb, data); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + return -EINVAL; + } + + hw_fence_client->fence_error_cb_userdata = data; + hw_fence_client->fence_error_cb = cb; + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_register_error_cb); + +int 
msm_hw_fence_deregister_error_cb(void *client_handle) +{ + struct msm_hw_fence_client *hw_fence_client; + int ret = 0; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) { + HWFNC_ERR("hw fence driver not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle)) { + HWFNC_ERR("Invalid client: 0x%pK\n", client_handle); + return -EINVAL; + } + + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + if (!mutex_trylock(&hw_fence_client->error_cb_lock)) { + HWFNC_ERR("client_id:%d is modifying or using fence_error_cb:0x%pK data:0x%pK\n", + hw_fence_client->client_id, hw_fence_client->fence_error_cb, + hw_fence_client->fence_error_cb_userdata); + return -EAGAIN; + } + + if (!hw_fence_client->fence_error_cb) { + HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n", + hw_fence_client->client_id, hw_fence_client->client_id_ext, + hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata); + ret = -EINVAL; + goto exit; + } + + hw_fence_client->fence_error_cb = NULL; + hw_fence_client->fence_error_cb_userdata = NULL; + +exit: + mutex_unlock(&hw_fence_client->error_cb_lock); + + return ret; +} +EXPORT_SYMBOL(msm_hw_fence_deregister_error_cb); + #if IS_ENABLED(CONFIG_DEBUG_FS) int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask) { From b48c190c8bc41c34dce239c69abda28d1290330a Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 16 Dec 2022 17:33:40 -0800 Subject: [PATCH 74/77] mm-drivers: hw_fence: add support for out of order signaling A fence error use case may require that a later fence in the Tx Queue be signaled with an error before an earlier fence. The HW Fence Driver provides limited support for this scenario by allowing the first two entries of the client Tx Queue to be swapped.
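As a usage sketch only: the snippet below shows how a client might combine the error-callback registration added in the previous patch with the Tx Queue error update added here. It is not part of the patch; the header path, the example_* helper names, and the exact callback signature (inferred from the invocation in hw_fence_utils_fence_error_cb()) are assumptions.

/* Illustrative only: header path and the example_* helpers are assumptions. */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/soc/qcom/msm_hw_fence.h>

/*
 * Fence error callback for a client without an Rx Queue. The signature is
 * inferred from the invocation in hw_fence_utils_fence_error_cb(), which
 * passes the fence hash, the error, and a struct msm_hw_fence_cb_data that
 * wraps the fence info and the userdata registered below.
 */
static void example_fence_error_cb(u64 hash, u32 error, void *data)
{
	struct msm_hw_fence_cb_data *cb_data = data;

	pr_err("hw fence error:%u hash:%llu userdata:0x%pK\n", error, hash, cb_data->data);
}

/* Register the callback once after msm_hw_fence_register(); data must be non-NULL. */
static int example_enable_error_notification(void *client_handle, void *priv)
{
	return msm_hw_fence_register_error_cb(client_handle, example_fence_error_cb, priv);
}

/*
 * Out-of-order signaling: mark an already-queued Tx Queue entry as failed.
 * With MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE, the entry matching 'handle' is
 * swapped with the head of the queue when it is the second entry, or updated
 * in place when it is already the head; deeper entries are not supported.
 */
static int example_fail_queued_fence(void *client_handle, u64 handle, u32 error)
{
	int ret;

	ret = msm_hw_fence_update_txq_error(client_handle, handle, error,
		MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE);
	if (ret)
		pr_err("txq error update failed handle:%llu error:%u ret:%d\n",
			handle, error, ret);

	return ret;
}

Note that msm_hw_fence_update_txq_error() currently accepts only MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE; other update_flags values are rejected.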
Change-Id: I00faada95a3c33c1dcced79bea5fef3b581152cd Signed-off-by: Grace An --- hw_fence/include/hw_fence_drv_priv.h | 2 + hw_fence/src/hw_fence_drv_priv.c | 172 ++++++++++++++++++++++----- hw_fence/src/msm_hw_fence.c | 29 +++++ 3 files changed, 173 insertions(+), 30 deletions(-) diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h index 359347b171..95cf76d5a5 100644 --- a/hw_fence/include/hw_fence_drv_priv.h +++ b/hw_fence/include/hw_fence_drv_priv.h @@ -505,6 +505,8 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data, int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash, u64 flags, u64 client_data, u32 error, int queue_type); +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error); inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data); int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type); diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index 8fba461b66..1f73494fc5 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -17,6 +17,15 @@ #define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1) +#define REQUIRES_IDX_TRANSLATION(queue) \ + ((queue)->rd_wr_idx_factor && ((queue)->rd_wr_idx_start || (queue)->rd_wr_idx_factor > 1)) + +#define IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, idx) \ + (((idx) - (queue)->rd_wr_idx_start) * (queue)->rd_wr_idx_factor) + +#define IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, idx) \ + (((idx) / (queue)->rd_wr_idx_factor) + (queue)->rd_wr_idx_start) + inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data) { #ifdef HWFENCE_USE_SLEEP_TIMER @@ -184,6 +193,17 @@ char *_get_queue_type(int queue_type) return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? 
"RXQ" : "TXQ"; } +static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue *queue, + u32 *read_idx, u32 *write_idx) +{ + if (REQUIRES_IDX_TRANSLATION(queue)) { + *read_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *read_idx); + *write_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *write_idx); + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + *read_idx, *write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } +} + int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload, int queue_type) { @@ -223,12 +243,7 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, write_idx = readl_relaxed(&hfi_header->write_index); /* translate read and write indexes from custom indexing to dwords with no offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", - read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); - } + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, @@ -257,8 +272,8 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, to_read_idx = 0; /* translate to_read_idx to custom indexing with offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_read_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_read_idx); HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -281,6 +296,34 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, return to_read_idx == write_idx ? 0 : 1; } +static int _get_update_queue_params(struct msm_hw_fence_queue *queue, + struct msm_hw_fence_hfi_queue_header **hfi_header, u32 *q_size_u32, u32 *payload_size, + u32 *payload_size_u32, u32 **wr_ptr) +{ + if (!queue) { + HWFNC_ERR("invalid queue\n"); + return -EINVAL; + } + + *hfi_header = queue->va_header; + if (!*hfi_header) { + HWFNC_ERR("Invalid queue hfi_header\n"); + return -EINVAL; + } + + *q_size_u32 = (queue->q_size_bytes / sizeof(u32)); + *payload_size = sizeof(struct msm_hw_fence_queue_payload); + *payload_size_u32 = (*payload_size / sizeof(u32)); + + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) + *wr_ptr = &((*hfi_header)->tx_wm); + else + *wr_ptr = &((*hfi_header)->write_index); + + return 0; +} + /* * This function writes to the queue of the client. 
The 'queue_type' determines * if this function is writing to the rx or tx queue @@ -312,23 +355,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, } queue = &hw_fence_client->queues[queue_type]; - hfi_header = queue->va_header; - - q_size_u32 = (queue->q_size_bytes / sizeof(u32)); - payload_size = sizeof(struct msm_hw_fence_queue_payload); - payload_size_u32 = (payload_size / sizeof(u32)); - - if (!hfi_header) { - HWFNC_ERR("Invalid queue\n"); + if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, + &payload_size_u32, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d q_type:%d queue\n", hw_fence_client->client_id, + queue_type); return -EINVAL; } - /* if skipping update wr_index, then use hfi_header->tx_wm instead */ - if (queue->skip_wr_idx) - wr_ptr = &hfi_header->tx_wm; - else - wr_ptr = &hfi_header->write_index; - /* * We need to lock the client if there is an Rx Queue update, since that * is the only time when HW Fence driver can have a race condition updating @@ -361,12 +394,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false"); /* translate read and write indexes from custom indexing to dwords with no offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; - HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", - read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); - } + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? 
(q_size_u32 - (write_idx - read_idx)) : @@ -402,8 +430,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, to_write_idx = 0; /* translate to_write_idx to custom indexing with offset */ - if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { - to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + if (REQUIRES_IDX_TRANSLATION(queue)) { + to_write_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_write_idx); HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); } @@ -438,6 +466,90 @@ exit: return ret; } +int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data, + struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error) +{ + u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx, *wr_ptr; + struct msm_hw_fence_queue_payload tmp, *first_payload, *second_payload; + struct msm_hw_fence_hfi_queue_header *hfi_header; + struct msm_hw_fence_queue *queue; + int ret = 0; + + queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1]; + if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size, + &payload_size_u32, &wr_ptr)) { + HWFNC_ERR("Invalid client:%d tx queue\n", hw_fence_client->client_id); + return -EINVAL; + } + + /* Make sure data is ready before read */ + mb(); + + /* Get read and write index */ + read_idx = hfi_header->read_index; + write_idx = *wr_ptr; + + /* translate read and write indexes from custom indexing to dwords with no offset */ + _translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx); + + if (read_idx == write_idx) { + HWFNC_DBG_Q("Empty queue, no entry matches with hash:%llu\n", hash); + return -EINVAL; + } + + first_payload = (struct msm_hw_fence_queue_payload *)((u32 *)queue->va_queue + read_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, queue->pa_queue, read_idx, + first_payload); + + if (first_payload->hash == hash) { + /* Swap not needed, update first payload in client queue with fence error */ + first_payload->error = error; + } else { + /* Check whether second entry matches hash */ + second_idx = read_idx + payload_size_u32; + + /* wrap-around case */ + if (second_idx >= q_size_u32) + second_idx = 0; + + if (second_idx == write_idx) { + HWFNC_ERR("Failed to find matching entry with hash:%llu\n", hash); + return -EINVAL; + } + + second_payload = (struct msm_hw_fence_queue_payload *) + ((u32 *)queue->va_queue + second_idx); + HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n", + hw_fence_client->client_id, queue->va_queue, queue->pa_queue, second_idx, + second_payload); + + if (second_payload->hash != hash) { + HWFNC_ERR("hash:%llu not found in first two queue payloads:%u, %u\n", hash, + read_idx, second_idx); + return -EINVAL; + } + + /* swap first and second payload, updating error field in new first payload */ + tmp = *first_payload; + *first_payload = *second_payload; + first_payload->error = error; + *second_payload = tmp; + + HWFNC_DBG_L("client_id:%d txq move from idx:%u to idx:%u hash:%llu c:%llu s:%llu\n", + hw_fence_client->client_id, read_idx, second_idx, hash, tmp.ctxt_id, + tmp.seqno); + } + + /* update memory for the messages */ + wmb(); + + HWFNC_DBG_L("client_id:%d update tx queue index:%u hash:%llu error:%u\n", + hw_fence_client->client_id, read_idx, hash, error); + + return ret; +} + static int init_global_locks(struct 
hw_fence_driver_data *drv_data) { struct msm_hw_fence_mem_addr *mem_descriptor; diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index 82ee33bdaa..9486fe507d 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -451,6 +451,35 @@ int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 erro } EXPORT_SYMBOL(msm_hw_fence_update_txq); + +int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags) +{ + struct msm_hw_fence_client *hw_fence_client; + + if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready || + !hw_fence_drv_data->vm_ready) { + HWFNC_ERR("hw fence driver or vm not ready\n"); + return -EAGAIN; + } else if (IS_ERR_OR_NULL(client_handle) || + (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) { + HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%d max:%d or error:%d\n", + client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error); + return -EINVAL; + } else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) { + HWFNC_ERR("invalid flags:0x%x expected:0x%x no support of in-place error update\n", + update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE); + return -EINVAL; + } + hw_fence_client = (struct msm_hw_fence_client *)client_handle; + + /* Write to Tx queue */ + hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client, + handle, error); + + return 0; +} +EXPORT_SYMBOL(msm_hw_fence_update_txq_error); + /* tx client has to be the physical, rx client virtual id*/ int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_pid, u32 rx_client_vid, From 508cc02147fdc14c4fc776b62caf4960b4ca05a1 Mon Sep 17 00:00:00 2001 From: Varsha Suresh Date: Thu, 4 May 2023 15:46:47 -0700 Subject: [PATCH 75/77] mm-drivers: Refactor to separate modules into packages Currently, all of the modules are built from a single Bazel package. This complicates integration with the vendor build because the Android build system builds the modules individually. Split the modules into their own Bazel packages to align more closely with the Android build system's expectations and to make it easier to hook into the Bazel build.
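As an illustration only (not part of this change): under the new layout, an out-of-tree consumer can depend on just the header packages it needs and define its modules with the same pattern used by define_hw_fence.bzl below. The rule name, package path, and source file in this sketch are hypothetical.

# Hypothetical consumer BUILD logic; names and paths are illustrative.
load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
load("//msm-kernel:target_variants.bzl", "get_all_variants")

def define_example_consumer():
    for (target, variant) in get_all_variants():
        tv = "{}_{}".format(target, variant)
        ddk_module(
            name = "{}_example_consumer".format(tv),
            srcs = ["src/example_consumer.c"],
            out = "example_consumer.ko",
            deps = [
                "//msm-kernel:all_headers",
                "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers",
                "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers",
            ],
            kernel_build = "//msm-kernel:{}".format(tv),
        )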
Change-Id: I100e9ec9edbe96212089a5944cbba4d6677ff83a Signed-off-by: Varsha Suresh --- BUILD.bazel | 32 ++----- hw_fence/BUILD.bazel | 16 ++++ hw_fence/Kconfig | 4 + hw_fence/defconfig | 1 + hw_fence/define_hw_fence.bzl | 46 +++++++++ mm_module_build.bzl | 103 --------------------- mm_modules.bzl | 44 --------- msm_ext_display/BUILD.bazel | 10 ++ msm_ext_display/Kconfig | 4 + msm_ext_display/defconfig | 1 + msm_ext_display/define_msm_ext_display.bzl | 31 +++++++ sync_fence/BUILD.bazel | 16 ++++ sync_fence/Kconfig | 4 + sync_fence/defconfig | 1 + sync_fence/define_sync_fence.bzl | 33 +++++++ target.bzl | 16 ---- 16 files changed, 176 insertions(+), 186 deletions(-) create mode 100644 hw_fence/BUILD.bazel create mode 100644 hw_fence/Kconfig create mode 100644 hw_fence/defconfig create mode 100644 hw_fence/define_hw_fence.bzl delete mode 100644 mm_module_build.bzl delete mode 100644 mm_modules.bzl create mode 100644 msm_ext_display/BUILD.bazel create mode 100644 msm_ext_display/Kconfig create mode 100644 msm_ext_display/defconfig create mode 100644 msm_ext_display/define_msm_ext_display.bzl create mode 100644 sync_fence/BUILD.bazel create mode 100644 sync_fence/Kconfig create mode 100644 sync_fence/defconfig create mode 100644 sync_fence/define_sync_fence.bzl delete mode 100644 target.bzl diff --git a/BUILD.bazel b/BUILD.bazel index 77944804ce..5f4185bcfe 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -2,35 +2,21 @@ load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") package( default_visibility = [ - "//visibility:public"], + "//visibility:public", + ], ) ddk_headers( name = "mm_drivers_configs", - hdrs = glob([ - "config/*.h"]), - includes = ["config"] -) - -ddk_headers( - name = "hw_fence_headers", - hdrs = glob([ - "hw_fence/include/*.h"]), - includes = ["hw_fence/include"] -) - -ddk_headers( - name = "sync_fence_uapi_headers", - hdrs = glob([ - "sync_fence/include/uapi/sync_fence/*.h", - "sync_fence/include/*.h"]), - includes = ["sync_fence/include"] + hdrs = glob(["config/*.h"]), + includes = ["config"], ) ddk_headers( name = "mm_drivers_headers", - hdrs = [":mm_drivers_configs", ":hw_fence_headers", ":sync_fence_uapi_headers"] + hdrs = [ + ":mm_drivers_configs", + "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers", + "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers", + ], ) - -load(":target.bzl", "define_pineapple") -define_pineapple() \ No newline at end of file diff --git a/hw_fence/BUILD.bazel b/hw_fence/BUILD.bazel new file mode 100644 index 0000000000..808c0ec9d3 --- /dev/null +++ b/hw_fence/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_hw_fence.bzl", "define_hw_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "hw_fence_headers", + hdrs = glob(["include/*.h"]), + includes = ["include"] +) + +define_hw_fence() diff --git a/hw_fence/Kconfig b/hw_fence/Kconfig new file mode 100644 index 0000000000..a50b02eefd --- /dev/null +++ b/hw_fence/Kconfig @@ -0,0 +1,4 @@ +config QTI_HW_FENCE + bool "HW Fence" + help + Enable the hw_fence module \ No newline at end of file diff --git a/hw_fence/defconfig b/hw_fence/defconfig new file mode 100644 index 0000000000..f80d4f65f7 --- /dev/null +++ b/hw_fence/defconfig @@ -0,0 +1 @@ +CONFIG_QTI_HW_FENCE=y diff --git a/hw_fence/define_hw_fence.bzl b/hw_fence/define_hw_fence.bzl new file mode 100644 index 0000000000..1598ed183b --- /dev/null +++ b/hw_fence/define_hw_fence.bzl @@ -0,0 +1,46 @@ 
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_msm_hw_fence".format(tv), + srcs = [ + "src/hw_fence_drv_debug.c", + "src/hw_fence_drv_ipc.c", + "src/hw_fence_drv_priv.c", + "src/hw_fence_drv_utils.c", + "src/msm_hw_fence.c", + "src/msm_hw_fence_synx_translation.c", + ], + out = "msm_hw_fence.ko", + defconfig = "defconfig", + kconfig = "Kconfig", + conditional_srcs = { + "CONFIG_DEBUG_FS": { + True: ["src/hw_fence_ioctl.c"], + }, + }, + deps = [ + "//msm-kernel:all_headers", + "//vendor/qcom/opensource/synx-kernel:synx_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_hw_fence_dist".format(tv), + data = [":{}_msm_hw_fence".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_hw_fence(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/mm_module_build.bzl b/mm_module_build.bzl deleted file mode 100644 index dc708705b5..0000000000 --- a/mm_module_build.bzl +++ /dev/null @@ -1,103 +0,0 @@ -load("//build/kernel/kleaf:kernel.bzl", "ddk_module","ddk_submodule") -load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") -load("//msm-kernel:target_variants.bzl", "get_all_variants") - -def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps): - processed_config_srcs = {} - - for config_src_name in config_srcs: - config_src = config_srcs[config_src_name] - - if type(config_src) == "list": - processed_config_srcs[config_src_name] = {True: config_src} - else: - processed_config_srcs[config_src_name] = config_src - - module = struct( - name = name, - path = path, - srcs = srcs, - config_srcs = processed_config_srcs, - config_option = config_option, - deps = deps, - ) - - module_map[name] = module - -def _get_config_choices(map, options): - choices = [] - for option in map: - choices.extend(map[option].get(option in options,[])) - return choices - -def _get_kernel_build_options(modules, config_options): - all_options = {option: True for option in config_options} - all_options = all_options | {module.config_option: True for module in modules if module.config_option} - return all_options - -def _get_kernel_build_module_srcs(module, options, formatter): - srcs = module.srcs + _get_config_choices(module.config_srcs, options) - print("-",module.name,",",module.config_option,",srcs =",srcs) - module_path = "{}/".format(module.path) if module.path else "" - return ["{}{}".format(module_path, formatter(src)) for src in srcs] - -def _get_kernel_build_module_deps(module, options, formatter): - return [formatter(dep) for dep in module.deps] - -def mm_driver_module_entry(hdrs = []): - module_map = {} - - def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps =[]): - _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps) - return struct( - register = register, - get = module_map.get, - hdrs = hdrs, - module_map = module_map - ) - -def define_target_variant_modules(target, variant, registry, modules, config_options = []): - kernel_build = 
"{}_{}".format(target, variant) - kernel_build_label = "//msm-kernel:{}".format(kernel_build) - modules = [registry.get(module_name) for module_name in modules] - options = _get_kernel_build_options(modules, config_options) - build_print = lambda message : print("{}: {}".format(kernel_build, message)) - formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target) - headers = ["//msm-kernel:all_headers"] + registry.hdrs - all_module_rules = [] - - for module in modules: - rule_name = "{}_{}".format(kernel_build, module.name) - module_srcs = _get_kernel_build_module_srcs(module, options, formatter) - - if not module_srcs: - continue - - ddk_submodule( - name = rule_name, - srcs = module_srcs, - out = "{}.ko".format(module.name), - deps = headers + _get_kernel_build_module_deps(module, options, formatter), - local_defines = options.keys(), - ) - all_module_rules.append(rule_name) - - ddk_module( - name = "{}_mm_drivers".format(kernel_build), - kernel_build = kernel_build_label, - deps = all_module_rules, - ) - copy_to_dist_dir( - name = "{}_mm_drivers_dist".format(kernel_build), - data = [":{}_mm_drivers".format(kernel_build)], - dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), - flat = True, - wipe_dist_dir = False, - allow_duplicate_filenames = False, - mode_overrides = {"**/*": "644"}, - log = "info", - ) - -def define_consolidate_gki_modules(target, registry, modules, config_options = []): - for (targets, variant) in get_all_variants(): - define_target_variant_modules(targets, variant, registry, modules, config_options) \ No newline at end of file diff --git a/mm_modules.bzl b/mm_modules.bzl deleted file mode 100644 index ef8b175e79..0000000000 --- a/mm_modules.bzl +++ /dev/null @@ -1,44 +0,0 @@ -load(":mm_module_build.bzl", "mm_driver_module_entry") - -HW_FENCE_PATH = "hw_fence" -MSM_EXT_DISPLAY_PATH = "msm_ext_display" -SYNC_FENCE_PATH = "sync_fence" - -mm_driver_modules = mm_driver_module_entry([":mm_drivers_headers"]) -module_entry = mm_driver_modules.register - -#--------------- MM-DRIVERS MODULES ------------------ - -module_entry( - name = "hw_fence", - path = HW_FENCE_PATH + "/src", - config_option = "CONFIG_QTI_HW_FENCE", - config_srcs = { - "CONFIG_DEBUG_FS" : [ - "hw_fence_ioctl.c", - ] - }, - srcs = ["hw_fence_drv_debug.c", - "hw_fence_drv_ipc.c", - "hw_fence_drv_priv.c", - "hw_fence_drv_utils.c", - "msm_hw_fence.c", - "msm_hw_fence_synx_translation.c"], - deps =[ - "//vendor/qcom/opensource/synx-kernel:synx_headers" - ] -) - -module_entry( - name = "msm_ext_display", - path = MSM_EXT_DISPLAY_PATH + "/src", - config_option = "CONFIG_MSM_EXT_DISPLAY", - srcs = ["msm_ext_display.c"], -) - -module_entry( - name = "sync_fence", - path = SYNC_FENCE_PATH + "/src", - config_option = "CONFIG_QCOM_SPEC_SYNC", - srcs = ["qcom_sync_file.c"], -) \ No newline at end of file diff --git a/msm_ext_display/BUILD.bazel b/msm_ext_display/BUILD.bazel new file mode 100644 index 0000000000..5f30f80c12 --- /dev/null +++ b/msm_ext_display/BUILD.bazel @@ -0,0 +1,10 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_msm_ext_display.bzl", "define_msm_ext_display") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +define_msm_ext_display() diff --git a/msm_ext_display/Kconfig b/msm_ext_display/Kconfig new file mode 100644 index 0000000000..a7257e499a --- /dev/null +++ b/msm_ext_display/Kconfig @@ -0,0 +1,4 @@ +config MSM_EXT_DISPLAY + bool "Enable msm_ext_display" + help + Enable msm_ext_display driver diff --git 
a/msm_ext_display/defconfig b/msm_ext_display/defconfig new file mode 100644 index 0000000000..53017a5990 --- /dev/null +++ b/msm_ext_display/defconfig @@ -0,0 +1 @@ +CONFIG_MSM_EXT_DISPLAY=y diff --git a/msm_ext_display/define_msm_ext_display.bzl b/msm_ext_display/define_msm_ext_display.bzl new file mode 100644 index 0000000000..3287983898 --- /dev/null +++ b/msm_ext_display/define_msm_ext_display.bzl @@ -0,0 +1,31 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_msm_ext_display".format(tv), + srcs = ["src/msm_ext_display.c"], + out = "msm_ext_display.ko", + defconfig = "defconfig", + kconfig = "Kconfig", + deps = ["//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_msm_ext_display_dist".format(tv), + data = [":{}_msm_ext_display".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_msm_ext_display(): + for (t, v) in get_all_variants(): + _define_module(t, v) diff --git a/sync_fence/BUILD.bazel b/sync_fence/BUILD.bazel new file mode 100644 index 0000000000..8da9507b61 --- /dev/null +++ b/sync_fence/BUILD.bazel @@ -0,0 +1,16 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") +load(":define_sync_fence.bzl", "define_sync_fence") + +package( + default_visibility = [ + "//visibility:public" + ], +) + +ddk_headers( + name = "sync_fence_uapi_headers", + hdrs = glob(["include/uapi/sync_fence/*.h"]), + includes = ["include"] +) + +define_sync_fence() diff --git a/sync_fence/Kconfig b/sync_fence/Kconfig new file mode 100644 index 0000000000..6422d5cafa --- /dev/null +++ b/sync_fence/Kconfig @@ -0,0 +1,4 @@ +config QCOM_SPEC_SYNC + bool "Enable spec fence" + help + Enable sync_fence driver \ No newline at end of file diff --git a/sync_fence/defconfig b/sync_fence/defconfig new file mode 100644 index 0000000000..33c414d0f9 --- /dev/null +++ b/sync_fence/defconfig @@ -0,0 +1 @@ +CONFIG_QCOM_SPEC_SYNC=y diff --git a/sync_fence/define_sync_fence.bzl b/sync_fence/define_sync_fence.bzl new file mode 100644 index 0000000000..b7dcf21700 --- /dev/null +++ b/sync_fence/define_sync_fence.bzl @@ -0,0 +1,33 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") +load("//msm-kernel:target_variants.bzl", "get_all_variants") + +def _define_module(target, variant): + tv = "{}_{}".format(target, variant) + ddk_module( + name = "{}_sync_fence".format(tv), + srcs = ["src/qcom_sync_file.c"], + out = "sync_fence.ko", + kconfig = "Kconfig", + defconfig = "defconfig", + deps = [ + "//msm-kernel:all_headers", + "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers", + ], + kernel_build = "//msm-kernel:{}".format(tv), + ) + + copy_to_dist_dir( + name = "{}_sync_fence_dist".format(tv), + data = [":{}_sync_fence".format(tv)], + dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) + +def define_sync_fence(): + for (t, v) in get_all_variants(): + 
_define_module(t, v) diff --git a/target.bzl b/target.bzl deleted file mode 100644 index 6d63bab3c8..0000000000 --- a/target.bzl +++ /dev/null @@ -1,16 +0,0 @@ -load(":mm_modules.bzl", "mm_driver_modules") -load(":mm_module_build.bzl", "define_consolidate_gki_modules") - -def define_pineapple(): - define_consolidate_gki_modules( - target = "pineapple", - registry = mm_driver_modules, - modules = [ - "hw_fence", - "msm_ext_display", - "sync_fence", - ], - config_options = [ - "CONFIG_DEBUG_FS", - ], -) \ No newline at end of file From 72d7c6a3c5706d87e86c3591975058083d895b1b Mon Sep 17 00:00:00 2001 From: Yu Wu Date: Tue, 13 Jun 2023 19:10:30 +0800 Subject: [PATCH 76/77] mm-drivers: hw_fence: avoid dereference before NULL check Fix hw_fence_wait_client dereferenced before NULL check. Change-Id: Ib34c4969c9042f4f815b2eca75b553bc23d4b6cc Signed-off-by: Yu Wu --- hw_fence/src/hw_fence_drv_priv.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c index e084e61882..52d1159b2f 100644 --- a/hw_fence/src/hw_fence_drv_priv.c +++ b/hw_fence/src/hw_fence_drv_priv.c @@ -1634,14 +1634,17 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data, for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) { if (hw_fence->wait_client_mask & BIT(wait_client_id)) { hw_fence_wait_client = drv_data->clients[wait_client_id]; + + if (!hw_fence_wait_client) + continue; + data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext); if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA) client_data = hw_fence->client_data[data_id]; - if (hw_fence_wait_client) - _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, - hash, 0, client_data, error); + _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence, + hash, 0, client_data, error); } } } From 129a3797e8dd1163aa49809b26355c8bb0573584 Mon Sep 17 00:00:00 2001 From: Grace An Date: Fri, 7 Apr 2023 13:18:37 -0700 Subject: [PATCH 77/77] mm-drivers: hw_fence: add guard to avoid redefinition of synx client IDs On some targets, the synx api defines synx hwfence client IDs. Add guard to prevent redefinitions. Change-Id: If947aa39fa15756c7845613d666dbea84adc3a4b Signed-off-by: Grace An --- hw_fence/include/msm_hw_fence_synx_translation.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hw_fence/include/msm_hw_fence_synx_translation.h b/hw_fence/include/msm_hw_fence_synx_translation.h index 1235d7639e..b1724b588b 100644 --- a/hw_fence/include/msm_hw_fence_synx_translation.h +++ b/hw_fence/include/msm_hw_fence_synx_translation.h @@ -8,12 +8,13 @@ #include +extern bool hw_fence_driver_enable; + +#ifndef SYNX_HW_FENCE_CLIENT_START #define SYNX_HW_FENCE_CLIENT_START 1024 #define SYNX_HW_FENCE_CLIENT_END 4096 #define SYNX_MAX_SIGNAL_PER_CLIENT 64 -extern bool hw_fence_driver_enable; - /** * enum synx_client_id : Unique identifier of the supported clients * @@ -80,6 +81,7 @@ enum synx_hwfence_client_id { SYNX_MAX_SIGNAL_PER_CLIENT, SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END, }; +#endif #if IS_ENABLED(CONFIG_QTI_HW_FENCE) /**