Merge tag 'KERNEL.PLATFORM.1.0.r1-18300-kernel.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.10 into android13-5.10-waipio
"KERNEL.PLATFORM.1.0.r1-18300-kernel.0" * tag 'KERNEL.PLATFORM.1.0.r1-18300-kernel.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.10: (35 commits) soc: qcom: power_state: Fix storing suspend_delay value msm-5.10: qseecom: Fix possible race condition sched: walt: add support for sibling clusters Revert "sched: walt: add support for adding frequency relationship between clusters" drivers: soc: qcom: Add support for CDSP hibernation drivers: dcvs: bwmon: synchronize_irq before hibernation dsp-kernel: Avoid taking reference for group_info msm-geni-se: Update Tx and Rx fifo depth based on QUP HW version usb: gadget: f_fs_ipc_log: Remove status variable from ffs_ep adsprpc: Handle UAF scenario in put_args usb: phy: eusb2: Allow eud_detect_reg to be optional usb: phy: eusb2: Check for eud detect reg before accessing it cnss2: Fix mbox_msg size not aligned issue sched: walt: uclamp based frequency guidance msm: synx: Fixing the Uninitializing h_child_list msm: Enable qseecom clocks when smcinvoke sends an invoke soc: qcom: rpmh: shift spin_lock_init before cpu_pm_register_notifier usb: phy: eusb2: Add 10us delay in eusb2 phy init cnss2: Fix mbox_msg size calculated neo_le: Disable autosleep on neo_le ... Change-Id: I6190a64ecbe4cba3b8f5dc23b92c0003e6c5de41
commit 27f38502e7

arch/arm64/configs/vendor/neo_la.config (vendored)
@@ -189,6 +189,7 @@ CONFIG_MSM_GPI_DMA=m
 # CONFIG_MSM_MMCC_8996 is not set
 # CONFIG_MSM_MMCC_8998 is not set
 CONFIG_MSM_PERFORMANCE=m
+CONFIG_MSM_POWER_STATE=m
 CONFIG_MSM_QMP=m
 CONFIG_MSM_RDBG=m
 # CONFIG_MSM_REMOTEQDSS is not set
arch/arm64/configs/vendor/neo_le.config (vendored)
@@ -216,7 +216,6 @@ CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_PINCTRL_MSM=m
 CONFIG_PINCTRL_NEO=m
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
-CONFIG_PM_AUTOSLEEP=y
 CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m
 CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y
 # CONFIG_POWER_RESET_QCOM_PON is not set
@@ -631,6 +631,8 @@ struct fastrpc_mmap {
 	struct timespec64 map_end_time;
 	bool is_filemap; /* flag to indicate map used in process init */
 	unsigned int ctx_refs; /* Indicates reference count for context map */
+	/* Map in use for dma handle */
+	unsigned int dma_handle_refs;
 };

 enum fastrpc_perfkeys {
@@ -1203,7 +1205,10 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,

 	if ((va + len) < va)
 		return -EFAULT;
-	if (mflags == ADSP_MMAP_DMA_BUFFER) {
+	if (mflags == ADSP_MMAP_HEAP_ADDR ||
+		mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+		return -EFAULT;
+	} else if (mflags == ADSP_MMAP_DMA_BUFFER) {
 		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
 			if (map->buf == buf) {
 				if (refs) {
@@ -1303,6 +1308,8 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va,
 			map->refs == 1 &&
 			/* Remove if only one reference map and no context map */
 			!map->ctx_refs &&
+			/* Remove map only if it isn't being used by DSP */
+			!map->dma_handle_refs &&
 			/* Skip unmap if it is fastrpc shell memory */
 			!map->is_filemap) {
 			match = map;
@@ -1342,7 +1349,8 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
 	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
 		map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		spin_lock_irqsave(&me->hlock, irq_flags);
-		map->refs--;
+		if (map->refs)
+			map->refs--;
 		if (!map->refs && !map->ctx_refs)
 			hlist_del_init(&map->hn);
 		if (map->refs > 0) {
@@ -1354,8 +1362,12 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
 		}
 		spin_unlock_irqrestore(&me->hlock, irq_flags);
 	} else {
-		map->refs--;
-		if (!map->refs && !map->ctx_refs)
+		if (map->refs)
+			map->refs--;
+		/* flags is passed as 1 during fastrpc_file_free (ie process exit),
+		 * so that maps will be cleared even though references are present.
+		 */
+		if (!map->refs && !map->ctx_refs && !map->dma_handle_refs)
 			hlist_del_init(&map->hn);
 		if (map->refs > 0 && !flags)
 			return;
@@ -2661,12 +2673,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 				FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
 				&ctx->maps[i]);
 		if (!err && ctx->maps[i])
-			ctx->maps[i]->ctx_refs++;
+			ctx->maps[i]->dma_handle_refs++;
 		if (err) {
 			for (j = bufs; j < i; j++) {
-				if (ctx->maps[j] && ctx->maps[j]->ctx_refs)
-					ctx->maps[j]->ctx_refs--;
-				fastrpc_mmap_free(ctx->maps[j], 0);
+				if (ctx->maps[j] && ctx->maps[j]->dma_handle_refs) {
+					ctx->maps[j]->dma_handle_refs--;
+					fastrpc_mmap_free(ctx->maps[j], 0);
+				}
 			}
 			mutex_unlock(&ctx->fl->map_mutex);
 			goto bail;
@@ -2804,13 +2817,33 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 		rpra[i].buf.pv = buf;
 	}
 	PERF_END);
+	/* Since we are not holidng map_mutex during get args whole time
+	 * it is possible that dma handle map may be removed by some invalid
+	 * fd passed by DSP. Inside the lock check if the map present or not
+	 */
+	mutex_lock(&ctx->fl->map_mutex);
 	for (i = bufs; i < bufs + handles; ++i) {
-		struct fastrpc_mmap *map = ctx->maps[i];
-
-		if (map) {
-			pages[i].addr = map->phys;
-			pages[i].size = map->size;
+		struct fastrpc_mmap *mmap = NULL;
+		/* check if map was created */
+		if (ctx->maps[i]) {
+			/* check if map still exist */
+			if (!fastrpc_mmap_find(ctx->fl, ctx->fds[i], NULL, 0, 0,
+				0, 0, &mmap)) {
+				if (mmap) {
+					pages[i].addr = mmap->phys;
+					pages[i].size = mmap->size;
+				}
+			} else {
+				/* map already freed by some other call */
+				mutex_unlock(&ctx->fl->map_mutex);
+				ADSPRPC_ERR("could not find map associated with dma handle fd %d\n",
+					ctx->fds[i]);
+				goto bail;
+			}
 		}
 	}
+	mutex_unlock(&ctx->fl->map_mutex);
 	fdlist = (uint64_t *)&pages[bufs + handles];
 	crclist = (uint32_t *)&fdlist[M_FDLIST];
 	/* reset fds, crc and early wakeup hint memory */
@@ -3009,9 +3042,10 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
 			break;
 		if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], NULL, 0, 0,
 			0, 0, &mmap)){
-			if (mmap && mmap->ctx_refs)
-				mmap->ctx_refs--;
-			fastrpc_mmap_free(mmap, 0);
+			if (mmap && mmap->dma_handle_refs) {
+				mmap->dma_handle_refs = 0;
+				fastrpc_mmap_free(mmap, 0);
+			}
 		}
 	}
 	mutex_unlock(&ctx->fl->map_mutex);
@@ -6296,7 +6330,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)

 static int fastrpc_get_process_gids(struct gid_list *gidlist)
 {
-	struct group_info *group_info = get_current_groups();
+	struct group_info *group_info = current_cred()->group_info;
 	int i = 0, err = 0, num_gids = group_info->ngroups + 1;
 	unsigned int *gids = NULL;
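The adsprpc hunks above fix the put_args UAF by giving DSP dma handles their own counter (dma_handle_refs) instead of piggybacking on ctx_refs, and by never freeing a map that still has any class of reference. A minimal sketch of that take/put discipline, with hypothetical names (struct demo_map and the helpers are illustrative, not the driver's API):

    #include <stdbool.h>

    /* Sketch only: the refcount classes the diff introduces. */
    struct demo_map {
        unsigned int refs;            /* generic references */
        unsigned int ctx_refs;        /* references from in-flight contexts */
        unsigned int dma_handle_refs; /* references held on behalf of the DSP */
    };

    /* taken in get_args() when a dma handle is handed to the DSP */
    static void demo_map_get_dma(struct demo_map *m)
    {
        m->dma_handle_refs++;
    }

    /* put path: never underflow; free only when no class holds the map */
    static bool demo_map_put(struct demo_map *m)
    {
        if (m->refs)
            m->refs--;
        return !m->refs && !m->ctx_refs && !m->dma_handle_refs;
    }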
@@ -345,8 +345,12 @@ int qcedev_check_and_map_buffer(void *handle,
 	return 0;

 unmap:
-	if (!found)
+	if (!found) {
 		qcedev_unmap_buffer(handle, mem_client, binfo);
+		mutex_lock(&qce_hndl->registeredbufs.lock);
+		list_del(&binfo->list);
+		mutex_unlock(&qce_hndl->registeredbufs.lock);
+	}

 error:
 	kfree(binfo);
@@ -3,8 +3,11 @@
  * QTI TEE shared memory bridge driver
  *
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #define pr_fmt(fmt) "qtee_shmbridge: [%s][%d]:" fmt, __func__, __LINE__

 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/slab.h>
@@ -299,6 +302,12 @@ int32_t qtee_shmbridge_allocate_shm(size_t size, struct qtee_shm *shm)
 		goto exit;
 	}

+	if (!default_bridge.genpool) {
+		pr_err("Shmbridge pool not available!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
 	size = roundup(size, 1 << default_bridge.min_alloc_order);

 	va = gen_pool_alloc(default_bridge.genpool, size);
@@ -314,7 +323,7 @@ int32_t qtee_shmbridge_allocate_shm(size_t size, struct qtee_shm *shm)
 	shm->size = size;

 	pr_debug("%s: shm->paddr %llx, size %zu\n",
-			__func__, (uint64_t)shm->paddr, shm->size);
+		__func__, (uint64_t)shm->paddr, shm->size);

 exit:
 	return ret;
@@ -362,7 +371,6 @@ static int qtee_shmbridge_init(struct platform_device *pdev)
 	uint32_t ns_vm_ids_hlos[] = {VMID_HLOS};
 	uint32_t ns_vm_ids_hyp[] = {};
 	uint32_t ns_vm_perms[] = {VM_PERM_R|VM_PERM_W};
-	int mem_protection_enabled = 0;

 	support_hyp = of_property_read_bool((&pdev->dev)->of_node,
 			"qcom,support-hypervisor");
@@ -383,7 +391,7 @@ static int qtee_shmbridge_init(struct platform_device *pdev)
 	else
 		default_bridge.size = custom_bridge_size * MIN_BRIDGE_SIZE;

-	pr_err("qtee shmbridge registered default bridge with size %d bytes\n",
+	pr_debug("qtee shmbridge registered default bridge with size %d bytes\n",
 		default_bridge.size);

 	default_bridge.vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP,
@@ -449,13 +457,9 @@ static int qtee_shmbridge_init(struct platform_device *pdev)
 		goto exit_deregister_default_bridge;
 	}

-	pr_debug("qtee shmbridge registered default bridge with size %d bytes\n",
-		default_bridge.size);
+	pr_err("shmbridge registered default bridge with size %zu bytes, paddr: %llx\n",
+		default_bridge.size, (uint64_t)default_bridge.paddr);

-	mem_protection_enabled = scm_mem_protection_init_do();
-	pr_debug("MEM protection %s, %d\n",
-		(!mem_protection_enabled ? "Enabled" : "Not enabled"),
-		mem_protection_enabled);
 	return 0;

 exit_deregister_default_bridge:
@@ -463,6 +467,7 @@ static int qtee_shmbridge_init(struct platform_device *pdev)
 	qtee_shmbridge_enable(false);
 exit_destroy_pool:
 	gen_pool_destroy(default_bridge.genpool);
+	default_bridge.genpool = NULL;
 exit_unmap:
 	dma_unmap_single(&pdev->dev, default_bridge.paddr, default_bridge.size,
 			DMA_TO_DEVICE);
@@ -475,22 +480,71 @@ static int qtee_shmbridge_init(struct platform_device *pdev)

 static int qtee_shmbridge_probe(struct platform_device *pdev)
 {
+	int ret = 0;
 #ifdef CONFIG_ARM64
-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret < 0)
+		pr_err("Failed to set mask, ret:%d\n", ret);
 #endif
 	return qtee_shmbridge_init(pdev);
 }

 static int qtee_shmbridge_remove(struct platform_device *pdev)
 {
-	qtee_shmbridge_deregister(default_bridge.handle);
+	int ret = 0;
+
+	ret = qtee_shmbridge_deregister(default_bridge.handle);
+	if (ret < 0)
+		pr_err("shmbridge deregisteration fails, ret: %d\n", ret);
+	ret = qtee_shmbridge_enable(false);
+	if (ret < 0)
+		pr_err("Disabling shmbridge fails, ret: %d\n", ret);
 	gen_pool_destroy(default_bridge.genpool);
+	default_bridge.genpool = NULL;
 	dma_unmap_single(&pdev->dev, default_bridge.paddr, default_bridge.size,
 			DMA_TO_DEVICE);
 	free_pages((long)default_bridge.vaddr, get_order(default_bridge.size));
+	default_bridge.vaddr = NULL;
 	return 0;
 }

+#ifdef CONFIG_PM
+static int qtee_shmbridge_freeze(struct device *dev)
+{
+	int ret = 0;
+
+	pr_err("Freeze entry\n");
+	ret = qtee_shmbridge_remove(to_platform_device(dev));
+	if (ret < 0)
+		pr_err("Error in removing shmbridge instance, ret: %d\n", ret);
+	pr_err("Freeze exit\n");
+	return ret;
+}
+
+static int qtee_shmbridge_restore(struct device *dev)
+{
+	int ret = 0;
+
+	pr_err("Restore entry\n");
+	ret = qtee_shmbridge_probe(to_platform_device(dev));
+	if (ret < 0)
+		pr_err("Issue in shmbridge reinit\n");
+	pr_err("Restore exit\n");
+	return 0;
+}
+
+static const struct dev_pm_ops qtee_shmbridge_pmops = {
+	.freeze_late = qtee_shmbridge_freeze,
+	.restore_early = qtee_shmbridge_restore,
+	.thaw_early = qtee_shmbridge_restore,
+};
+
+#define QTEE_SHMBRIDGE_PMOPS (&qtee_shmbridge_pmops)
+
+#else
+#define QTEE_SHMBRIDGE_PMOPS NULL
+#endif
+
 static const struct of_device_id qtee_shmbridge_of_match[] = {
 	{ .compatible = "qcom,tee-shared-memory-bridge"},
 	{}
@@ -503,6 +557,7 @@ static struct platform_driver qtee_shmbridge_driver = {
 	.driver = {
 		.name = "shared_memory_bridge",
 		.of_match_table = qtee_shmbridge_of_match,
+		.pm = QTEE_SHMBRIDGE_PMOPS,
 	},
 };
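The shmbridge driver survives hibernation by fully tearing the bridge down in freeze_late and re-probing in restore_early/thaw_early, since the TEE-owned bridge cannot persist across the image. A minimal sketch of wiring that pattern into a platform driver (all names here are illustrative stand-ins, not the driver's real symbols):

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    /* Hypothetical init/teardown; a real driver would use its own. */
    static int demo_remove(struct platform_device *pdev) { return 0; }
    static int demo_probe(struct platform_device *pdev) { return 0; }

    static int demo_freeze(struct device *dev)
    {
        /* release firmware/TEE-owned resources before the image is written */
        return demo_remove(to_platform_device(dev));
    }

    static int demo_restore(struct device *dev)
    {
        /* rebuild everything once the image is restored */
        return demo_probe(to_platform_device(dev));
    }

    static const struct dev_pm_ops demo_pm_ops = {
        .freeze_late   = demo_freeze,
        .restore_early = demo_restore,
        .thaw_early    = demo_restore, /* an aborted hibernation re-probes too */
    };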
@@ -154,7 +154,7 @@ static int get_sg_from_child(struct sg_table *sgt, struct kgsl_memdesc *child,
 	int pgoffset = (offset >> PAGE_SHIFT);
 	struct scatterlist *target_sg;
 	struct sg_page_iter iter;
-	int ret;
+	int i = 0, ret;

 	if (child->pages)
 		return sg_alloc_table_from_pages(sgt,
@@ -167,9 +167,12 @@ static int get_sg_from_child(struct sg_table *sgt, struct kgsl_memdesc *child,

 	target_sg = sgt->sgl;

-	for_each_sg_page(child->sgt->sgl, &iter, npages, pgoffset) {
+	for_each_sgtable_page(child->sgt, &iter, pgoffset) {
 		sg_set_page(target_sg, sg_page_iter_page(&iter), PAGE_SIZE, 0);
 		target_sg = sg_next(target_sg);
+
+		if (++i == npages)
+			break;
 	}

 	return 0;
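Unlike for_each_sg_page(), for_each_sgtable_page() takes no page-count argument and walks the whole table, so the kgsl hunk re-imposes the npages bound with an explicit counter. A hedged stand-alone illustration of the same bounded iteration (the helper is hypothetical):

    #include <linux/scatterlist.h>

    /* Sketch: collect at most npages page pointers from an sg_table,
     * starting at pgoffset, mirroring the bounded loop above.
     */
    static void demo_collect_pages(struct sg_table *src, struct page **out,
                                   int npages, int pgoffset)
    {
        struct sg_page_iter iter;
        int i = 0;

        /* the iterator has no count parameter, so bound it manually */
        for_each_sgtable_page(src, &iter, pgoffset) {
            out[i] = sg_page_iter_page(&iter);
            if (++i == npages)
                break;
        }
    }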
@@ -20,6 +20,7 @@
 #include <linux/property.h>
 #include <linux/spinlock.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/suspend.h>

 #include "coresight-priv.h"
 #include "coresight-cti.h"
@@ -1142,6 +1143,27 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
 	return ret;
 }

+
+#ifdef CONFIG_HIBERNATION
+static int cti_freeze(struct device *dev)
+{
+	int rc = 0;
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (drvdata->config.hw_enabled)
+		rc = cti_disable(drvdata->csdev);
+
+	return rc;
+}
+
+#endif
+
+static const struct dev_pm_ops cti_dev_pm_ops = {
+#ifdef CONFIG_HIBERNATION
+	.freeze = cti_freeze,
+#endif
+};
 static struct amba_cs_uci_id uci_id_cti[] = {
 	{
 		/* CTI UCI data */
@@ -1167,6 +1189,7 @@ static struct amba_driver cti_driver = {
 	.drv = {
 		.name = "coresight-cti",
 		.owner = THIS_MODULE,
+		.pm = &cti_dev_pm_ops,
 		.suppress_bind_attrs = true,
 	},
 	.probe = cti_probe,
@@ -31,6 +31,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+#include <linux/suspend.h>

 #include <asm/barrier.h>
 #include <asm/sections.h>
@@ -2038,6 +2039,24 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
 	return ret;
 }

+
+#ifdef CONFIG_HIBERNATION
+static int etm_freeze(struct device *dev)
+{
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+
+	coresight_disable(drvdata->csdev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops etm_dev_pm_ops = {
+#ifdef CONFIG_HIBERNATION
+	.freeze = etm_freeze,
+#endif
+};
+
 static struct amba_cs_uci_id uci_id_etm4[] = {
 	{
 		/* ETMv4 UCI data */
@@ -2123,6 +2142,7 @@ static struct amba_driver etm4x_amba_driver = {
 		.name = "coresight-etm4x",
 		.owner = THIS_MODULE,
 		.suppress_bind_attrs = true,
+		.pm = &etm_dev_pm_ops,
 	},
 	.probe = etm4_probe_amba,
 	.remove = etm4_remove_amba,
@@ -29,8 +29,10 @@
 #include <linux/perf_event.h>
 #include <linux/pm_runtime.h>
 #include <linux/stm.h>
+#include <linux/suspend.h>

 #include "coresight-priv.h"
+#include "../stm/stm.h"

 #define STMDMASTARTR 0xc04
 #define STMDMASTOPR 0xc08
@@ -925,6 +927,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_info(dev,
 			"%s : stm_register_device failed, probing deferred\n",
 			desc.name);
+		pm_runtime_put(&adev->dev);
 		return -EPROBE_DEFER;
 	}

@@ -989,8 +992,35 @@ static int stm_runtime_resume(struct device *dev)
 }
 #endif

+#ifdef CONFIG_HIBERNATION
+static int stm_freeze(struct device *dev)
+{
+	struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+	struct coresight_device *csdev = drvdata->csdev;
+
+	struct stm_device *stm_dev;
+	struct list_head *head, *p;
+
+	atomic_set(csdev->refcnt, 1);
+	coresight_disable(drvdata->csdev);
+
+	stm_dev = drvdata->stm.stm;
+	if (stm_dev) {
+		head = &stm_dev->link_list;
+		list_for_each(p, head)
+			pm_runtime_put_autosuspend(&stm_dev->dev);
+	}
+
+	return 0;
+}
+
+#endif
+
 static const struct dev_pm_ops stm_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
+#ifdef CONFIG_HIBERNATION
+	.freeze = stm_freeze,
+#endif
 };

 static const struct amba_id stm_ids[] = {
@@ -15,6 +15,7 @@
 #include <linux/coresight.h>
 #include <linux/regulator/consumer.h>
 #include <linux/qcom_scm.h>
+#include <linux/suspend.h>

 #include "coresight-priv.h"
 #include "coresight-common.h"
@@ -4244,6 +4245,23 @@ static void __exit tpdm_remove(struct amba_device *adev)
 	coresight_unregister(drvdata->csdev);
 }

+#ifdef CONFIG_HIBERNATION
+static int tpdm_freeze(struct device *dev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev);
+
+	coresight_disable(drvdata->csdev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops tpdm_dev_pm_ops = {
+#ifdef CONFIG_HIBERNATION
+	.freeze = tpdm_freeze,
+#endif
+};
+
 static struct amba_id tpdm_ids[] = {
 	{
 		.id = 0x0003b968,
@@ -4259,6 +4277,7 @@ static struct amba_driver tpdm_driver = {
 		.name = "coresight-tpdm",
 		.owner = THIS_MODULE,
 		.suppress_bind_attrs = true,
+		.pm = &tpdm_dev_pm_ops,
 	},
 	.probe = tpdm_probe,
 	.remove = tpdm_remove,
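The four CoreSight drivers above (CTI, ETMv4, STM, TPDM) all gain the same shape of hibernation support: a CONFIG_HIBERNATION-gated .freeze callback that quiesces the trace component before the hibernation image is written. A condensed sketch of the shared pattern (the drvdata layout is illustrative):

    #include <linux/coresight.h>
    #include <linux/device.h>
    #include <linux/pm.h>

    /* Hypothetical drvdata with the one field the hook needs. */
    struct demo_cs_drvdata {
        struct coresight_device *csdev;
    };

    #ifdef CONFIG_HIBERNATION
    static int demo_cs_freeze(struct device *dev)
    {
        struct demo_cs_drvdata *drvdata = dev_get_drvdata(dev);

        /* disable the component so no trace state leaks across the image */
        coresight_disable(drvdata->csdev);
        return 0;
    }
    #endif

    static const struct dev_pm_ops demo_cs_pm_ops = {
    #ifdef CONFIG_HIBERNATION
        .freeze = demo_cs_freeze,   /* compiled in only with hibernation */
    #endif
    };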
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #include <linux/clk.h>
@@ -620,7 +620,7 @@ static int geni_i2c_prepare(struct geni_i2c_dev *gi2c)
 		I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
 			"i2c in GSI ONLY mode\n");
 	} else {
-		int gi2c_tx_depth = get_tx_fifo_depth(gi2c->base);
+		int gi2c_tx_depth = get_tx_fifo_depth(&gi2c->i2c_rsc);

 		gi2c->se_mode = FIFO_SE_DMA;

@@ -2078,6 +2078,7 @@ static int geni_i2c_probe(struct platform_device *pdev)

 	gi2c->i2c_rsc.wrapper_dev = &wrapper_pdev->dev;
 	gi2c->i2c_rsc.ctrl_dev = gi2c->dev;
+	gi2c->i2c_rsc.base = gi2c->base;

 	/*
 	 * For LE, clocks, gpio and icb voting will be provided by
@@ -1837,6 +1837,7 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
 	}

 	gi3c->se.i3c_rsc.wrapper_dev = &wrapper_pdev->dev;
+	gi3c->se.i3c_rsc.base = gi3c->se.base;

 	ret = geni_se_resources_init(&gi3c->se.i3c_rsc, I3C_CORE2X_VOTE,
 			(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
@@ -2093,7 +2094,7 @@ static int geni_i3c_probe(struct platform_device *pdev)
 		goto geni_resources_off;
 	}

-	tx_depth = get_tx_fifo_depth(gi3c->se.base);
+	tx_depth = get_tx_fifo_depth(&gi3c->se.i3c_rsc);
 	gi3c->tx_wm = tx_depth - 1;
 	geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth);
 	se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true);
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #include <linux/debugfs.h>
@@ -491,7 +492,11 @@ static int msm_cvp_remove(struct platform_device *pdev)
 		return -EINVAL;
 	}

-	core = dev_get_drvdata(&pdev->dev);
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cvp"))
+		core = dev_get_drvdata(&pdev->dev);
+	else
+		core = dev_get_drvdata(pdev->dev.parent);
+
 	if (!core) {
 		dprintk(CVP_ERR, "%s invalid core", __func__);
 		return -EINVAL;
@@ -655,7 +655,6 @@ int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
 			rc = -EFAULT;
 			break;
 		}
-		pbuf->ktid = ktid;
 		rc = 0;
 	}
 	return rc;
@@ -1034,13 +1034,13 @@ int synx_merge(struct synx_session *session,
 			kfree(h_child_list);
 			goto clear;
 		}
-		kfree(h_child_list);
 	}

 	dprintk(SYNX_MEM,
 		"[sess :%llu] merge allocated %u, core %pK, fence %pK\n",
 		client->id, *params->h_merged_obj, synx_obj,
 		synx_obj->fence);
+	kfree(h_child_list);
 	synx_put_client(client);
 	return SYNX_SUCCESS;

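The synx change moves the kfree() of h_child_list out of the loop body down to the success exit, so the buffer is released exactly once, after its last use, on every path. A small sketch of that single-release-point shape (demo_consume() is a hypothetical stand-in):

    #include <linux/slab.h>

    static int demo_consume(u32 v) { return 0; }

    /* Sketch: the buffer outlives the loop and is freed exactly once. */
    static int demo_merge(const u32 *src, int n)
    {
        int i, rc = 0;
        u32 *children = kmemdup(src, n * sizeof(*src), GFP_KERNEL);

        if (!children)
            return -ENOMEM;

        for (i = 0; i < n; i++) {
            rc = demo_consume(children[i]);
            if (rc)
                break;      /* fall through to the one kfree() */
        }

        kfree(children);    /* released once, after its last use */
        return rc;
    }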
@@ -3,7 +3,7 @@
  * QTI Secure Execution Environment Communicator (QSEECOM) driver
  *
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

 #define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
@@ -1883,6 +1883,20 @@ static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
 	return ret;
 }

+int qseecom_set_msm_bus_request_from_smcinvoke(enum qseecom_bandwidth_request_mode mode)
+{
+	int ret = 0;
+
+	if (mode > HIGH || mode < INACTIVE) {
+		pr_err("Invalid mode %d for clock requested, setting clock to INACTIVE\n", mode);
+		return -EINVAL;
+	}
+	ret = qseecom_scale_bus_bandwidth_timer(mode);
+	if (ret)
+		pr_err("Failed (%d) to set bw for request_mode (%d)\n", ret, mode);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(qseecom_set_msm_bus_request_from_smcinvoke);
+
 static int qseecom_unregister_bus_bandwidth_needs(
 		struct qseecom_dev_handle *data)
@@ -7637,14 +7651,15 @@ long qseecom_ioctl(struct file *file,

 	switch (cmd) {
 	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		mutex_lock(&listener_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("reg lstnr req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&listener_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		pr_debug("ioctl register_listener_req()\n");
-		mutex_lock(&listener_access_lock);
 		atomic_inc(&data->ioctl_count);
 		data->type = QSEECOM_LISTENER_SERVICE;
 		ret = qseecom_register_listener(data, argp);
@@ -7656,15 +7671,16 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		mutex_lock(&listener_access_lock);
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
 			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
 				data->type, data->listener.id);
+			mutex_unlock(&listener_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		pr_debug("ioctl unregister_listener_req()\n");
-		mutex_lock(&listener_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_unregister_listener(data);
 		atomic_dec(&data->ioctl_count);
@@ -7675,15 +7691,16 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		if (qseecom.support_bus_scaling) {
 			/* register bus bw in case the client doesn't do it */
 			if (!data->mode) {
@@ -7737,15 +7754,16 @@ long qseecom_ioctl(struct file *file,
 	}
 	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
 	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		if (qseecom.support_bus_scaling) {
 			if (!data->mode) {
 				mutex_lock(&qsee_bw_mutex);
@@ -7801,13 +7819,16 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		mutex_lock(&listener_access_lock);
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
 			pr_err("receive req: invalid handle (%d), lid(%d)\n",
 				data->type, data->listener.id);
+			mutex_unlock(&listener_access_lock);
 			ret = -EINVAL;
 			break;
 		}
+		mutex_unlock(&listener_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_receive_req(data);
 		atomic_dec(&data->ioctl_count);
@@ -7817,14 +7838,15 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		mutex_lock(&listener_access_lock);
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
 			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
 				data->type, data->listener.id);
+			mutex_unlock(&listener_access_lock);
 			ret = -EINVAL;
 			break;
 		}
-		mutex_lock(&listener_access_lock);
 		atomic_inc(&data->ioctl_count);
 		if (!qseecom.qsee_reentrancy_support)
 			ret = qseecom_send_resp();
@@ -7838,16 +7860,17 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		mutex_lock(&app_access_lock);
 		if ((data->type != QSEECOM_CLIENT_APP) &&
 			(data->type != QSEECOM_GENERIC) &&
 			(data->type != QSEECOM_SECURE_SERVICE)) {
 			pr_err("set mem param req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_set_client_mem_param(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -7858,16 +7881,17 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		mutex_lock(&app_access_lock);
 		if ((data->type != QSEECOM_GENERIC) &&
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("load app req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->type = QSEECOM_CLIENT_APP;
 		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_load_app(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -7878,15 +7902,16 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_unload_app(data, false);
 		atomic_dec(&data->ioctl_count);
@@ -7905,10 +7930,12 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		mutex_lock(&app_access_lock);
 		if ((data->type != QSEECOM_GENERIC) &&
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("perf enable req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
@@ -7916,6 +7943,7 @@ long qseecom_ioctl(struct file *file,
 			(data->client.app_id == 0)) {
 			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
@@ -7930,13 +7958,16 @@ long qseecom_ioctl(struct file *file,
 				pr_err("Fail to vote for clocks %d\n", ret);
 		}
 		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
 		break;
 	}
 	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		mutex_lock(&app_access_lock);
 		if ((data->type != QSEECOM_SECURE_SERVICE) &&
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("perf disable req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
@@ -7944,6 +7975,7 @@ long qseecom_ioctl(struct file *file,
 			(data->client.app_id == 0)) {
 			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
@@ -7957,6 +7989,7 @@ long qseecom_ioctl(struct file *file,
 			mutex_unlock(&qsee_bw_mutex);
 		}
 		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
 		break;
 	}

@@ -7966,28 +7999,32 @@ long qseecom_ioctl(struct file *file,
 			pr_debug("crypto clock is not handled by HLOS\n");
 			break;
 		}
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_scale_bus_bandwidth(data, argp);
 		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
 		break;
 	}
 	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("load ext elf req: invalid client handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_load_external_elf(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -7997,14 +8034,15 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
 			pr_err("unload ext elf req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_unload_external_elf(data);
 		atomic_dec(&data->ioctl_count);
@@ -8014,15 +8052,16 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		mutex_lock(&app_access_lock);
 		if ((data->type != QSEECOM_GENERIC) &&
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("app loaded query req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->type = QSEECOM_CLIENT_APP;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
 		ret = qseecom_query_app_loaded(data, argp);
@@ -8031,9 +8070,11 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("send cmd svc req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
@@ -8041,9 +8082,9 @@ long qseecom_ioctl(struct file *file,
 		if (qseecom.qsee_version < QSEE_VERSION_03) {
 			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_send_service_cmd(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -8053,19 +8094,21 @@ long qseecom_ioctl(struct file *file,
 	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
 		if (!(qseecom.support_pfe || qseecom.support_fde))
 			pr_err("Features requiring key init not supported\n");
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("create key req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_05) {
 			pr_err("Create Key feature unsupported: qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_create_key(data, argp);
 		if (ret)
@@ -8078,19 +8121,21 @@ long qseecom_ioctl(struct file *file,
 	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
 		if (!(qseecom.support_pfe || qseecom.support_fde))
 			pr_err("Features requiring key init not supported\n");
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("wipe key req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_05) {
 			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_wipe_key(data, argp);
 		if (ret)
@@ -8102,19 +8147,21 @@ long qseecom_ioctl(struct file *file,
 	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
 		if (!(qseecom.support_pfe || qseecom.support_fde))
 			pr_err("Features requiring key init not supported\n");
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("update key req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_05) {
 			pr_err("Update Key feature unsupported in qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_update_key_user_info(data, argp);
 		if (ret)
@@ -8124,14 +8171,15 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("save part hash req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_save_partition_hash(argp);
 		atomic_dec(&data->ioctl_count);
@@ -8139,14 +8187,15 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("ES activated req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_is_es_activated(argp);
 		atomic_dec(&data->ioctl_count);
@@ -8154,14 +8203,15 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		mutex_lock(&app_access_lock);
 		if (data->type != QSEECOM_GENERIC) {
 			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
 				data->type);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		data->released = true;
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_mdtp_cipher_dip(argp);
 		atomic_dec(&data->ioctl_count);
@@ -8170,14 +8220,15 @@ long qseecom_ioctl(struct file *file,
 	}
 	case QSEECOM_IOCTL_SEND_MODFD_RESP:
 	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		mutex_lock(&listener_access_lock);
 		if ((data->listener.id == 0) ||
 			(data->type != QSEECOM_LISTENER_SERVICE)) {
 			pr_err("receive req: invalid handle (%d), lid(%d)\n",
 				data->type, data->listener.id);
+			mutex_unlock(&listener_access_lock);
 			ret = -EINVAL;
 			break;
 		}
-		mutex_lock(&listener_access_lock);
 		atomic_inc(&data->ioctl_count);
 		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
 			ret = qseecom_send_modfd_resp(data, argp);
@@ -8192,20 +8243,22 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("Open session: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_40) {
 			pr_err("GP feature unsupported: qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_qteec_open_session(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -8217,20 +8270,22 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("Close session: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_40) {
 			pr_err("GP feature unsupported: qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_qteec_close_session(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -8241,20 +8296,22 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_40) {
 			pr_err("GP feature unsupported: qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
 		atomic_dec(&data->ioctl_count);
@@ -8266,20 +8323,22 @@ long qseecom_ioctl(struct file *file,
 		break;
 	}
 	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
 		if ((data->client.app_id == 0) ||
 			(data->type != QSEECOM_CLIENT_APP)) {
 			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
 				data->type, data->client.app_id);
+			mutex_unlock(&app_access_lock);
 			ret = -EINVAL;
 			break;
 		}
 		if (qseecom.qsee_version < QSEE_VERSION_40) {
 			pr_err("GP feature unsupported: qsee ver %u\n",
 				qseecom.qsee_version);
+			mutex_unlock(&app_access_lock);
 			return -EINVAL;
 		}
-		/* Only one client allowed here at a time */
-		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
 		ret = qseecom_qteec_request_cancellation(data, argp);
 		atomic_dec(&data->ioctl_count);
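The recurring qseecom change takes the relevant mutex before validating the handle instead of after, closing the window in which another thread could change data->type between the check and the locked section (the "Fix possible race condition" commit). A distilled sketch of the lock-then-validate ordering, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    enum demo_type { DEMO_INVALID, DEMO_VALID };

    struct demo_handle {
        enum demo_type type;
    };

    static DEFINE_MUTEX(demo_lock);

    static int demo_do_work(struct demo_handle *h) { return 0; }

    static int demo_ioctl_case(struct demo_handle *h)
    {
        int ret;

        mutex_lock(&demo_lock);         /* take the lock first ... */
        if (h->type != DEMO_VALID) {    /* ... then validate under it */
            mutex_unlock(&demo_lock);
            return -EINVAL;
        }
        ret = demo_do_work(h);          /* state cannot change underneath us */
        mutex_unlock(&demo_lock);
        return ret;
    }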
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

 #ifndef _CNSS_MAIN_H
@@ -53,6 +53,7 @@
 #define POWER_ON_RETRY_MAX_TIMES 3
 #define POWER_ON_RETRY_DELAY_MS 500
 #define WLFW_MAX_HANG_EVENT_DATA_SIZE 384
+#define CNSS_MBOX_MSG_MAX_LEN 64

 #define CNSS_EVENT_SYNC BIT(0)
 #define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

 #include <linux/clk.h>
@@ -79,7 +79,6 @@ static struct cnss_clk_cfg cnss_clk_list[] = {
 #define MAX_TCS_NUM 8
 #define MAX_TCS_CMD_NUM 5
 #define BT_CXMX_VOLTAGE_MV 950
-#define CNSS_MBOX_MSG_MAX_LEN 64
 #define CNSS_MBOX_TIMEOUT_MS 1000
 /* Platform HW config */
 #define CNSS_PMIC_VOLTAGE_STEP 4
@@ -1205,10 +1204,19 @@ int cnss_aop_mbox_init(struct cnss_plat_data *plat_priv)
 int cnss_aop_send_msg(struct cnss_plat_data *plat_priv, char *mbox_msg)
 {
 	struct qmp_pkt pkt;
+	int mbox_msg_size;
 	int ret = 0;

+	/* 4 bytes alignment is MUST */
+	mbox_msg_size = ((strlen(mbox_msg) + 1) + 0x3) & ~0x3;
+
+	if (mbox_msg_size > CNSS_MBOX_MSG_MAX_LEN) {
+		cnss_pr_err("message length greater than max length\n");
+		return -EINVAL;
+	}
+
 	cnss_pr_dbg("Sending AOP Mbox msg: %s\n", mbox_msg);
-	pkt.size = CNSS_MBOX_MSG_MAX_LEN;
+	pkt.size = mbox_msg_size;
 	pkt.data = mbox_msg;

 	ret = mbox_send_message(plat_priv->mbox_chan, &pkt);
@@ -1232,8 +1240,15 @@ int cnss_aop_pdc_reconfig(struct cnss_plat_data *plat_priv)
 	cnss_pr_dbg("Setting PDC defaults for device ID: %d\n",
 		    plat_priv->device_id);
 	for (i = 0; i < plat_priv->pdc_init_table_len; i++) {
-		ret = cnss_aop_send_msg(plat_priv,
-					(char *)plat_priv->pdc_init_table[i]);
+		char buf[CNSS_MBOX_MSG_MAX_LEN] = {0x00};
+
+		if (strlen(plat_priv->pdc_init_table[i]) > CNSS_MBOX_MSG_MAX_LEN) {
+			cnss_pr_err("msg too long: %s\n", plat_priv->pdc_init_table[i]);
+			continue;
+		}
+
+		snprintf(buf, CNSS_MBOX_MSG_MAX_LEN, plat_priv->pdc_init_table[i]);
+		ret = cnss_aop_send_msg(plat_priv, buf);
 		if (ret < 0)
 			break;
 	}
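The mbox fix rounds the message length (including the NUL terminator) up to a 4-byte boundary and sends that instead of the fixed 64-byte maximum. The round-up idiom it relies on, as a small runnable example:

    #include <stdio.h>
    #include <string.h>

    /* Round a string's size (incl. terminator) up to 4 bytes, as the
     * cnss_aop_send_msg() fix does.
     */
    static size_t aligned_msg_size(const char *msg)
    {
        /* (len + 1 + 3) & ~3: adding alignment-1 then masking the low
         * bits rounds up to the next multiple of 4.
         */
        return ((strlen(msg) + 1) + 0x3) & ~(size_t)0x3;
    }

    int main(void)
    {
        printf("%zu\n", aligned_msg_size("abc"));   /* 4: 3+1 is already aligned */
        printf("%zu\n", aligned_msg_size("abcd"));  /* 8: 4+1 rounds up to 8 */
        return 0;
    }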
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

 #include <linux/module.h>
@@ -557,10 +557,11 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
 	for (i = 0; i < plat_priv->on_chip_pmic_devices_count; i++) {
 		if (plat_priv->board_info.board_id ==
 		    plat_priv->on_chip_pmic_board_ids[i]) {
+			char buf[CNSS_MBOX_MSG_MAX_LEN] =
+				"{class: wlan_pdc, ss: rf, res: pdc, enable: 0}";
 			cnss_pr_dbg("Disabling WLAN PDC for board_id: %02x\n",
 				    plat_priv->board_info.board_id);
-			ret = cnss_aop_send_msg(plat_priv,
-				"{class: wlan_pdc, ss: rf, res: pdc, enable: 0}");
+			ret = cnss_aop_send_msg(plat_priv, buf);
 			if (ret < 0)
 				cnss_pr_dbg("Failed to Send AOP Msg");
 			break;
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

 #include <linux/clk.h>
@@ -20,7 +20,6 @@
 #include <linux/spinlock.h>
 #include <linux/pinctrl/consumer.h>

-
 #define QUPV3_TEST_BUS_EN 0x204		//write 0x11
 #define QUPV3_TEST_BUS_SEL 0x200	//write 0x5 [for SE index 4)
 #define QUPV3_TEST_BUS_REG 0x208	//Read only reg, to be read as part of dump
@@ -623,19 +622,27 @@ EXPORT_SYMBOL(geni_abort_s_cmd);

 /**
  * get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
- * @base: Base address of the serial engine's register block.
+ * @se: Pointer to the concerned serial engine.
  *
  * This function is used to get the depth i.e. number of elements in the
  * TX fifo of the serial engine.
  *
  * Return: TX fifo depth in units of FIFO words.
  */
-int get_tx_fifo_depth(void __iomem *base)
+int get_tx_fifo_depth(struct se_geni_rsc *se)
 {
 	int tx_fifo_depth;
+	u32 ret, hw_major, hw_minor, hw_step, tx_fifo_depth_mask;

-	tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
-			& TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
+	ret = geni_se_qupv3_hw_version(se->wrapper_dev, &hw_major,
+				       &hw_minor, &hw_step);
+	if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+		tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK_256_BYTES;
+	else
+		tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK;
+
+	tx_fifo_depth = ((geni_read_reg(se->base, SE_HW_PARAM_0)
+			& tx_fifo_depth_mask) >> TX_FIFO_DEPTH_SHFT);
 	return tx_fifo_depth;
 }
 EXPORT_SYMBOL(get_tx_fifo_depth);
@@ -661,19 +668,27 @@ EXPORT_SYMBOL(get_tx_fifo_width);

 /**
  * get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
- * @base: Base address of the serial engine's register block.
+ * @se: Pointer to the concerned serial engine.
  *
  * This function is used to get the depth i.e. number of elements in the
  * RX fifo of the serial engine.
  *
  * Return: RX fifo depth in units of FIFO words
  */
-int get_rx_fifo_depth(void __iomem *base)
+int get_rx_fifo_depth(struct se_geni_rsc *se)
 {
 	int rx_fifo_depth;
+	u32 ret, hw_major, hw_minor, hw_step, rx_fifo_depth_mask;

-	rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
-			& RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
+	ret = geni_se_qupv3_hw_version(se->wrapper_dev, &hw_major,
+				       &hw_minor, &hw_step);
+	if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+		rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK_256_BYTES;
+	else
+		rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK;
+
+	rx_fifo_depth = ((geni_read_reg(se->base, SE_HW_PARAM_1)
+			& rx_fifo_depth_mask) >> RX_FIFO_DEPTH_SHFT);
 	return rx_fifo_depth;
 }
 EXPORT_SYMBOL(get_rx_fifo_depth);
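Newer QUP hardware has wider FIFOs, so the depth bit-field in SE_HW_PARAM_0/1 is wider too; the helpers now pick the mask from the queried HW version (>= 3.10 implies the 256-byte FIFO layout). A compact sketch of version-gated register decoding (shift and mask values here are illustrative, not the real geni-se constants):

    #include <stdint.h>

    #define DEPTH_SHIFT          24
    #define DEPTH_MASK           (0x3fu << DEPTH_SHIFT)  /* legacy 6-bit field */
    #define DEPTH_MASK_256_BYTES (0xffu << DEPTH_SHIFT)  /* wider field on newer QUP */

    /* Choose the bit-field mask from the controller version before decoding,
     * as get_tx_fifo_depth()/get_rx_fifo_depth() now do.
     */
    static unsigned int fifo_depth(uint32_t hw_param, int major, int minor)
    {
        uint32_t mask = ((major == 3 && minor >= 10) || major > 3)
                        ? DEPTH_MASK_256_BYTES : DEPTH_MASK;

        return (hw_param & mask) >> DEPTH_SHIFT;
    }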
@ -14,6 +14,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <soc/qcom/memory_dump.h>
|
||||
#include <soc/qcom/minidump.h>
|
||||
#include <dt-bindings/soc/qcom,dcc_v2.h>
|
||||
@ -149,6 +150,16 @@ struct dcc_config_entry {
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct reg_state
|
||||
* offset: the offset of the reg to be preserved when dcc is without power
|
||||
* val : the val of the reg to be preserved when dcc is without power
|
||||
*/
|
||||
struct reg_state {
|
||||
uint32_t offset;
|
||||
uint32_t val;
|
||||
};
|
||||
|
||||
struct dcc_drvdata {
|
||||
void __iomem *base;
|
||||
uint32_t reg_size;
|
||||
@ -176,6 +187,11 @@ struct dcc_drvdata {
|
||||
uint8_t curr_list;
|
||||
uint8_t *cti_trig;
|
||||
uint8_t loopoff;
|
||||
uint32_t ram_cpy_len;
|
||||
uint32_t per_ll_reg_cnt;
|
||||
int32_t ll_state_cnt;
|
||||
struct reg_state *ll_state;
|
||||
void *sram_state;
|
||||
uint8_t *qad_output;
|
||||
};
|
||||
|
||||
@ -651,7 +667,7 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_err(drvdata->dev, "DCC list passed %d\n", curr_list);
|
||||
dev_info(drvdata->dev, "DCC list passed %d\n", curr_list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -766,6 +782,7 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
|
||||
}
|
||||
}
|
||||
|
||||
drvdata->ram_cpy_len = drvdata->ram_cfg * 4;
|
||||
err:
|
||||
mutex_unlock(&drvdata->mutex);
|
||||
return ret;
|
||||
@ -791,6 +808,7 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
|
||||
}
|
||||
memset_io(drvdata->ram_base, 0, drvdata->ram_size);
|
||||
drvdata->ram_cfg = 0;
|
||||
drvdata->ram_cpy_len = 0;
|
||||
drvdata->ram_start = 0;
|
||||
mutex_unlock(&drvdata->mutex);
|
||||
}
|
||||
@ -1394,6 +1412,7 @@ static void dcc_config_reset(struct dcc_drvdata *drvdata)
|
||||
}
|
||||
drvdata->ram_start = 0;
|
||||
drvdata->ram_cfg = 0;
|
||||
drvdata->ram_cpy_len = 0;
|
||||
mutex_unlock(&drvdata->mutex);
|
||||
}
|
||||
|
||||
@ -1763,6 +1782,7 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
|
||||
{
|
||||
unsigned char *buf;
|
||||
struct dcc_drvdata *drvdata = file->private_data;
|
||||
int ret;
|
||||
|
||||
/* EOF check */
|
||||
if (drvdata->ram_size <= *ppos)
|
||||
@ -1776,7 +1796,13 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
    if (!buf)
        return -ENOMEM;

    dcc_sram_memcpy(buf, (drvdata->ram_base + *ppos), len);
    ret = dcc_sram_memcpy(buf, (drvdata->ram_base + *ppos), len);
    if (ret) {
        dev_err(drvdata->dev,
            "Target address or size not aligned with 4 bytes\n");
        kfree(buf);
        return ret;
    }

    if (copy_to_user(data, buf, len)) {
        dev_err(drvdata->dev,
@ -2016,6 +2042,18 @@ static int dcc_probe(struct platform_device *pdev)
    if (ret)
        return -EINVAL;

    drvdata->ll_state_cnt = of_property_count_elems_of_size(dev->of_node,
            "ll-reg-offsets", sizeof(u32)); /* optional */
    if (drvdata->ll_state_cnt <= 0) {
        dev_info(dev, "ll-reg-offsets property doesn't exist\n");
        drvdata->ll_state_cnt = 0;
    } else {
        ret = of_property_read_u32(pdev->dev.of_node, "per-ll-reg-cnt",
                &drvdata->per_ll_reg_cnt);
        if (ret)
            return -EINVAL;
    }

    if (BVAL(dcc_readl(drvdata, DCC_HW_INFO), 9)) {
        drvdata->mem_map_ver = DCC_MEM_MAP_VER3;
        drvdata->nr_link_list = dcc_readl(drvdata, DCC_LL_NUM_INFO);
@ -2123,6 +2161,188 @@ static int dcc_remove(struct platform_device *pdev)
    return 0;
}

#ifdef CONFIG_HIBERNATION
static int dcc_state_store(struct device *dev)
{
    int ret = 0, n, i;
    u32 *ll_reg_offsets;
    struct dcc_drvdata *drvdata = dev_get_drvdata(dev);

    if (!drvdata) {
        dev_dbg(dev, "Invalid drvdata\n");
        return -EINVAL;
    }

    if (!is_dcc_enabled(drvdata)) {
        dev_dbg(dev, "DCC is not enabled\n");
        return 0;
    }

    if (!drvdata->ll_state_cnt) {
        dev_dbg(dev, "ll-reg-offsets property doesn't exist\n");
        return 0;
    }

    n = drvdata->ll_state_cnt;
    ll_reg_offsets = kcalloc(n, sizeof(u32), GFP_KERNEL);
    if (!ll_reg_offsets) {
        dev_err(dev, "Failed to alloc memory for reg_offsets\n");
        return -ENOMEM;
    }

    ret = of_property_read_variable_u32_array(dev->of_node,
            "ll-reg-offsets", ll_reg_offsets, n, n);
    if (ret < 0) {
        dev_dbg(dev, "Failed to read ll-reg-offsets property\n");
        goto out;
    }

    drvdata->ll_state = kzalloc(n * sizeof(struct reg_state), GFP_KERNEL);
    if (!drvdata->ll_state) {
        ret = -ENOMEM;
        goto out;
    }

    drvdata->sram_state = kzalloc(drvdata->ram_size, GFP_KERNEL);
    if (!drvdata->sram_state) {
        ret = -ENOMEM;
        goto sram_alloc_err;
    }

    if (dcc_sram_memcpy(drvdata->sram_state, drvdata->ram_base,
            drvdata->ram_cpy_len)) {
        dev_err(dev, "Failed to copy DCC SRAM contents\n");
        ret = -EINVAL;
        goto sram_cpy_err;
    }

    mutex_lock(&drvdata->mutex);

    for (i = 0; i < n; i++) {
        drvdata->ll_state[i].offset = ll_reg_offsets[i];
        drvdata->ll_state[i].val = __raw_readl(drvdata->base + ll_reg_offsets[i]);
    }

    mutex_unlock(&drvdata->mutex);

    kfree(ll_reg_offsets);
    return 0;

sram_cpy_err:
    kfree(drvdata->sram_state);
    drvdata->sram_state = NULL;
sram_alloc_err:
    kfree(drvdata->ll_state);
    drvdata->ll_state = NULL;
out:
    kfree(ll_reg_offsets);
    return ret;
}

static int dcc_state_restore(struct device *dev)
{
    int n, i, j, dcc_ll_index;
    int ret = 0;
    int *sram_state;
    struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
    uint32_t ram_cpy_wlen;

    if (!drvdata) {
        dev_err(dev, "Err: %s Invalid argument\n", __func__);
        return -EINVAL;
    }

    if (!is_dcc_enabled(drvdata)) {
        dev_dbg(dev, "DCC is not enabled\n");
        ret = 0;
        goto out;
    }

    if (!drvdata->ll_state_cnt) {
        dev_dbg(dev, "ll-reg-offsets property doesn't exist\n");
        ret = 0;
        goto out;
    }

    if (!drvdata->sram_state || !drvdata->ll_state) {
        dev_err(dev, "Err: Restore state is NULL\n");
        ret = -EINVAL;
        goto out;
    }

    ram_cpy_wlen = drvdata->ram_cpy_len / 4;
    sram_state = drvdata->sram_state;
    n = drvdata->ll_state_cnt;

    /* sram_state is indexed in 32-bit words; dcc_sram_writel() takes a byte offset */
    for (i = 0; i < ram_cpy_wlen; i++)
        dcc_sram_writel(drvdata, sram_state[i], i * 4);

    mutex_lock(&drvdata->mutex);

    for (i = 0, dcc_ll_index = 0;
         (dcc_ll_index < drvdata->nr_link_list) && (i < n);
         dcc_ll_index++) {

        if (list_empty(&drvdata->cfg_head[dcc_ll_index])) {
            i += drvdata->per_ll_reg_cnt;
            continue;
        }

        for (j = 0; j < drvdata->per_ll_reg_cnt; i++, j++)
            __raw_writel(drvdata->ll_state[i].val,
                    drvdata->base + drvdata->ll_state[i].offset);
    }

    mutex_unlock(&drvdata->mutex);
out:
    kfree(drvdata->sram_state);
    drvdata->sram_state = NULL;

    kfree(drvdata->ll_state);
    drvdata->ll_state = NULL;

    return ret;
}
#endif

#ifdef CONFIG_HIBERNATION
static int dcc_v2_freeze(struct device *dev)
{
    return dcc_state_store(dev);
}

static int dcc_v2_restore(struct device *dev)
{
    return dcc_state_restore(dev);
}

static int dcc_v2_thaw(struct device *dev)
{
    struct dcc_drvdata *drvdata = dev_get_drvdata(dev);

    if (!drvdata || !drvdata->ll_state || !drvdata->sram_state) {
        dev_err(dev, "Err: %s Invalid argument\n", __func__);
        return -EINVAL;
    }

    kfree(drvdata->sram_state);
    kfree(drvdata->ll_state);
    drvdata->sram_state = NULL;
    drvdata->ll_state = NULL;

    return 0;
}
#endif

static const struct dev_pm_ops dcc_v2_pm_ops = {
#ifdef CONFIG_HIBERNATION
    .freeze = dcc_v2_freeze,
    .restore = dcc_v2_restore,
    .thaw = dcc_v2_thaw,
#endif
};

static const struct of_device_id msm_dcc_match[] = {
    { .compatible = "qcom,dcc-v2"},
    {}
@ -2134,6 +2354,7 @@ static struct platform_driver dcc_driver = {
    .driver = {
        .name = "msm-dcc",
        .of_match_table = msm_dcc_match,
        .pm = &dcc_v2_pm_ops,
    },
};
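For orientation, the three callbacks wired into dcc_v2_pm_ops follow the standard Linux hibernation sequence: .freeze runs in the original kernel before the snapshot image is written, .thaw runs in that same kernel once the image is written (or the cycle aborts), and .restore runs in the freshly booted kernel after the image is loaded. Exactly one of thaw/restore runs per cycle, which is why both free the saved copies. An annotated restatement of the ops table above (annotation only, not code from this commit):

/* How the callbacks above are exercised across one hibernation cycle
 * (standard dev_pm_ops semantics):
 */
static const struct dev_pm_ops dcc_v2_pm_ops_annotated = {
#ifdef CONFIG_HIBERNATION
    .freeze  = dcc_v2_freeze,   /* pre-snapshot: save SRAM + link-list regs */
    .thaw    = dcc_v2_thaw,     /* same kernel, post-snapshot: free the copies */
    .restore = dcc_v2_restore,  /* restored kernel: replay the copies, then free */
#endif
};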
@ -1577,6 +1577,7 @@ void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)

    bwmon_monitor_stop(hw);
    mon_irq_disable(m, type);
    synchronize_irq(m->irq);
    free_irq(m->irq, m);
    mon_disable(m, type);
    mon_clear(m, true, type);
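The added synchronize_irq() closes a small teardown window: after mon_irq_disable() masks the source, a handler already running on another CPU may still touch monitor state, so the driver now waits for it to finish before freeing the line and clearing the monitor. The general shape of the pattern, as a sketch (the wrapper name is illustrative; the m->irq / m pairing follows the hunk above):

/* Illustrative quiesce pattern mirroring the hunk above. */
static void quiesce_monitor_irq(unsigned int irq, void *dev_id)
{
    /* source already masked (mon_irq_disable() above) */
    synchronize_irq(irq);   /* wait out any handler still executing */
    free_irq(irq, dev_id);  /* safe: no handler can be running now */
}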
@ -207,9 +207,10 @@ bool qti_hwkm_is_ice_tpkey_set(const struct ice_mmio_data *mmio_data)

    val = qti_hwkm_readl(mmio_data->ice_hwkm_mmio,
            QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS,
            DONE);
            ICE_SLAVE);
    val = val >> 8;

    return (val == 1);
    return (val == 0x1);
}
EXPORT_SYMBOL_GPL(qti_hwkm_is_ice_tpkey_set);
@ -30,6 +30,7 @@

#include "linux/power_state.h"

#if IS_ENABLED(CONFIG_ARCH_MONACO)
#define DS_ENTRY_SMC_ID 0xC3000924
#else
@ -48,6 +49,7 @@
#define DS_ENTRY 1
#define DS_EXIT 0

#define POWER_STATS_BASEMINOR 0
#define POWER_STATS_MAX_MINOR 1
#define POWER_STATE_DEVICE_NAME "power_state"
@ -236,6 +238,7 @@ static int send_deep_sleep_vote(int state, struct power_state_drvdata *drv)
    return msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET, RPM_XO_DS_REQ,
            RPM_XO_DS_ID, &drv->kvp_req, 1);
}

#elif IS_ENABLED(CONFIG_NOTIFY_AOP)
static int send_deep_sleep_vote(int state, struct power_state_drvdata *drv)
{
@ -304,12 +307,15 @@ static long ps_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    case POWER_STATE_MODEM_EXIT:
    case ADSP_SUSPEND:
    case ADSP_EXIT:
    case CDSP_EXIT:
    case CDSP_SUSPEND:
    case POWER_STATE_ADSP_SUSPEND:
    case POWER_STATE_ADSP_EXIT:
        pr_debug("Deprecated ioctl\n");
        break;

    default:
        pr_err("Inside default in power_state.c due to %d\n", cmd);
        ret = -ENOIOCTLCMD;
        pr_err("%s: Default\n", __func__);
        break;
@ -473,7 +479,7 @@ static ssize_t suspend_delay_store(struct kobject *kobj, struct kobj_attribute *
        return ret;
    }

    drv->deep_sleep_allowed = val;
    drv->suspend_delay = val;

    return count;
}
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, 2017-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/init.h>
@ -152,7 +152,7 @@ static void cdsp_loader_unload(struct platform_device *pdev)
    if (!priv)
        return;

    if (priv->pil_h) {
    if (priv->pil_h && cdsp_state == CDSP_SUBSYS_LOADED) {
        dev_dbg(&pdev->dev, "%s: calling subsystem_put\n", __func__);
        rproc_shutdown(priv->pil_h);
        priv->pil_h = NULL;
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@ -1348,6 +1348,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
        return ret;

    spin_lock_init(&drv->lock);
    spin_lock_init(&drv->client.cache_lock);
    init_waitqueue_head(&drv->tcs_wait);
    bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

@ -1391,7 +1392,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
    writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
            drv->tcs_base + RSC_DRV_IRQ_ENABLE);

    spin_lock_init(&drv->client.cache_lock);
    INIT_LIST_HEAD(&drv->client.cache);
    INIT_LIST_HEAD(&drv->client.batch_cache);
@ -172,6 +172,7 @@ static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id;
static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
static unsigned int cb_reqs_inflight;
static bool legacy_smc_call;
static bool smc_clock_support;
static int invoke_cmd;

static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
@ -1701,7 +1702,7 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
        struct qtee_shm *in_shm, struct qtee_shm *out_shm,
        bool retry)
{
    int ret = 0, cmd, retry_count = 0;
    int ret = 0, cmd, retry_count = 0, ret_smc_clk = 0;
    u64 response_type;
    unsigned int data;
    struct file *arr_filp[SMCI_OBJECT_COUNTS_MAX_OO] = {NULL};
@ -1725,9 +1726,32 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
    mutex_lock(&g_smcinvoke_lock);

    do {
        /*
         * If clock support is enabled for smcinvoke, a notification
         * is sent to qseecom to enable/disable clocks when smcinvoke
         * sends an invoke command.
         */
        if (smc_clock_support) {
            ret_smc_clk = qseecom_set_msm_bus_request_from_smcinvoke(HIGH);
            if (ret_smc_clk) {
                pr_err("Clock enablement failed, ret: %d\n",
                        ret_smc_clk);
                ret = -EPERM;
                break;
            }
        }
        ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf,
                out_paddr, out_buf_len, &req->result,
                &response_type, &data, in_shm, out_shm);
        if (smc_clock_support) {
            ret_smc_clk = qseecom_set_msm_bus_request_from_smcinvoke(INACTIVE);
            if (ret_smc_clk) {
                pr_err("Clock disablement failed, ret: %d\n",
                        ret_smc_clk);
                ret = -EPERM;
                break;
            }
        }

        if (ret == -EBUSY) {
            pr_err("Secure side is busy, will retry after 30 ms, retry_count = %d\n",
@ -2325,8 +2349,11 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
        }
    } while (!cb_txn);
out:
    if (server_info)
    if (server_info) {
        mutex_lock(&g_smcinvoke_lock);
        kref_put(&server_info->ref_cnt, destroy_cb_server);
        mutex_unlock(&g_smcinvoke_lock);
    }

    if (ret && ret != -ERESTARTSYS)
        pr_err("accept thread returning with ret: %d\n", ret);
@ -2869,6 +2896,8 @@ static int smcinvoke_probe(struct platform_device *pdev)
        goto exit_destroy_device;
    }
    smcinvoke_pdev = pdev;
    smc_clock_support = of_property_read_bool((&pdev->dev)->of_node,
            "qcom,clock-support");

    return 0;
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
@ -1308,8 +1308,8 @@ static int spi_geni_mas_setup(struct spi_master *spi)
|
||||
spi_master_setup(mas);
|
||||
}
|
||||
|
||||
mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
|
||||
mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
|
||||
mas->tx_fifo_depth = get_tx_fifo_depth(&mas->spi_rsc);
|
||||
mas->rx_fifo_depth = get_rx_fifo_depth(&mas->spi_rsc);
|
||||
mas->tx_fifo_width = get_tx_fifo_width(mas->base);
|
||||
mas->oversampling = 1;
|
||||
geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
|
||||
@ -2391,6 +2391,7 @@ static int spi_geni_probe(struct platform_device *pdev)
|
||||
goto spi_geni_probe_err;
|
||||
}
|
||||
|
||||
geni_mas->spi_rsc.base = geni_mas->base;
|
||||
geni_mas->slave_cross_connected =
|
||||
of_property_read_bool(pdev->dev.of_node, "slv-cross-connected");
|
||||
spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
|
||||
|
@ -2750,7 +2750,7 @@ static int get_tx_fifo_size(struct msm_geni_serial_port *port)
        return -ENODEV;

    uport = &port->uport;
    port->tx_fifo_depth = get_tx_fifo_depth(uport->membase);
    port->tx_fifo_depth = get_tx_fifo_depth(&port->serial_rsc);
    if (!port->tx_fifo_depth) {
        dev_err(uport->dev, "%s: Invalid TX FIFO depth read\n",
                __func__);
@ -2764,7 +2764,7 @@ static int get_tx_fifo_size(struct msm_geni_serial_port *port)
        return -ENXIO;
    }

    port->rx_fifo_depth = get_rx_fifo_depth(uport->membase);
    port->rx_fifo_depth = get_rx_fifo_depth(&port->serial_rsc);
    if (!port->rx_fifo_depth) {
        dev_err(uport->dev, "%s: Invalid RX FIFO depth read\n",
                __func__);
@ -3832,6 +3832,7 @@ static int msm_geni_serial_read_dtsi(struct platform_device *pdev,
        dev_err(&pdev->dev, "Err IO mapping serial iomem\n");
        return -ENOMEM;
    }
    dev_port->serial_rsc.base = uport->membase;

    ret = msm_geni_serial_get_irq_pinctrl(pdev, dev_port);
    if (ret)
@ -3893,6 +3894,7 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
        goto exit_geni_serial_probe;
    }

    dev_port->is_console = is_console;
    dev_port->port_state = UART_PORT_CLOSED_SHUTDOWN;
    if (drv->cons && !con_enabled) {
        dev_err(&pdev->dev, "%s, Console Disabled\n", __func__);
@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
@ -55,8 +56,6 @@ struct ffs_ep {
    struct usb_endpoint_descriptor *descs[3];

    u8 num;

    int status; /* P: epfile->mutex */
};

/* Copied from f_fs.c */
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "eusb2_phy: %s: " fmt, __func__
@ -227,11 +227,19 @@ static void msm_eusb2_phy_clocks(struct msm_eusb2_phy *phy, bool on)

static void msm_eusb2_phy_update_eud_detect(struct msm_eusb2_phy *phy, bool set)
{
    if (set)
    if (!phy->eud_detect_reg)
        return;

    if (set) {
        /* Make sure all the writes are processed before setting EUD_DETECT */
        mb();
        writel_relaxed(EUD_DETECT, phy->eud_detect_reg);
    else
    } else {
        writel_relaxed(readl_relaxed(phy->eud_detect_reg) & ~EUD_DETECT,
                phy->eud_detect_reg);
        /* Make sure clearing EUD_DETECT is completed before turning off the regulators */
        mb();
    }
}

static int msm_eusb2_phy_power(struct msm_eusb2_phy *phy, bool on)
@ -288,8 +296,6 @@ static int msm_eusb2_phy_power(struct msm_eusb2_phy *phy, bool on)
        goto unset_vdda12;
    }

    /* Make sure all the writes are processed before setting EUD_DETECT */
    mb();
    /* Set eud_detect_reg after powering on eUSB PHY rails to bring EUD out of reset */
    msm_eusb2_phy_update_eud_detect(phy, true);

@ -301,9 +307,6 @@ static int msm_eusb2_phy_power(struct msm_eusb2_phy *phy, bool on)
    /* Clear eud_detect_reg to put EUD in reset */
    msm_eusb2_phy_update_eud_detect(phy, false);

    /* Make sure clearing EUD_DETECT is completed before turning off the regulators */
    mb();

    ret = regulator_disable(phy->vdda12);
    if (ret)
        dev_err(phy->phy.dev, "Unable to disable vdda12: %d\n", ret);
@ -720,6 +723,8 @@ static int msm_eusb2_phy_init(struct usb_phy *uphy)

    msm_eusb2_write_readback(phy->base, USB_PHY_UTMI_CTRL5, POR, POR);

    udelay(10);

    msm_eusb2_write_readback(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
            PHY_ENABLE | RETENABLEN, PHY_ENABLE | RETENABLEN);

@ -983,17 +988,13 @@ static int msm_eusb2_phy_probe(struct platform_device *pdev)
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eud_detect_reg");
    if (!res) {
        dev_err(dev, "missing eud_detect register address\n");
        ret = -ENODEV;
        goto err_ret;
    }

    phy->eud_detect_reg = devm_ioremap_resource(dev, res);
    if (IS_ERR(phy->eud_detect_reg)) {
        dev_err(dev, "eud_detect_reg ioremap err:%d\n", phy->eud_detect_reg);
        ret = PTR_ERR(phy->eud_detect_reg);
        goto err_ret;
    if (res) {
        phy->eud_detect_reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(phy->eud_detect_reg)) {
            ret = PTR_ERR(phy->eud_detect_reg);
            dev_err(dev, "eud_detect_reg ioremap err: %d\n", ret);
            goto err_ret;
        }
    }

    phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
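The barriers that moved into msm_eusb2_phy_update_eud_detect() are deliberately asymmetric: on the set path mb() precedes the relaxed write, so all earlier PHY programming is visible before EUD_DETECT is raised; on the clear path it follows the write, so the clear completes before the regulators are switched off. On the set path the pair is roughly what a non-relaxed accessor provides; a sketch of that equivalence (not a suggested driver change; kernel writel() orders prior writes ahead of the MMIO store but has no trailing-barrier counterpart, which is why the clear path keeps its explicit mb()):

static void eud_detect_set_equivalent(struct msm_eusb2_phy *phy)
{
    /* writel() implies the leading barrier that the mb(); writel_relaxed()
     * pair spells out on the set path above.
     */
    writel(EUD_DETECT, phy->eud_detect_reg);
}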
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _LINUX_MSM_GENI_SE
@ -30,7 +30,8 @@ enum se_protocol_types {
};

/**
 * struct geni_se_rsc - GENI Serial Engine Resource
 * struct se_geni_rsc - GENI Serial Engine Resource
 * @base: Base address of the Serial Engine's register block.
 * @ctrl_dev: Pointer to the controller device.
 * @wrapper_dev: Pointer to the parent QUPv3 core.
 * @se_clk: Handle to the core serial engine clock.
@ -56,6 +57,7 @@ enum se_protocol_types {
 * @is_list_add: To synchronize list add and del.
 */
struct se_geni_rsc {
    void __iomem *base;
    struct device *ctrl_dev;
    struct device *wrapper_dev;
    struct clk *se_clk;
@ -323,15 +325,25 @@ struct se_geni_rsc {
/* SE_HW_PARAM_0 fields */
#define TX_FIFO_WIDTH_MSK (GENMASK(29, 24))
#define TX_FIFO_WIDTH_SHFT (24)
#define TX_FIFO_DEPTH_MSK (GENMASK(21, 16))
#define TX_FIFO_DEPTH_SHFT (16)
#define GEN_I3C_IBI_CTRL (BIT(7))
/*
 * For QUP HW version >= 3.10, TX FIFO depth support is increased to
 * 256 bytes and the corresponding field widens to bits 16 to 23.
 */
#define TX_FIFO_DEPTH_MSK_256_BYTES (GENMASK(23, 16))
#define TX_FIFO_DEPTH_MSK (GENMASK(21, 16))
#define TX_FIFO_DEPTH_SHFT (16)
#define GEN_I3C_IBI_CTRL (BIT(7))

/* SE_HW_PARAM_1 fields */
#define RX_FIFO_WIDTH_MSK (GENMASK(29, 24))
#define RX_FIFO_WIDTH_SHFT (24)
#define RX_FIFO_DEPTH_MSK (GENMASK(21, 16))
#define RX_FIFO_DEPTH_SHFT (16)
/*
 * For QUP HW version >= 3.10, RX FIFO depth support is increased to
 * 256 bytes and the corresponding field widens to bits 16 to 23.
 */
#define RX_FIFO_DEPTH_MSK_256_BYTES (GENMASK(23, 16))
#define RX_FIFO_DEPTH_MSK (GENMASK(21, 16))
#define RX_FIFO_DEPTH_SHFT (16)

/* SE_HW_PARAM_2 fields */
#define GEN_HW_FSM_I2C (BIT(15))
@ -608,14 +620,14 @@ void geni_abort_s_cmd(void __iomem *base);

/**
 * get_tx_fifo_depth() - Get the TX FIFO depth of the serial engine
 * @base: Base address of the serial engine's register block.
 * @se: Pointer to the concerned serial engine.
 *
 * This function is used to get the depth, i.e. the number of elements in
 * the TX FIFO of the serial engine.
 *
 * Return: TX FIFO depth in units of FIFO words.
 */
int get_tx_fifo_depth(void __iomem *base);
int get_tx_fifo_depth(struct se_geni_rsc *se);

/**
 * get_tx_fifo_width() - Get the TX FIFO width of the serial engine
@ -630,14 +642,14 @@ int get_tx_fifo_width(void __iomem *base);

/**
 * get_rx_fifo_depth() - Get the RX FIFO depth of the serial engine
 * @base: Base address of the serial engine's register block.
 * @se: Pointer to the concerned serial engine.
 *
 * This function is used to get the depth, i.e. the number of elements in
 * the RX FIFO of the serial engine.
 *
 * Return: RX FIFO depth in units of FIFO words.
 */
int get_rx_fifo_depth(void __iomem *base);
int get_rx_fifo_depth(struct se_geni_rsc *se);

/**
 * se_get_packing_config() - Get the packing configuration based on input
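The header above only declares the new struct se_geni_rsc * signatures; choosing between the legacy 6-bit depth field and the widened 8-bit field happens in the GENI driver, which this hunk does not show. A minimal sketch of what that selection presumably looks like, built from SE_HW_PARAM_0, geni_read_reg() and the masks defined above; the version-query call and its availability here are assumptions, not taken from this commit:

int get_tx_fifo_depth(struct se_geni_rsc *se)
{
    unsigned int major, minor, step;
    u32 hw_param = geni_read_reg(se->base, SE_HW_PARAM_0);
    u32 msk = TX_FIFO_DEPTH_MSK;

    /* Assumption: geni_se_qupv3_hw_version() (present in older msm-geni-se
     * headers) reports the QUP core version; >= 3.10 selects the wide field.
     */
    if (!geni_se_qupv3_hw_version(se->wrapper_dev, &major, &minor, &step) &&
        (major > 3 || (major == 3 && minor >= 10)))
        msk = TX_FIFO_DEPTH_MSK_256_BYTES;

    return (hw_param & msk) >> TX_FIFO_DEPTH_SHFT;
}

The RX variant would read SE_HW_PARAM_1 with the RX masks in exactly the same way.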
@ -7,7 +7,7 @@
#define __QSEECOM_KERNEL_H_

#include <linux/types.h>
#include <linux/qseecom.h>

#define QSEECOM_ALIGN_SIZE 0x40
#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
@ -36,12 +36,23 @@ int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
#if IS_ENABLED(CONFIG_QSEECOM)
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
        u64 *response_type, unsigned int *data);
/*
 * If clock support is enabled for smcinvoke, a notification is sent to
 * qseecom to enable/disable clocks when smcinvoke sends an invoke command.
 */
int qseecom_set_msm_bus_request_from_smcinvoke(enum qseecom_bandwidth_request_mode mode);
#else
static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
        u64 *response_type, unsigned int *data)
{
    return -EOPNOTSUPP;
}
static inline int qseecom_set_msm_bus_request_from_smcinvoke(
        enum qseecom_bandwidth_request_mode mode)
{
    return -EOPNOTSUPP;
}
#endif
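Read together with the prepare_send_scm_msg() hunk earlier, these declarations give the intended call shape: every invoke command is bracketed by a HIGH bandwidth vote going in and an INACTIVE vote coming out. A condensed sketch of that bracketing (do_invoke() and the wrapper are illustrative stand-ins; error handling trimmed to the shape used above):

static int do_invoke(void)
{
    return 0;   /* stands in for invoke_cmd_handler() */
}

static int invoke_with_clock_votes(bool smc_clock_support)
{
    int ret;

    if (smc_clock_support &&
        qseecom_set_msm_bus_request_from_smcinvoke(HIGH))
        return -EPERM;      /* could not vote the clocks up */

    ret = do_invoke();

    if (smc_clock_support &&
        qseecom_set_msm_bus_request_from_smcinvoke(INACTIVE))
        return -EPERM;      /* could not drop the vote */

    return ret;
}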
@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "walt.h"
@ -42,7 +43,6 @@ unsigned int sysctl_sched_wake_up_idle[2];
unsigned int sysctl_input_boost_ms;
unsigned int sysctl_input_boost_freq[8];
unsigned int sysctl_sched_boost_on_input;
int sysctl_cluster_arr[3][15];

/* sysctl nodes accessed by other files */
unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;
@ -68,11 +68,30 @@ unsigned int sysctl_sched_suppress_region2;
unsigned int sysctl_sched_skip_sp_newly_idle_lb = 1;
unsigned int sysctl_sched_hyst_min_coloc_ns = 80000000;
unsigned int sysctl_sched_asymcap_boost;

struct cluster_freq_relation cluster_arr[3][5];
static int sysctl_sched_sibling_cluster_map[4] = {-1, -1, -1, -1};
/* range is [1 .. INT_MAX] */
static int sysctl_task_read_pid = 1;

static int sched_sibling_cluster_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp,
        loff_t *ppos)
{
    int ret = -EACCES, i = 0;
    static bool done;
    struct walt_sched_cluster *cluster;

    if (write && done)
        return ret;

    ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
    if (!ret && write) {
        done = true;
        for_each_sched_cluster(cluster)
            cluster->sibling_cluster = sysctl_sched_sibling_cluster_map[i++];
    }

    return ret;
}
static int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp,
        loff_t *ppos)
@ -872,31 +891,13 @@ struct ctl_table walt_table[] = {
        .extra2 = SYSCTL_ONE,
    },
    {
        .procname = "cluster0_rel",
        .data = sysctl_cluster_arr[0],
        .maxlen = sizeof(int) * 15,
        .procname = "sched_sibling_cluster",
        .data = &sysctl_sched_sibling_cluster_map,
        .maxlen = sizeof(int) * 4,
        .mode = 0644,
        .proc_handler = sched_ignore_cluster_handler,
        .extra1 = SYSCTL_ZERO,
        .extra2 = SYSCTL_INT_MAX,
    },
    {
        .procname = "cluster1_rel",
        .data = sysctl_cluster_arr[1],
        .maxlen = sizeof(int) * 15,
        .mode = 0644,
        .proc_handler = sched_ignore_cluster_handler,
        .extra1 = SYSCTL_ZERO,
        .extra2 = SYSCTL_INT_MAX,
    },
    {
        .procname = "cluster2_rel",
        .data = sysctl_cluster_arr[2],
        .maxlen = sizeof(int) * 15,
        .mode = 0644,
        .proc_handler = sched_ignore_cluster_handler,
        .extra1 = SYSCTL_ZERO,
        .extra2 = SYSCTL_INT_MAX,
        .proc_handler = sched_sibling_cluster_handler,
        .extra1 = SYSCTL_NEG_ONE,
        .extra2 = &three,
    },
    { }
};
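The handler above is write-once by design: the first successful write latches the static done flag and copies entry i of the map into sched_cluster[i]->sibling_cluster; any later write fails with the pre-initialized -EACCES. For illustration only, a plausible map (assumed, not taken from this commit) for a four-cluster part whose two middle clusters mirror each other:

/* Hypothetical sched_sibling_cluster map: clusters 1 and 2 are siblings
 * of each other, clusters 0 and 3 are unpaired (-1).
 */
static int example_sibling_map[4] = { -1, 2, 1, -1 };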
@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/syscore_ops.h>
@ -2361,6 +2362,7 @@ static struct walt_sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
    cluster->cur_freq = 1;
    cluster->max_freq = 1;
    cluster->max_possible_freq = 1;
    cluster->sibling_cluster = -1;

    raw_spin_lock_init(&cluster->load_lock);
    cluster->cpus = *cpus;
@ -3668,6 +3670,10 @@ static void walt_sched_init_rq(struct rq *rq)
    wrq->notif_pending = false;

    wrq->num_mvp_tasks = 0;

    wrq->uclamp_limit[UCLAMP_MIN] = 0;
    wrq->uclamp_limit[UCLAMP_MAX] = SCHED_CAPACITY_SCALE;

    INIT_LIST_HEAD(&wrq->mvp_tasks);
}

@ -3845,7 +3851,10 @@ static void android_rvh_enqueue_task(void *unused, struct rq *rq, struct task_st
{
    u64 wallclock = walt_ktime_get_ns();
    struct walt_task_struct *wts = (struct walt_task_struct *) p->android_vendor_data1;
    struct walt_rq *wrq = (struct walt_rq *) rq->android_vendor_data1;
    bool double_enqueue = false;
    unsigned long min = uclamp_rq_get(rq, UCLAMP_MIN);
    unsigned long max = uclamp_rq_get(rq, UCLAMP_MAX);

    if (unlikely(walt_disabled))
        return;
@ -3878,6 +3887,14 @@ static void android_rvh_enqueue_task(void *unused, struct rq *rq, struct task_st

    if (!double_enqueue)
        walt_inc_cumulative_runnable_avg(rq, p);

    if ((wrq->uclamp_limit[UCLAMP_MIN] != min) ||
        (wrq->uclamp_limit[UCLAMP_MAX] != max)) {
        wrq->uclamp_limit[UCLAMP_MIN] = min;
        wrq->uclamp_limit[UCLAMP_MAX] = max;
        waltgov_run_callback(rq, WALT_CPUFREQ_UCLAMP);
    }

    trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_mask)[0], is_mvp(wts));
}

@ -3886,6 +3903,8 @@ static void android_rvh_dequeue_task(void *unused, struct rq *rq, struct task_st
    struct walt_rq *wrq = (struct walt_rq *) rq->android_vendor_data1;
    struct walt_task_struct *wts = (struct walt_task_struct *) p->android_vendor_data1;
    bool double_dequeue = false;
    unsigned long min = uclamp_rq_get(rq, UCLAMP_MIN);
    unsigned long max = uclamp_rq_get(rq, UCLAMP_MAX);

    if (unlikely(walt_disabled))
        return;
@ -3926,6 +3945,13 @@ static void android_rvh_dequeue_task(void *unused, struct rq *rq, struct task_st
    if (!double_dequeue)
        walt_dec_cumulative_runnable_avg(rq, p);

    if ((wrq->uclamp_limit[UCLAMP_MIN] != min) ||
        (wrq->uclamp_limit[UCLAMP_MAX] != max)) {
        wrq->uclamp_limit[UCLAMP_MIN] = min;
        wrq->uclamp_limit[UCLAMP_MAX] = max;
        waltgov_run_callback(rq, WALT_CPUFREQ_UCLAMP);
    }

    trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_mask)[0], is_mvp(wts));
}
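The enqueue/dequeue hooks above only cache the rq-wide uclamp window and kick the governor when it changes; how waltgov actually consumes WALT_CPUFREQ_UCLAMP is not part of this diff. A hedged sketch of the obvious consumer, clamping the utilization that feeds the frequency pick (everything except walt_rq and uclamp_limit is illustrative):

/* Hypothetical governor-side use of the cached window; waltgov internals
 * are not shown in this commit.
 */
static unsigned long waltgov_apply_uclamp(struct rq *rq, unsigned long util)
{
    struct walt_rq *wrq = (struct walt_rq *)rq->android_vendor_data1;

    return clamp(util, wrq->uclamp_limit[UCLAMP_MIN],
            wrq->uclamp_limit[UCLAMP_MAX]);
}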
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _WALT_H
@ -122,6 +122,9 @@ struct walt_rq {
    u64 cycles;
    int num_mvp_tasks;
    struct list_head mvp_tasks;

    /* uclamp tracking */
    unsigned long uclamp_limit[UCLAMP_CNT];
};

struct walt_sched_cluster {
@ -138,6 +141,7 @@ struct walt_sched_cluster {
    unsigned int max_freq;
    u64 aggr_grp_load;
    unsigned long util_to_cost[1024];
    int8_t sibling_cluster;
};

extern struct walt_sched_cluster *sched_cluster[WALT_NR_CPUS];
@ -295,6 +299,8 @@ extern unsigned int sched_lib_mask_force;
#define WALT_CPUFREQ_PL (1U << 3)
#define WALT_CPUFREQ_EARLY_DET (1U << 4)
#define WALT_CPUFREQ_BOOST_UPDATE (1U << 5)
#define WALT_CPUFREQ_UCLAMP (1U << 6)

#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
@ -910,65 +916,6 @@ struct compute_energy_output {
    unsigned int cluster_first_cpu[MAX_CLUSTERS];
};

struct cluster_freq_relation {
    int src_freq_scale;
    int dst_cpu;
    int tgt_freq_scale;
};

extern struct cluster_freq_relation cluster_arr[3][5];
extern int sched_ignore_cluster_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos);
/* Check whether we can ignore a cluster for p */
static inline bool ignore_cluster_valid(struct task_struct *p, struct rq *rq)
{
    cpumask_t tmp;
    int i;
    struct walt_rq *wrq = (struct walt_rq *)rq->android_vendor_data1;
    int cluster = wrq->cluster->id;
    int src_cpu = cpumask_first(&wrq->cluster->cpus);
    int src_freq_scale = arch_scale_freq_capacity(src_cpu);
    int tgt_scale, tgt_cpu;

    /* if src cluster has no relationship */
    if (cluster_arr[cluster][0].src_freq_scale <= 0)
        return false;

    /* if src cluster is below its threshold frequency */
    if (src_freq_scale < cluster_arr[cluster][0].src_freq_scale)
        return false;

    /* if p is only affine to src cluster */
    if (p) {
        cpumask_andnot(&tmp, cpu_active_mask, &wrq->cluster->cpus);
        if (!cpumask_intersects(&tmp, &p->cpus_mask))
            return false;
    }

    for (i = 0; i < 5; i++)
        if (cluster_arr[cluster][i].src_freq_scale > src_freq_scale)
            break;
    tgt_cpu = cpumask_first(&sched_cluster[cluster_arr[cluster][i - 1].dst_cpu]->cpus);
    tgt_scale = cluster_arr[cluster][i - 1].tgt_freq_scale;

    /*
     * In case the target cluster is frequency limited to a frequency
     * below the target scale, skip ignoring the src cluster.
     */
    if (capacity_orig_of(tgt_cpu) < tgt_scale)
        return false;

    /* Target cluster is above target scale */
    if (arch_scale_freq_capacity(tgt_cpu) >= tgt_scale)
        return false;

    /* Reaching here means we need to ignore the src cluster for placement */
    return true;
}

extern void walt_task_dump(struct task_struct *p);
extern void walt_rq_dump(int cpu);
extern void walt_dump(void);
@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <trace/hooks/sched.h>
@ -155,66 +155,13 @@ static inline bool walt_target_ok(int target_cpu, int order_index)
        (target_cpu == cpumask_first(&cpu_array[order_index][0])));
}

extern int sysctl_cluster_arr[3][15];
int sched_ignore_cluster_handler(struct ctl_table *table,
        int write, void __user *buffer, size_t *lenp,
        loff_t *ppos)
{
    int ret = -EPERM, i;
    int *data = (int *)table->data;
    static int configured[3] = {0};
    static DEFINE_MUTEX(ignore_cluster_mutex);
    int index = (table->data == sysctl_cluster_arr[0]) ?
            0 : (table->data == sysctl_cluster_arr[1]) ? 1 : 2;

    if (index >= num_sched_clusters - 1)
        return -EINVAL;

    mutex_lock(&ignore_cluster_mutex);

    if (!write) {
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        goto unlock;
    }

    if (configured[index])
        goto unlock;

    configured[index] = 1;
    ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
    if (ret)
        goto unlock;

    for (i = 0; i < 5; i++) {
        int idx = i * 3;

        if ((data[idx + 0] <= 0) || (data[idx + 0] > 1024))
            break;
        if ((data[idx + 1] < 0) || (data[idx + 1] >= num_sched_clusters))
            break;
        if ((data[idx + 2] <= 0) || (data[idx + 2] > 1024))
            break;

        cluster_arr[index][i].src_freq_scale = data[i + 0];
        cluster_arr[index][i].dst_cpu = data[i + 1];
        cluster_arr[index][i].tgt_freq_scale = data[i + 2];
    }

    /* update the next entry as last entry */
    if (i)
        cluster_arr[index][i].src_freq_scale = 1025;

unlock:
    mutex_unlock(&ignore_cluster_mutex);
    return ret;
}

#define MIN_UTIL_FOR_ENERGY_EVAL 52
static void walt_get_indicies(struct task_struct *p, int *order_index,
        int *end_index, int per_task_boost, bool is_uclamp_boosted,
        bool *energy_eval_needed, bool *ignore_cluster)
        bool *energy_eval_needed)
{
    int i = 0;

    *order_index = 0;
    *end_index = 0;
@ -227,7 +174,6 @@ static void walt_get_indicies(struct task_struct *p, int *order_index,
        return;
    }

    if (is_full_throttle_boost()) {
        *energy_eval_needed = false;
        *order_index = num_sched_clusters - 1;
@ -242,71 +188,27 @@ static void walt_get_indicies(struct task_struct *p, int *order_index,
            walt_task_skip_min_cpu(p)) {
        *energy_eval_needed = false;
        *order_index = 1;
        /* BIG cluster could have relationship with next cluster */
        /*
         * For ignore case
         * G S -> Since G cannot have relationship exit with i = 0
         * G P S -> Enter loop and exit with i = 1.
         * G T P S -> here we will exit with i = 1 OR 2 (if T also needs
         * to be ignored).
         */
        i = 0;
        while (*order_index + i <= num_sched_clusters - 1) {
            if (!ignore_cluster[*order_index + i])
                break;
            i++;
        }

        *order_index = *order_index + i;
        /*
         * If starting with cluster lower than prime check if prime need
         * to be scanned.
         */
        if ((*order_index < num_sched_clusters - 1) && sysctl_sched_asymcap_boost) {
            for (i = 1; i < num_sched_clusters - 1; i++) {
                int cpu = cpumask_first(&cpu_array[*order_index][i]);

                if (is_max_capacity_cpu(cpu))
                    break;
            }

            *end_index = i;
        if (sysctl_sched_asymcap_boost) {
            *end_index = 1;
            return;
        }
    }

    for (i = *order_index ; i < num_sched_clusters - 1; i++) {
        if (task_demand_fits(p, cpumask_first(&cpu_array[i][0]))) {
            if (!ignore_cluster[i])
                break;
        }
        if (task_demand_fits(p, cpumask_first(&cpu_array[i][0])))
            break;
    }

    *order_index = i;

    /* order_index == 0 means we never hit ignore cluster */
    if (*order_index == 0 &&
        (task_util(p) >= MIN_UTIL_FOR_ENERGY_EVAL) &&
        !(p->in_iowait && task_in_related_thread_group(p)) &&
        !walt_get_rtg_status(p) &&
        !(sched_boost_type == CONSERVATIVE_BOOST && task_sched_boost(p)) &&
        !sysctl_sched_suppress_region2
        ) {

        /*
         * Identify end cluster based on frequency relation, not
         * considering prime cluster for region2.
         */
        i = 1;
        while (i <= num_sched_clusters - 2) {
            if (!ignore_cluster[i])
                break;
            i++;
        }

        if (i <= num_sched_clusters - 2)
            *end_index = i;
    }
        )
        *end_index = 1;

    if (p->in_iowait && task_in_related_thread_group(p))
        *energy_eval_needed = false;
@ -332,7 +234,7 @@ static void walt_find_best_target(struct sched_domain *sd,
        cpumask_t *candidates,
        struct task_struct *p,
        struct find_best_target_env *fbt_env,
        bool *ignore_cluster)
        bool *force_energy_eval)
{
    unsigned long min_task_util = uclamp_task_util(p);
    long target_max_spare_cap = 0;
@ -351,7 +253,9 @@ static void walt_find_best_target(struct sched_domain *sd,
    bool rtg_high_prio_task = task_rtg_high_prio(p);
    cpumask_t visit_cpus;
    struct walt_task_struct *wts = (struct walt_task_struct *) p->android_vendor_data1;
    bool scan_ignore_cluster = false, ignored = false;
    unsigned int visited_cluster = 0;
    unsigned int search_sibling_cluster = 0;
    int cpu;

    /* Find start CPU based on boost value */
    start_cpu = fbt_env->start_cpu;
@ -378,36 +282,35 @@ static void walt_find_best_target(struct sched_domain *sd,
            cpumask_test_cpu(prev_cpu, p->cpus_ptr)) {
        fbt_env->fastpath = PREV_CPU_FASTPATH;
        cpumask_set_cpu(prev_cpu, candidates);
        visited_cluster = BIT(cpu_cluster(prev_cpu)->id);
        goto out;
    }

retry_ignore_cluster:
/* retry for sibling clusters */
retry:
    for (cluster = 0; cluster < num_sched_clusters; cluster++) {
        int best_idle_cpu_cluster = -1;
        int target_cpu_cluster = -1;
        int this_complex_idle = 0;
        int best_complex_idle = 0;
        struct rq *rq;
        struct walt_rq *wrq;

        rq = cpu_rq(cpumask_first(&cpu_array[order_index][cluster]));
        wrq = (struct walt_rq *) rq->android_vendor_data1;

        if ((!scan_ignore_cluster && ignore_cluster[wrq->cluster->id])
                || (scan_ignore_cluster && !ignore_cluster[wrq->cluster->id])) {
            ignored = true;
        if (BIT(sched_cluster[cluster]->id) & visited_cluster)
            continue;
        }
        /*
         * Handle case where intermediate cluster between start and end
         * index is skipped due to frequency relation.
         */
        target_max_spare_cap = 0;
        min_exit_latency = INT_MAX;
        best_idle_cuml_util = ULONG_MAX;

        cpumask_and(&visit_cpus, &p->cpus_mask,
                &cpu_array[order_index][cluster]);
        if (search_sibling_cluster) {
            if (!(search_sibling_cluster & BIT(cluster)))
                continue;
            visited_cluster |= BIT(cluster);
            cpumask_and(&visit_cpus, p->cpus_ptr, &sched_cluster[cluster]->cpus);
        } else {
            cpumask_and(&visit_cpus, p->cpus_ptr, &cpu_array[order_index][cluster]);
            visited_cluster |= BIT(cpu_cluster(
                    cpumask_first(&cpu_array[order_index][cluster]))->id);
        }

        for_each_cpu(i, &visit_cpus) {
            unsigned long capacity_orig = capacity_orig_of(i);
            unsigned long wake_cpu_util, new_cpu_util, new_util_cuml;
@ -566,20 +469,8 @@ static void walt_find_best_target(struct sched_domain *sd,

        if (most_spare_cap_cpu != -1 && cluster >= stop_index)
            break;
    }

    if (unlikely(most_spare_cap_cpu == -1) && cpumask_empty(candidates) &&
            !scan_ignore_cluster && ignored && (cluster == num_sched_clusters)) {
        /*
         * We enter here when we have ignored some cluster and
         * didn't find any valid candidate in any of the valid
         * cluster.
         * Fallback and try ignored cluster once.
         */
        scan_ignore_cluster = true;
        goto retry_ignore_cluster;
    }
    /*
     * We have set idle or target as long as they are valid CPUs.
     * If we don't find either, then we fallback to most_spare_cap,
@ -600,6 +491,20 @@ static void walt_find_best_target(struct sched_domain *sd,
    }

out:
    search_sibling_cluster = 0;
    for_each_cpu(cpu, candidates) {
        struct walt_sched_cluster *cluster = cpu_cluster(cpu);

        if ((cluster->sibling_cluster >= 0) &&
                !(BIT(cluster->sibling_cluster) & visited_cluster)) {
            search_sibling_cluster |= BIT(cluster->sibling_cluster);
        }
    }
    if (search_sibling_cluster) {
        *force_energy_eval = true;
        goto retry;
    }

    trace_sched_find_best_target(p, min_task_util, start_cpu, cpumask_bits(candidates)[0],
            most_spare_cap_cpu, order_index, end_index,
            fbt_env->skip_cpu, task_on_rq_queued(p), least_nr_cpu,
@ -898,8 +803,7 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
    int first_cpu;
    bool energy_eval_needed = true;
    struct compute_energy_output output;
    bool ignore_cluster[4] = {0};
    struct walt_sched_cluster *sched_cluster;
    bool force_energy_eval = false;

    if (walt_is_many_wakeup(sibling_count_hint) && prev_cpu != cpu &&
            cpumask_test_cpu(prev_cpu, &p->cpus_mask))
@ -908,12 +812,8 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
    if (unlikely(!cpu_array))
        return -EPERM;

    for_each_sched_cluster(sched_cluster)
        ignore_cluster[sched_cluster->id] =
            ignore_cluster_valid(p, cpu_rq(cpumask_first(&sched_cluster->cpus)));

    walt_get_indicies(p, &order_index, &end_index, task_boost, uclamp_boost,
            &energy_eval_needed, ignore_cluster);
            &energy_eval_needed);
    start_cpu = cpumask_first(&cpu_array[order_index][0]);

    is_rtg = task_in_related_thread_group(p);
@ -932,12 +832,12 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
    fbt_env.fastpath = 0;
    fbt_env.need_idle = need_idle;

    if (sync && (need_idle || (is_rtg && curr_is_rtg) ||
            ignore_cluster_valid(p, cpu_rq(cpu))))
    if (sync && (need_idle || (is_rtg && curr_is_rtg)))
        sync = 0;

    if (sysctl_sched_sync_hint_enable && sync
            && bias_to_this_cpu(p, cpu, start_cpu)) {
            && bias_to_this_cpu(p, cpu, start_cpu)
            && (cpu_cluster(cpu)->sibling_cluster == -1)) {
        best_energy_cpu = cpu;
        fbt_env.fastpath = SYNC_WAKEUP;
        goto unlock;
@ -956,7 +856,7 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
    fbt_env.skip_cpu = walt_is_many_wakeup(sibling_count_hint) ?
            cpu : -1;

    walt_find_best_target(NULL, candidates, p, &fbt_env, ignore_cluster);
    walt_find_best_target(NULL, candidates, p, &fbt_env, &force_energy_eval);

    /* Bail out if no candidate was found. */
    weight = cpumask_weight(candidates);
@ -976,7 +876,7 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
        goto unlock;
    }

    if (!energy_eval_needed) {
    if (!energy_eval_needed && !force_energy_eval) {
        int max_spare_cpu = first_cpu;

        for_each_cpu(cpu, candidates) {
@ -990,8 +890,7 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
    if (p->state == TASK_WAKING)
        delta = task_util(p);

    if (cpumask_test_cpu(prev_cpu, &p->cpus_mask) && !__cpu_overutilized(prev_cpu, delta) &&
            !ignore_cluster_valid(p, cpu_rq(prev_cpu))) {
    if (cpumask_test_cpu(prev_cpu, &p->cpus_mask) && !__cpu_overutilized(prev_cpu, delta)) {
        if (trace_sched_compute_energy_enabled()) {
            memset(&output, 0, sizeof(output));
            prev_energy = walt_compute_energy(p, prev_cpu, pd, candidates, fbt_env.prs,
@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <trace/hooks/sched.h>
@ -570,29 +571,23 @@ static int walt_lb_find_busiest_from_lower_cap_cpu(int dst_cpu, const cpumask_t
    return busiest_cpu;
}

#define NOBUSY -1
static int walt_lb_find_busiest_cpu(int dst_cpu, const cpumask_t *src_mask, int *has_misfit,
        bool is_newidle)
{
    int fsrc_cpu = cpumask_first(src_mask);
    int busiest_cpu;

    /*
     * could there be extra intra cluster migration? task may be pulled to
     * other cluster if cluster needs to be ignored?
     */
    if (capacity_orig_of(dst_cpu) == capacity_orig_of(fsrc_cpu))
        return walt_lb_find_busiest_similar_cap_cpu(dst_cpu,
                src_mask, has_misfit, is_newidle);
        busiest_cpu = walt_lb_find_busiest_similar_cap_cpu(dst_cpu,
                src_mask, has_misfit, is_newidle);
    else if (capacity_orig_of(dst_cpu) > capacity_orig_of(fsrc_cpu))
        busiest_cpu = walt_lb_find_busiest_from_lower_cap_cpu(dst_cpu,
                src_mask, has_misfit, is_newidle);
    else
        busiest_cpu = walt_lb_find_busiest_from_higher_cap_cpu(dst_cpu,
                src_mask, has_misfit, is_newidle);

    if (ignore_cluster_valid(NULL, cpu_rq(dst_cpu)))
        return NOBUSY;

    if (capacity_orig_of(dst_cpu) > capacity_orig_of(fsrc_cpu))
        return walt_lb_find_busiest_from_lower_cap_cpu(dst_cpu,
                src_mask, has_misfit, is_newidle);

    return walt_lb_find_busiest_from_higher_cap_cpu(dst_cpu,
            src_mask, has_misfit, is_newidle);
    return busiest_cpu;
}

static DEFINE_RAW_SPINLOCK(walt_lb_migration_lock);