Merge tag 'KERNEL.PLATFORM.1.0.r1-18000-kernel.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.10 into android13-5.10-waipio

"KERNEL.PLATFORM.1.0.r1-18000-kernel.0"

* tag 'KERNEL.PLATFORM.1.0.r1-18000-kernel.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.10:
  msm: kgsl: Pin a process in memory while creating a bind object
  msm: kgsl: Remove duplicate bind operation code
  soc: qcom: Crypto driver notify TZ on hibernate exit
  Verify and implement ICE init before program key
  drivers: thermal: qcom: Ignore error value during cold interrupt register
  tzlog: Deregister shmbridge on Hibernate entry
  msm: adsprpc: use-after-free (UAF) in global maps
  drivers: thermal: MAX31760: add to distinguish fan quantity in resume
  firmware: qcom: Reinitialize the ring buffer log pointer
  firmware: qcom: Register call back functions for hibernation
  serial: msm_geni_serial: Update port setup field during hibernation
  mailbox: msm_qmp: Handle first interrupt after hibernate exit
  mailbox: msm_qmp: Skip negotiation during restore for qmp shim layer
  bus: mhi: host: Process invalid BW requests sent by device
  rpmsg: glink: Get reference of channel objects in rx path
  soc: qcom: mdt_loader: add bound checks for headers
  dma-heaps: Add hibernation callbacks to secure heaps
  iommu: arm-smmu: Destroy secure pools before entering hibernation
  power: supply: bq256xx: Add support for extcon in hibernation
  gunyah: arm64: Fix possible irq_data use-after-free
  ANDROID: GKI: Update symbols to symbol list
  FROMLIST: binder: check offset alignment in binder_get_object()
  drivers: thermal: qcom: Add tsens driver changes for hibernation mode
  iommu: arm-smmu: Destroy secure page table pools during hibernation
  iommu: arm-smmu: Add support for hibernation

Change-Id: I88d64b622e690430790e34e67e30357e34fb328b
Jens Reidel 2024-07-23 09:39:07 +02:00
commit 9d2c545465
33 changed files with 735 additions and 121 deletions
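Most of the changes below follow the same hibernation pattern: a platform driver hooks .freeze and .restore callbacks into its dev_pm_ops so that state lost across the hibernation snapshot (and the accompanying TZ/hardware cold boot) is torn down before the image is written and rebuilt on restore. A minimal sketch of that pattern, assuming a hypothetical driver named foo (none of the foo_* identifiers or the "vendor,foo" compatible string come from this tree):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_freeze(struct device *dev)
{
	/* Runs before the hibernation image is written: quiesce the device
	 * and drop any state that cannot be saved in the snapshot.
	 */
	return 0;
}

static int foo_restore(struct device *dev)
{
	/* Runs after resuming from the hibernation image: the hardware has
	 * been cold-booted, so reprogram registers and rebuild the state
	 * released in foo_freeze().
	 */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.freeze = foo_freeze,
	.restore = foo_restore,
};

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo" },
	{ },
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.of_match_table = foo_match,
		.pm = &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");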


@ -1,2 +1,2 @@
4d6c07fc6342c179a08b88650349e8457a333196
android12-5.10-2024-03_r1
c02e95e82b343fe03d03dfd6fb74ecc2077e1ee3
android12-5.10-2024-05_r1


@ -112,6 +112,7 @@ EXPORT_SYMBOL(gh_put_virq);
int gh_put_irq(int irq)
{
struct irq_data *irq_data;
unsigned long virq;
if (irq <= 0)
return -EINVAL;
@ -120,8 +121,10 @@ int gh_put_irq(int irq)
if (!irq_data)
return -EINVAL;
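/* Cache the hwirq before irq_dispose_mapping(): disposing the mapping
 * frees irq_data, so dereferencing it afterwards would be a
 * use-after-free.
 */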
virq = irq_data->hwirq;
irq_dispose_mapping(irq);
return gh_put_virq(irq_data->hwirq);
return gh_put_virq(virq);
}
EXPORT_SYMBOL(gh_put_irq);


@ -1391,7 +1391,7 @@ int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_private *mhi_priv = dev_get_drvdata(dev);
u32 result = MHI_BW_SCALE_NACK;
enum mhi_bw_scale_req_status result = MHI_BW_SCALE_NACK;
int ret = -EINVAL;
if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
@ -1462,7 +1462,9 @@ int mhi_process_misc_bw_ev_ring(struct mhi_controller *mhi_cntrl,
ret = mhi_priv->bw_scale(mhi_cntrl, &link_info);
if (!ret) {
*cur_info = link_info;
result = 0;
result = MHI_BW_SCALE_SUCCESS;
} else if (ret == -EINVAL) {
result = MHI_BW_SCALE_INVALID;
}
write_lock_bh(&mhi_cntrl->pm_lock);


@ -47,7 +47,12 @@
#define MHI_BW_SCALE_RESULT(status, seq) (((status) & 0xF) << 8 | \
((seq) & 0xFF))
#define MHI_BW_SCALE_NACK 0xF
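/* BW scale request status codes reported back via MHI_BW_SCALE_RESULT() */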
enum mhi_bw_scale_req_status {
MHI_BW_SCALE_SUCCESS = 0x0,
MHI_BW_SCALE_INVALID = 0x1,
MHI_BW_SCALE_NACK = 0xF,
};
/* subsystem failure reason cfg command */
#define MHI_TRE_CMD_SFR_CFG_PTR(ptr) (ptr)


@ -1177,54 +1177,33 @@ static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
} while (free);
}
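/* Add HEAP_ADDR / REMOTE_HEAP_ADDR maps to the driver-wide list under
 * the global spinlock; all other maps stay on the per-file list via
 * fastrpc_mmap_add().
 */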
static void fastrpc_mmap_add_global(struct fastrpc_mmap *map)
{
struct fastrpc_apps *me = &gfa;
unsigned long irq_flags = 0;
spin_lock_irqsave(&me->hlock, irq_flags);
hlist_add_head(&map->hn, &me->maps);
spin_unlock_irqrestore(&me->hlock, irq_flags);
}
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
struct fastrpc_apps *me = &gfa;
unsigned long irq_flags = 0;
struct fastrpc_file *fl = map->fl;
spin_lock_irqsave(&me->hlock, irq_flags);
hlist_add_head(&map->hn, &me->maps);
spin_unlock_irqrestore(&me->hlock, irq_flags);
} else {
struct fastrpc_file *fl = map->fl;
hlist_add_head(&map->hn, &fl->maps);
}
hlist_add_head(&map->hn, &fl->maps);
}
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
struct dma_buf *buf, uintptr_t va, size_t len, int mflags, int refs,
struct fastrpc_mmap **ppmap)
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_mmap *match = NULL, *map = NULL;
struct hlist_node *n;
unsigned long irq_flags = 0;
if ((va + len) < va)
return -EFAULT;
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock_irqsave(&me->hlock, irq_flags);
hlist_for_each_entry_safe(map, n, &me->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
map->fd == fd) {
if (refs) {
if (map->refs + 1 == INT_MAX) {
spin_unlock_irqrestore(&me->hlock, irq_flags);
return -ETOOMANYREFS;
}
map->refs++;
}
match = map;
break;
}
}
spin_unlock_irqrestore(&me->hlock, irq_flags);
} else if (mflags == ADSP_MMAP_DMA_BUFFER) {
if (mflags == ADSP_MMAP_DMA_BUFFER) {
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (map->buf == buf) {
if (refs) {
@ -1711,7 +1690,9 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf *
}
map->len = len;
fastrpc_mmap_add(map);
if ((mflags != ADSP_MMAP_HEAP_ADDR) &&
(mflags != ADSP_MMAP_REMOTE_HEAP_ADDR))
fastrpc_mmap_add(map);
*ppmap = map;
bail:
@ -4348,6 +4329,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl,
mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
fastrpc_mmap_add_global(mem);
phys = mem->phys;
size = mem->size;
/*
@ -5127,7 +5109,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
match->size, match->flags, locked);
else {
pr_err("Cannot communicate with DSP, ADSP is down\n");
fastrpc_mmap_add(match);
fastrpc_mmap_add_global(match);
}
}
if (err)
@ -5155,7 +5137,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
if (err && match) {
if (!locked && fl)
mutex_lock(&fl->map_mutex);
fastrpc_mmap_add(match);
fastrpc_mmap_add_global(match);
if (!locked && fl)
mutex_unlock(&fl->map_mutex);
}
@ -5284,7 +5266,11 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
bail:
if (err && map) {
mutex_lock(&fl->map_mutex);
fastrpc_mmap_add(map);
if ((map->flags == ADSP_MMAP_HEAP_ADDR) ||
(map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR))
fastrpc_mmap_add_global(map);
else
fastrpc_mmap_add(map);
mutex_unlock(&fl->map_mutex);
}
mutex_unlock(&fl->internal_map_mutex);
@ -5370,6 +5356,9 @@ static int fastrpc_internal_mem_map(struct fastrpc_file *fl,
if (err)
goto bail;
ud->m.vaddrout = map->raddr;
if (ud->m.flags == ADSP_MMAP_HEAP_ADDR ||
ud->m.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
fastrpc_mmap_add_global(map);
bail:
if (err) {
ADSPRPC_ERR("failed to map fd %d, len 0x%x, flags %d, map %pK, err %d\n",
@ -5432,7 +5421,11 @@ static int fastrpc_internal_mem_unmap(struct fastrpc_file *fl,
/* Add back to map list in case of error to unmap on DSP */
if (map) {
mutex_lock(&fl->map_mutex);
fastrpc_mmap_add(map);
if ((map->flags == ADSP_MMAP_HEAP_ADDR) ||
(map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR))
fastrpc_mmap_add_global(map);
else
fastrpc_mmap_add(map);
mutex_unlock(&fl->map_mutex);
}
}
@ -5507,6 +5500,9 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
if (err)
goto bail;
map->raddr = raddr;
if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
fastrpc_mmap_add_global(map);
}
ud->vaddrout = raddr;
bail:


@ -24,6 +24,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/qcom_dma_heap.h>
#include <linux/types.h>
#include "qcom_dma_heap_secure_utils.h"
#include "qcom_sg_ops.h"
@ -31,6 +32,8 @@
#define CARVEOUT_ALLOCATE_FAIL -1
static LIST_HEAD(secure_carveout_heaps);
/*
* @pool_refcount_priv -
@ -54,11 +57,14 @@ struct carveout_heap {
void *pool_refcount_priv;
int (*pool_refcount_get)(void *priv);
void (*pool_refcount_put)(void *priv);
ssize_t size;
};
struct secure_carveout_heap {
u32 token;
struct carveout_heap carveout_heap;
struct list_head list;
atomic_long_t total_allocated;
};
static void sc_heap_free(struct qcom_sg_buffer *buffer);
@ -339,6 +345,7 @@ static int carveout_init_heap_memory(struct carveout_heap *co_heap,
return -ENOMEM;
co_heap->base = base;
co_heap->size = size;
gen_pool_add(co_heap->pool, co_heap->base, size, -1);
return 0;
@ -513,10 +520,16 @@ static struct dma_buf *sc_heap_allocate(struct dma_heap *heap,
unsigned long heap_flags)
{
struct secure_carveout_heap *sc_heap;
struct dma_buf *dbuf;
sc_heap = dma_heap_get_drvdata(heap);
return __carveout_heap_allocate(&sc_heap->carveout_heap, len,
dbuf = __carveout_heap_allocate(&sc_heap->carveout_heap, len,
fd_flags, heap_flags, sc_heap_free);
if (IS_ERR(dbuf))
return dbuf;
atomic_long_add(len, &sc_heap->total_allocated);
return dbuf;
}
static void sc_heap_free(struct qcom_sg_buffer *buffer)
@ -536,9 +549,42 @@ static void sc_heap_free(struct qcom_sg_buffer *buffer)
}
carveout_free(&sc_heap->carveout_heap, paddr, buffer->len);
sg_free_table(table);
atomic_long_sub(buffer->len, &sc_heap->total_allocated);
kfree(buffer);
}
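/* Secure carveout memory is not saved in the hibernation image, so
 * freeze must be refused while secure allocations are outstanding;
 * restore re-assigns the carveout according to its token via
 * hyp_assign_from_flags().
 */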
int qcom_secure_carveout_freeze(void)
{
long sz;
struct secure_carveout_heap *sc_heap;
list_for_each_entry(sc_heap, &secure_carveout_heaps, list) {
sz = atomic_long_read(&sc_heap->total_allocated);
if (sz) {
pr_err("%s: %s allocations not freed. %lx bytes won't be saved. Aborting freeze\n",
__func__,
dma_heap_get_name(sc_heap->carveout_heap.heap),
sz);
return -EBUSY;
}
}
return 0;
}
int qcom_secure_carveout_restore(void)
{
struct secure_carveout_heap *sc_heap;
int ret;
list_for_each_entry(sc_heap, &secure_carveout_heaps, list) {
ret = hyp_assign_from_flags(sc_heap->carveout_heap.base,
sc_heap->carveout_heap.size,
sc_heap->token);
BUG_ON(ret);
}
return 0;
}
static struct dma_heap_ops sc_heap_ops = {
.allocate = sc_heap_allocate,
};
@ -578,6 +624,7 @@ int qcom_secure_carveout_heap_create(struct platform_heap *heap_data)
goto destroy_heap;
}
list_add(&sc_heap->list, &secure_carveout_heaps);
return 0;
destroy_heap:


@ -11,6 +11,8 @@
#ifdef CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT
int qcom_secure_carveout_heap_create(struct platform_heap *heap_data);
int qcom_carveout_heap_create(struct platform_heap *heap_data);
int qcom_secure_carveout_freeze(void);
int qcom_secure_carveout_restore(void);
#else
static int qcom_secure_carveout_heap_create(struct platform_heap *heap_data)
{
@ -20,6 +22,8 @@ static int qcom_carveout_heap_create(struct platform_heap *heap_data)
{
return 1;
}
static inline int qcom_secure_carveout_freeze(void) { return 0; }
static inline int qcom_secure_carveout_restore(void) { return 0; }
#endif
#endif /* _QCOM_CARVEOUT_HEAP_H */


@ -79,6 +79,52 @@ static int qcom_dma_heap_probe(struct platform_device *pdev)
return ret;
}
static int qcom_dma_heaps_freeze(struct device *dev)
{
int ret;
ret = qcom_secure_carveout_freeze();
if (ret) {
pr_err("Failed to freeze secure carveout heap: %d\n", ret);
return ret;
}
ret = qcom_secure_system_freeze();
if (ret) {
pr_err("Failed to freeze secure system heap: %d\n", ret);
goto err;
}
return 0;
err:
ret = qcom_secure_carveout_restore();
if (ret) {
pr_err("Failed to restore secure carveout heap: %d\n", ret);
return ret;
}
return -EBUSY;
}
static int qcom_dma_heaps_restore(struct device *dev)
{
int ret;
ret = qcom_secure_carveout_restore();
if (ret)
pr_err("Failed to restore secure carveout heap: %d\n", ret);
ret = qcom_secure_system_restore();
if (ret)
pr_err("Failed to restore secure system heap: %d\n", ret);
return ret;
}
static const struct dev_pm_ops qcom_dma_heaps_pm_ops = {
.freeze = qcom_dma_heaps_freeze,
.restore = qcom_dma_heaps_restore,
};
static const struct of_device_id qcom_dma_heap_match_table[] = {
{.compatible = "qcom,dma-heaps"},
{},
@ -89,6 +135,7 @@ static struct platform_driver qcom_dma_heap_driver = {
.driver = {
.name = "qcom-dma-heap",
.of_match_table = qcom_dma_heap_match_table,
.pm = &qcom_dma_heaps_pm_ops,
},
};


@ -432,6 +432,7 @@ static void system_heap_free(struct qcom_sg_buffer *buffer)
}
dynamic_page_pool_free(sys_heap->pool_list[j], page);
}
atomic_long_sub(buffer->len, &sys_heap->total_allocated);
sg_free_table(table);
kfree(buffer);
}
@ -587,7 +588,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
if (num_non_secure_pages)
sg_free_table(&non_secure_table);
atomic_long_add(len, &sys_heap->total_allocated);
return dmabuf;
vmperm_release:
@ -626,6 +627,42 @@ static long get_pool_size_bytes(struct dma_heap *heap)
return total_size << PAGE_SHIFT;
}
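/* Secure heap pages cannot be saved across hibernation: refuse to
 * freeze while allocations are outstanding and release the page pools;
 * restore recreates the pools for each VMID.
 */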
int qcom_secure_system_freeze(void)
{
struct qcom_secure_system_heap *sys_heap;
long sz;
cancel_delayed_work_sync(&prefetch_work);
list_for_each_entry(sys_heap, &secure_heaps, list) {
sz = atomic_long_read(&sys_heap->total_allocated);
if (sz) {
pr_err("%s: Allocations not freed for VMID: %d %lx bytes won't be saved across hibernation. Aborting freeze.\n",
__func__, sys_heap->vmid, sz);
return -EINVAL;
}
dynamic_page_pool_release_pools(sys_heap->pool_list);
}
return 0;
}
int qcom_secure_system_restore(void)
{
struct qcom_secure_system_heap *sys_heap;
list_for_each_entry(sys_heap, &secure_heaps, list) {
sys_heap->pool_list = dynamic_page_pool_create_pools(sys_heap->vmid,
free_secure_pages);
if (IS_ERR(sys_heap->pool_list)) {
pr_err("%s: Pool creation failed for VMID: %d, err: %d\n",
__func__, sys_heap->vmid,
PTR_ERR(sys_heap->pool_list));
sys_heap->pool_list = NULL;
}
}
return 0;
}
static const struct dma_heap_ops system_heap_ops = {
.allocate = system_heap_allocate,
.get_pool_size = get_pool_size_bytes,


@ -8,23 +8,29 @@
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/types.h>
#include "qcom_dynamic_page_pool.h"
struct qcom_secure_system_heap {
struct dynamic_page_pool **pool_list;
int vmid;
struct list_head list;
atomic_long_t total_allocated;
};
#ifdef CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE
void qcom_secure_system_heap_create(const char *name, const char *secure_system_alias,
int vmid);
int qcom_secure_system_freeze(void);
int qcom_secure_system_restore(void);
#else
static void qcom_secure_system_heap_create(const char *name, const char *secure_system_alias,
int vmid)
{
}
static inline int qcom_secure_system_freeze(void) { return 0; }
static inline int qcom_secure_system_restore(void) { return 0; }
#endif
#endif /* _QCOM_SECURE_SYSTEM_HEAP_H */


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s:[%s][%d]: " fmt, KBUILD_MODNAME, __func__, __LINE__
@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/dma-buf.h>
#include <linux/qcom_scm.h>
#include <linux/pm.h>
#include <soc/qcom/qseecomi.h>
#include <linux/qtee_shmbridge.h>
#include <linux/proc_fs.h>
@ -491,6 +492,7 @@ static uint32_t tmecrashdump_address_offset;
static uint64_t qseelog_shmbridge_handle;
static struct encrypted_log_info enc_qseelog_info;
static struct encrypted_log_info enc_tzlog_info;
static bool restore_from_hibernation;
/*
* Debugfs data structure and functions
@ -1064,6 +1066,15 @@ static int _disp_tz_log_stats(size_t count)
static struct tzdbg_log_pos_t log_start = {0};
struct tzdbg_log_v2_t *log_v2_ptr;
struct tzdbg_log_t *log_ptr;
/* wrap and offset are reset to zero because TZ cold boots during
 * restoration from hibernation; the wrap and offset still hold values
 * from the previous boot, which are no longer valid.
 */
if (restore_from_hibernation) {
log_start.wrap = log_start.offset = 0;
return 0;
}
log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
tzdbg.diag_buf->ring_off -
@ -1151,8 +1162,18 @@ static int _disp_qsee_log_stats(size_t count)
if (!tzdbg.is_enlarged_buf)
return _disp_log_stats(g_qsee_log, &log_start,
QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
count, TZDBG_QSEE_LOG);
QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
count, TZDBG_QSEE_LOG);
/* wrap and offset are reset to zero because TZ cold boots during
 * restoration from hibernation; the values carried over from the
 * previous boot are no longer valid.
 */
if (restore_from_hibernation) {
log_start.wrap = log_start.offset = 0;
return 0;
}
return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
@ -1929,6 +1950,56 @@ static int tz_log_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static int tz_log_freeze(struct device *dev)
{
/* This flag is used to reset the ring buffer log pointers to zero
 * during restoration from hibernation.
 */
restore_from_hibernation = true;
if (g_qsee_log)
dma_free_coherent(dev, QSEE_LOG_BUF_SIZE, (void *)g_qsee_log,
coh_pmem);
if (!tzdbg.is_encrypted_log_enabled)
qtee_shmbridge_deregister(qseelog_shmbridge_handle);
return 0;
}
static int tz_log_restore(struct device *dev)
{
/* The ring buffer log pointers need to be reinitialized
 * during restoration from hibernation.
 */
if (restore_from_hibernation) {
_disp_tz_log_stats(0);
_disp_qsee_log_stats(0);
}
/* Re-register the log buffer with TZ during hibernation resume.
 * After hibernation the log buffer is only with HLOS, as TZ went
 * through a cold boot sequence.
 */
tzdbg_register_qsee_log_buf(to_platform_device(dev));
/* This is cleared again after a successful restoration
 * from hibernation.
 */
restore_from_hibernation = false;
return 0;
}
static const struct dev_pm_ops tz_log_pmops = {
.freeze = tz_log_freeze,
.restore = tz_log_restore,
.thaw = tz_log_restore,
};
#define TZ_LOG_PMOPS (&tz_log_pmops)
#else
#define TZ_LOG_PMOPS NULL
#endif
static const struct of_device_id tzlog_match[] = {
{.compatible = "qcom,tz-log"},
{}
@ -1941,6 +2012,7 @@ static struct platform_driver tz_log_driver = {
.name = "tz_log",
.of_match_table = tzlog_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = TZ_LOG_PMOPS,
},
};


@ -1854,6 +1854,19 @@ int qcom_scm_config_set_ice_key(uint32_t index, phys_addr_t paddr, size_t size,
}
EXPORT_SYMBOL(qcom_scm_config_set_ice_key);
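/* Notify TZ that HLOS has exited hibernation so it can reinitialize
 * state that was lost across the TZ cold boot.
 */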
int qcom_scm_hibernate_exit(void)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_HIBERNATE_EXIT,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call_noretry(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_hibernate_exit);
int qcom_scm_clear_ice_key(uint32_t index, unsigned int ce)
{
struct qcom_scm_desc desc = {


@ -171,6 +171,7 @@ int qcom_scm_handle_wait(struct device *dev, int scm_ret,
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY_V2 0x05
#define QCOM_SCM_ES_CLEAR_ICE_KEY 0x06
#define QCOM_SCM_ES_DERIVE_RAW_SECRET 0x07
#define QCOM_SCM_ES_HIBERNATE_EXIT 0x08
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01


@ -250,13 +250,28 @@ const char *kgsl_context_type(int type)
return "ANY";
}
/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
/* Scheduled by kgsl_mem_entry_destroy_deferred() */
static void _deferred_destroy(struct work_struct *work)
{
struct kgsl_mem_entry *entry =
container_of(work, struct kgsl_mem_entry, work);
kgsl_mem_entry_put(entry);
kgsl_mem_entry_destroy(&entry->refcount);
}
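/* kref release callback: defer the actual destroy to the lockless
 * workqueue so the final put does not block the caller.
 */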
static void kgsl_mem_entry_destroy_deferred(struct kref *kref)
{
struct kgsl_mem_entry *entry =
container_of(kref, struct kgsl_mem_entry, refcount);
INIT_WORK(&entry->work, _deferred_destroy);
queue_work(kgsl_driver.lockless_workqueue, &entry->work);
}
void kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
{
if (entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy_deferred);
}
static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
@ -2573,7 +2588,7 @@ static void gpumem_free_func(struct kgsl_device *device,
entry->memdesc.gpuaddr, entry->memdesc.size,
entry->memdesc.flags);
kgsl_mem_entry_put(entry);
kgsl_mem_entry_put_deferred(entry);
}
static long gpumem_free_entry_on_timestamp(struct kgsl_device *device,
@ -2670,8 +2685,7 @@ static bool gpuobj_free_fence_func(void *priv)
entry->memdesc.gpuaddr, entry->memdesc.size,
entry->memdesc.flags);
INIT_WORK(&entry->work, _deferred_put);
queue_work(kgsl_driver.lockless_workqueue, &entry->work);
kgsl_mem_entry_put_deferred(entry);
return true;
}


@ -652,6 +652,16 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
/*
* kgsl_mem_entry_put_deferred() - Put a refcount and, on the last put,
* destroy the mem_entry from deferred (workqueue) context.
* @entry: memory entry to be put.
*
* Use this to put a memory entry when the caller must not block while
* the memory entry is destroyed.
*/
void kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry);
/*
* kgsl_addr_range_overlap() - Checks if 2 ranges overlap
* @gpuaddr1: Start of first address range


@ -12,6 +12,7 @@
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_reclaim.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"
@ -317,10 +318,13 @@ static void kgsl_sharedmem_free_bind_op(struct kgsl_sharedmem_bind_op *op)
/* Decrement the vbo_count we added when creating the bind_op */
if (op->ops[i].entry)
atomic_dec(&op->ops[i].entry->vbo_count);
kgsl_mem_entry_put(op->ops[i].entry);
/* Release the reference on the child entry */
kgsl_mem_entry_put_deferred(op->ops[i].entry);
}
kgsl_mem_entry_put(op->target);
/* Release the reference on the target entry */
kgsl_mem_entry_put_deferred(op->target);
kvfree(op->ops);
kfree(op);
@ -371,6 +375,12 @@ kgsl_sharedmem_create_bind_op(struct kgsl_process_private *private,
op->nr_ops = ranges_nents;
op->target = target;
/* Make sure process is pinned in memory before proceeding */
atomic_inc(&private->cmd_count);
ret = kgsl_reclaim_to_pinned_state(private);
if (ret)
goto err;
for (i = 0; i < ranges_nents; i++) {
struct kgsl_gpumem_bind_range range;
struct kgsl_mem_entry *entry;
@ -471,12 +481,14 @@ kgsl_sharedmem_create_bind_op(struct kgsl_process_private *private,
ranges += ranges_size;
}
atomic_dec(&private->cmd_count);
init_completion(&op->comp);
kref_init(&op->ref);
return op;
err:
atomic_dec(&private->cmd_count);
kgsl_sharedmem_free_bind_op(op);
return ERR_PTR(ret);
}
@ -508,22 +520,16 @@ static void kgsl_sharedmem_bind_worker(struct work_struct *work)
op->ops[i].last,
op->ops[i].entry);
/* Release the reference on the child entry */
kgsl_mem_entry_put(op->ops[i].entry);
op->ops[i].entry = NULL;
}
/* Release the reference on the target entry */
kgsl_mem_entry_put(op->target);
op->target = NULL;
/* Wake up any threads waiting for the bind operation */
complete_all(&op->comp);
if (op->callback)
op->callback(op);
kref_put(&op->ref, kgsl_sharedmem_bind_range_destroy);
/* Put the refcount we took when scheduling the worker */
kgsl_sharedmem_put_bind_op(op);
}
void kgsl_sharedmem_bind_ranges(struct kgsl_sharedmem_bind_op *op)


@ -171,6 +171,11 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
return container_of(dom, struct arm_smmu_domain, domain);
}
static struct arm_smmu_domain *cb_cfg_to_smmu_domain(struct arm_smmu_cfg *cfg)
{
return container_of(cfg, struct arm_smmu_domain, cfg);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@ -1493,11 +1498,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
int irq, start, ret = 0;
unsigned long ias, oas;
struct io_pgtable_ops *pgtbl_ops;
struct qcom_io_pgtable_info pgtbl_info;
struct io_pgtable_cfg *pgtbl_cfg = &pgtbl_info.cfg;
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct qcom_io_pgtable_info *pgtbl_info = &smmu_domain->pgtbl_info;
struct io_pgtable_cfg *pgtbl_cfg = &pgtbl_info->cfg;
irqreturn_t (*context_fault)(int irq, void *dev);
struct io_pgtable *iop;
@ -1619,8 +1624,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (test_bit(DOMAIN_ATTR_FAST, smmu_domain->attributes)) {
fmt = ARM_V8L_FAST;
ret = qcom_iommu_get_fast_iova_range(dev,
&pgtbl_info.iova_base,
&pgtbl_info.iova_end);
&pgtbl_info->iova_base,
&pgtbl_info->iova_end);
if (ret < 0)
goto out_unlock;
}
@ -1645,8 +1650,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
cfg->asid = cfg->cbndx;
pgtbl_info.iommu_pgtbl_ops = &arm_smmu_pgtable_ops;
pgtbl_info.cfg = (struct io_pgtable_cfg) {
pgtbl_info->iommu_pgtbl_ops = &arm_smmu_pgtable_ops;
pgtbl_info->cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
@ -1667,12 +1672,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
pgtbl_cfg->quirks |= arm_smmu_domain_get_qcom_quirks(smmu_domain, smmu);
pgtbl_ops = qcom_alloc_io_pgtable_ops(fmt, &pgtbl_info, smmu_domain);
pgtbl_ops = qcom_alloc_io_pgtable_ops(fmt, pgtbl_info, smmu_domain);
if (!pgtbl_ops) {
ret = -ENOMEM;
goto out_clear_smmu;
}
smmu_domain->pgtbl_fmt = fmt;
/* Update the domain's page sizes to reflect the page table format */
domain->pgsize_bitmap = pgtbl_cfg->pgsize_bitmap;
@ -4262,11 +4268,88 @@ static int arm_smmu_pm_prepare(struct device *dev)
return (atomic_read(&dev->power.usage_count) == 1) ? -EINPROGRESS : 0;
}
static int __maybe_unused arm_smmu_pm_restore_early(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
struct arm_smmu_domain *smmu_domain;
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg *pgtbl_cfg;
struct arm_smmu_cb *cb;
int idx, ret;
/*
* Restore the page tables for secure vmids as they are lost
* after hibernation in secure code context.
*/
for (idx = 0; idx < smmu->num_context_banks; idx++) {
cb = &smmu->cbs[idx];
if (!cb->cfg)
continue;
smmu_domain = cb_cfg_to_smmu_domain(cb->cfg);
if (!arm_smmu_has_secure_vmid(smmu_domain))
continue;
pgtbl_cfg = &smmu_domain->pgtbl_info.cfg;
pgtbl_ops = qcom_alloc_io_pgtable_ops(smmu_domain->pgtbl_fmt,
&smmu_domain->pgtbl_info, smmu_domain);
if (!pgtbl_ops) {
dev_err(smmu->dev,
"failed to allocate page tables during pm restore for cxt %d %s\n",
idx, dev_name(dev));
return -ENOMEM;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
arm_smmu_init_context_bank(smmu_domain, pgtbl_cfg);
arm_smmu_secure_domain_lock(smmu_domain);
ret = arm_smmu_assign_table(smmu_domain);
arm_smmu_secure_domain_unlock(smmu_domain);
if (ret)
dev_err(smmu->dev, "Failed to hyp-assign page table memory cxt:%d dev:%s\n",
idx, dev_name(smmu_domain->dev));
}
arm_smmu_pm_resume(dev);
ret = arm_smmu_runtime_suspend(dev);
if (ret) {
dev_err(dev, "Failed to suspend\n");
return ret;
}
return 0;
}
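/* Secure page tables cannot be preserved in the hibernation image:
 * free the io-pgtable ops, destroy the secure pools and unassign the
 * table memory before the snapshot is taken;
 * arm_smmu_pm_restore_early() rebuilds them on resume.
 */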
static int __maybe_unused arm_smmu_pm_freeze_late(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
struct arm_smmu_domain *smmu_domain;
struct arm_smmu_cb *cb;
int idx;
for (idx = 0; idx < smmu->num_context_banks; idx++) {
cb = &smmu->cbs[idx];
if (cb && cb->cfg) {
smmu_domain = cb_cfg_to_smmu_domain(cb->cfg);
if (smmu_domain &&
arm_smmu_has_secure_vmid(smmu_domain)) {
qcom_free_io_pgtable_ops(smmu_domain->pgtbl_ops);
arm_smmu_secure_domain_lock(smmu_domain);
arm_smmu_secure_pool_destroy(smmu_domain);
arm_smmu_unassign_table(smmu_domain);
arm_smmu_secure_domain_unlock(smmu_domain);
}
}
}
return 0;
}
static const struct dev_pm_ops arm_smmu_pm_ops = {
.prepare = arm_smmu_pm_prepare,
SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
arm_smmu_runtime_resume, NULL)
.suspend = arm_smmu_pm_suspend,
.resume = arm_smmu_pm_resume,
.thaw_early = arm_smmu_pm_restore_early,
.freeze_late = arm_smmu_pm_freeze_late,
.restore_early = arm_smmu_pm_restore_early,
};


@ -523,6 +523,8 @@ struct arm_smmu_domain {
struct iommu_debug_attachment *logger;
struct iommu_domain domain;
struct qcom_io_pgtable_info pgtbl_info;
enum io_pgtable_fmt pgtbl_fmt;
/*
* test_bit(DOMAIN_ATTR_ATOMIC, aattributes) indicates that
* runtime power management should be disabled.


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
@ -213,6 +213,7 @@ struct qmp_device {
void *ilc;
bool early_boot;
bool hibernate_entry;
};
/**
@ -393,6 +394,9 @@ static int qmp_send_data(struct mbox_chan *chan, void *data)
mdev = mbox->mdev;
if (mdev->hibernate_entry)
return -ENXIO;
spin_lock_irqsave(&mbox->tx_lock, flags);
addr = mbox->desc + mbox->mcore_mbox_offset;
if (mbox->tx_sent) {
@ -529,6 +533,16 @@ static irqreturn_t qmp_irq_handler(int irq, void *priv)
{
struct qmp_device *mdev = (struct qmp_device *)priv;
/* QMP comes up very early in cold boot, so there is
 * a chance of missing the interrupt from the remote QMP.
 * In the hibernate case, an early interrupt corrupts the
 * QMP state machine and leaves it with invalid values.
 * Ignoring the first interrupt after hibernate exit
 * avoids this.
 */
if (mdev->hibernate_entry && mdev->early_boot)
return IRQ_NONE;
if (mdev->rx_reset_reg)
writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
@ -840,6 +854,9 @@ static int qmp_shim_send_data(struct mbox_chan *chan, void *data)
mdev = mbox->mdev;
if (mdev->hibernate_entry)
return -ENXIO;
if (pkt->size > SZ_4K)
return -EINVAL;
@ -954,6 +971,7 @@ static int qmp_mbox_init(struct device_node *n, struct qmp_device *mdev)
INIT_DELAYED_WORK(&mbox->dwork, qmp_notify_timeout);
mbox->suspend_flag = false;
mdev->hibernate_entry = false;
mdev_add_mbox(mdev, mbox);
return 0;
}
@ -1054,6 +1072,7 @@ static int qmp_shim_init(struct platform_device *pdev, struct qmp_device *mdev)
mdev_add_mbox(mdev, mbox);
mdev->ilc = ipc_log_context_create(QMP_IPC_LOG_PAGE_CNT, mdev->name, 0);
mdev->hibernate_entry = false;
return 0;
}
@ -1206,6 +1225,10 @@ static int qmp_mbox_probe(struct platform_device *pdev)
static int qmp_mbox_freeze(struct device *dev)
{
struct qmp_device *mdev = dev_get_drvdata(dev);
mdev->hibernate_entry = true;
dev_info(dev, "QMP: Hibernate entry\n");
return 0;
}
@ -1213,6 +1236,11 @@ static int qmp_mbox_restore(struct device *dev)
{
struct qmp_device *mdev = dev_get_drvdata(dev);
struct qmp_mbox *mbox;
struct device_node *edge_node = dev->of_node;
/* skip negotiation if device has shim layer */
if (of_parse_phandle(edge_node, "qcom,qmp", 0))
goto end;
list_for_each_entry(mbox, &mdev->mboxes, list) {
mbox->local_state = LINK_DISCONNECTED;
@ -1231,6 +1259,11 @@ static int qmp_mbox_restore(struct device *dev)
__qmp_rx_worker(mbox);
}
end:
if (mdev->hibernate_entry)
mdev->hibernate_entry = false;
dev_info(dev, "QMP: Hibernate exit\n");
return 0;
}


@ -1891,6 +1891,11 @@ static int bq256xx_restore(struct device *dev)
if (client->irq > 0) {
disable_irq_nosync(client->irq);
devm_free_irq(dev, client->irq, bq);
/*
* Set extcon state depending upon USB connect/disconnect state
* on hibernation exit
*/
bq256xx_irq_handler_thread(client->irq, bq);
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq256xx_irq_handler_thread,
IRQF_TRIGGER_FALLING |


@ -2,6 +2,7 @@
/*
* Copyright (c) 2016-2017, Linaro Ltd
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/idr.h>
@ -355,6 +356,38 @@ static void qcom_glink_channel_release(struct kref *ref)
kfree(channel);
}
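/* Look up a channel by local/remote cid under idr_lock and take a
 * reference so rx-path users can safely use the channel after the
 * lock is dropped; release it with qcom_glink_channel_ref_put().
 */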
static struct glink_channel *qcom_glink_channel_ref_get(
struct qcom_glink *glink,
bool remote_channel, int cid)
{
struct glink_channel *channel = NULL;
unsigned long flags;
if (!glink)
return NULL;
spin_lock_irqsave(&glink->idr_lock, flags);
if (remote_channel)
channel = idr_find(&glink->rcids, cid);
else
channel = idr_find(&glink->lcids, cid);
if (channel)
kref_get(&channel->refcount);
spin_unlock_irqrestore(&glink->idr_lock, flags);
return channel;
}
static void qcom_glink_channel_ref_put(struct glink_channel *channel)
{
if (!channel)
return;
kref_put(&channel->refcount, qcom_glink_channel_release);
}
static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
return glink->rx_pipe->avail(glink->rx_pipe);
@ -506,11 +539,8 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
unsigned int cid, bool granted)
{
struct glink_channel *channel;
unsigned long flags;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "unable to find channel\n");
return;
@ -520,6 +550,7 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
atomic_inc(&channel->intent_req_acked);
wake_up(&channel->intent_req_ack);
CH_INFO(channel, "\n");
qcom_glink_channel_ref_put(channel);
}
/**
@ -869,9 +900,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "invalid channel id received\n");
return;
@ -883,6 +912,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
if (!intent) {
spin_unlock_irqrestore(&channel->intent_lock, flags);
dev_err(glink->dev, "invalid intent id received\n");
qcom_glink_channel_ref_put(channel);
return;
}
@ -894,6 +924,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
kfree(intent);
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
qcom_glink_channel_ref_put(channel);
}
/**
@ -916,9 +947,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
unsigned long flags;
int iid;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
pr_err("%s channel not found for cid %d\n", __func__, cid);
@ -935,6 +964,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
spin_unlock_irqrestore(&channel->intent_lock, flags);
if (intent) {
qcom_glink_send_intent_req_ack(glink, channel, !!intent);
qcom_glink_channel_ref_put(channel);
return;
}
@ -944,6 +974,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
qcom_glink_advertise_intent(glink, channel, intent);
qcom_glink_send_intent_req_ack(glink, channel, !!intent);
qcom_glink_channel_ref_put(channel);
}
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
@ -990,7 +1021,7 @@ EXPORT_SYMBOL(qcom_glink_is_wakeup);
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
struct glink_core_rx_intent *intent;
struct glink_channel *channel;
struct glink_channel *channel = NULL;
struct {
struct glink_msg msg;
__le32 chunk_size;
@ -1018,9 +1049,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
}
rcid = le16_to_cpu(hdr.msg.param1);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, rcid);
if (!channel) {
dev_dbg(glink->dev, "Data on non-existing channel\n");
@ -1033,13 +1062,16 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
/* Might have an ongoing, fragmented, message to append */
if (!channel->buf) {
intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
if (!intent)
if (!intent) {
qcom_glink_channel_ref_put(channel);
return -ENOMEM;
}
intent->data = kmalloc(chunk_size + left_size,
GFP_ATOMIC);
if (!intent->data) {
kfree(intent);
qcom_glink_channel_ref_put(channel);
return -ENOMEM;
}
@ -1114,7 +1146,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
advance_rx:
qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
qcom_glink_channel_ref_put(channel);
return ret;
}
@ -1145,9 +1177,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
return;
}
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "intents for non-existing channel\n");
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
@ -1155,8 +1185,10 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
}
msg = kmalloc(msglen, GFP_ATOMIC);
if (!msg)
if (!msg) {
qcom_glink_channel_ref_put(channel);
return;
}
qcom_glink_rx_peak(glink, msg, 0, msglen);
@ -1185,15 +1217,14 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
kfree(msg);
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
qcom_glink_channel_ref_put(channel);
}
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
struct glink_channel *channel;
spin_lock(&glink->idr_lock);
channel = idr_find(&glink->lcids, lcid);
spin_unlock(&glink->idr_lock);
channel = qcom_glink_channel_ref_get(glink, false, lcid);
if (!channel) {
dev_err(glink->dev, "Invalid open ack packet\n");
return -EINVAL;
@ -1201,7 +1232,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
CH_INFO(channel, "\n");
complete_all(&channel->open_ack);
qcom_glink_channel_ref_put(channel);
return 0;
}
@ -1242,12 +1273,9 @@ static int qcom_glink_handle_signals(struct qcom_glink *glink,
unsigned int rcid, unsigned int signals)
{
struct glink_channel *channel;
unsigned long flags;
u32 old;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, rcid);
if (!channel) {
dev_err(glink->dev, "signal for non-existing channel\n");
return -EINVAL;
@ -1274,6 +1302,7 @@ static int qcom_glink_handle_signals(struct qcom_glink *glink,
old, channel->rsigs);
}
qcom_glink_channel_ref_put(channel);
return 0;
}


@ -7,6 +7,9 @@
#include <linux/crypto-qti-common.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/qcom_scm.h>
#include "crypto-qti-ice-regs.h"
#include "crypto-qti-platform.h"
@ -361,5 +364,44 @@ int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
}
EXPORT_SYMBOL(crypto_qti_derive_raw_secret);
static int crypto_qti_hibernate_exit(void)
{
int err = 0;
err = qcom_scm_hibernate_exit();
if (err == -EIO)
pr_err("%s:Hibernate exit SCM call unsupported in TZ\n", __func__);
else if (err != 0)
pr_err("%s:SCM call Error: 0x%x\n", __func__, err);
return err;
}
static int qcom_crypto_hibernate_restore(struct device *dev)
{
return crypto_qti_hibernate_exit();
}
static const struct dev_pm_ops qcom_crypto_dev_pm_ops = {
.restore = qcom_crypto_hibernate_restore,
};
static const struct of_device_id qti_crypto_match[] = {
{ .compatible = "qcom,crypto" },
{},
};
MODULE_DEVICE_TABLE(of, qti_crypto_match);
static struct platform_driver qti_crypto_driver = {
.probe = NULL,
.remove = NULL,
.driver = {
.name = "qti_crypto",
.pm = &qcom_crypto_dev_pm_ops,
.of_match_table = qti_crypto_match,
},
};
module_platform_driver(qti_crypto_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Common crypto library for storage encryption");


@ -134,7 +134,7 @@ int crypto_qti_program_key(const struct ice_mmio_data *mmio_data,
return -EINVAL;
}
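/* Check the hardware state directly instead of a one-time init flag:
 * re-run HWKM init whenever the controller requires it or the
 * transport key has not been set.
 */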
if (!qti_hwkm_init_done) {
if (qti_hwkm_init_required(mmio_data) || !qti_hwkm_is_ice_tpkey_set(mmio_data)) {
err = qti_hwkm_init(mmio_data);
if (err) {
pr_err("%s: Error with HWKM init %d\n", __func__, err);


@ -189,6 +189,30 @@ static int qti_hwkm_check_bist_status(const struct ice_mmio_data *mmio_data)
return 0;
}
bool qti_hwkm_init_required(const struct ice_mmio_data *mmio_data)
{
u32 val = 0;
val = ice_readl(mmio_data->ice_base_mmio, ICE_REGS_CONTROL);
val = val & 0x1;
return (val == 1);
}
EXPORT_SYMBOL_GPL(qti_hwkm_init_required);
bool qti_hwkm_is_ice_tpkey_set(const struct ice_mmio_data *mmio_data)
{
u32 val = 0;
val = qti_hwkm_readl(mmio_data->ice_hwkm_mmio,
QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS,
DONE);
return (val == 1);
}
EXPORT_SYMBOL_GPL(qti_hwkm_is_ice_tpkey_set);
int qti_hwkm_ice_init_sequence(const struct ice_mmio_data *mmio_data)
{
int ret = 0;


@ -111,6 +111,9 @@
/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_TPKEY_RECEIVE_CTL */
#define TPKEY_EN 8
/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_TPKEY_RECEIVE_STATUS */
#define DONE 8
/* QTI HWKM Bank status & control reg vals */
/* HWKM_MASTER_CFG_KM_BANKN_CTL */


@ -119,10 +119,16 @@ void *qcom_mdt_read_metadata(struct device *dev, const struct firmware *fw, cons
void *data;
int ret;
if (fw->size < sizeof(struct elf32_hdr)) {
dev_err(dev, "Image is too small\n");
return ERR_PTR(-EINVAL);
}
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
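/* Make sure the program header table lies within the firmware image
 * before dereferencing it.
 */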
if (ehdr->e_phnum < 2 || ehdr->e_phnum > PN_XNUM)
if (ehdr->e_phnum < 2 || ehdr->e_phoff > fw->size ||
(sizeof(phdrs) * ehdr->e_phnum > fw->size - ehdr->e_phoff))
return ERR_PTR(-EINVAL);
if (phdrs[0].p_type == PT_LOAD)


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@ -486,7 +486,10 @@ static int max31760_resume(struct device *dev)
max31760_enable_gpio(pdata, 1);
max31760_write_byte(pdata, MAX31760_CTRL_REG1, 0x19);
max31760_write_byte(pdata, MAX31760_CTRL_REG2, 0x11);
max31760_write_byte(pdata, MAX31760_CTRL_REG3, 0x31);
if (pdata->fan_num == 1)
max31760_write_byte(pdata, MAX31760_CTRL_REG3, 0x31);
else if (pdata->fan_num == 2)
max31760_write_byte(pdata, MAX31760_CTRL_REG3, 0x33);
max31760_set_cur_state_common(pdata, pdata->cur_state);
mutex_unlock(&pdata->update_lock);
}


@ -2,7 +2,7 @@
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2018, Linaro Limited
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitops.h>
@ -97,6 +97,8 @@ static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
.get_temp = get_temp_tsens_valid,
.get_cold_status = get_cold_int_status,
.suspend = tsens_v2_tsens_suspend,
.resume = tsens_v2_tsens_resume,
};
struct tsens_plat_data data_tsens_v2 = {


@ -2,7 +2,7 @@
/*
* Copyright (c) 2015, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2019, 2020, Linaro Ltd.
* Copyright (c) 2021, 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/debugfs.h>
@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/suspend.h>
#include "tsens.h"
#include "thermal_zone_internal.h"
@ -999,6 +1000,9 @@ static int __maybe_unused tsens_suspend(struct device *dev)
{
struct tsens_priv *priv = dev_get_drvdata(dev);
if (!pm_suspend_via_firmware())
return 0;
if (priv->ops && priv->ops->suspend)
return priv->ops->suspend(priv);
@ -1009,13 +1013,34 @@ static int __maybe_unused tsens_resume(struct device *dev)
{
struct tsens_priv *priv = dev_get_drvdata(dev);
if (!pm_suspend_via_firmware())
return 0;
if (priv->ops && priv->ops->resume)
return priv->ops->resume(priv);
return 0;
}
static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
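/* For hibernation, invoke the suspend/resume ops unconditionally; the
 * pm_suspend_via_firmware() check above only gates suspend-to-RAM.
 */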
static int __maybe_unused tsens_freeze(struct device *dev)
{
struct tsens_priv *priv = dev_get_drvdata(dev);
if (priv->ops && priv->ops->suspend)
return priv->ops->suspend(priv);
return 0;
}
static int __maybe_unused tsens_restore(struct device *dev)
{
struct tsens_priv *priv = dev_get_drvdata(dev);
if (priv->ops && priv->ops->resume)
return priv->ops->resume(priv);
return 0;
}
static const struct of_device_id tsens_table[] = {
{
@ -1062,7 +1087,7 @@ static const struct thermal_zone_of_device_ops tsens_cold_of_ops = {
static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
irq_handler_t thread_fn)
irq_handler_t thread_fn, int *irq_num)
{
struct platform_device *pdev;
int ret, irq;
@ -1072,6 +1097,7 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
return -ENODEV;
irq = platform_get_irq_byname(pdev, irqname);
*irq_num = irq;
if (irq < 0) {
ret = irq;
/* For old DTs with no IRQ defined */
@ -1093,6 +1119,66 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
return ret;
}
static int tsens_reinit(struct tsens_priv *priv)
{
unsigned long flags;
spin_lock_irqsave(&priv->ul_lock, flags);
if (priv->feat->has_watchdog) {
regmap_field_write(priv->rf[WDOG_BARK_MASK], 0);
regmap_field_write(priv->rf[CC_MON_MASK], 1);
}
if (tsens_version(priv) >= VER_0_1)
tsens_enable_irq(priv);
spin_unlock_irqrestore(&priv->ul_lock, flags);
return 0;
}
int tsens_v2_tsens_suspend(struct tsens_priv *priv)
{
if (priv->uplow_irq > 0) {
disable_irq_nosync(priv->uplow_irq);
disable_irq_wake(priv->uplow_irq);
}
if (priv->feat->crit_int && priv->crit_irq > 0) {
disable_irq_nosync(priv->crit_irq);
disable_irq_wake(priv->crit_irq);
}
if (priv->cold_irq > 0) {
disable_irq_nosync(priv->cold_irq);
disable_irq_wake(priv->cold_irq);
}
return 0;
}
int tsens_v2_tsens_resume(struct tsens_priv *priv)
{
tsens_reinit(priv);
if (priv->uplow_irq > 0) {
enable_irq(priv->uplow_irq);
enable_irq_wake(priv->uplow_irq);
}
if (priv->feat->crit_int && priv->crit_irq > 0) {
enable_irq(priv->crit_irq);
enable_irq_wake(priv->crit_irq);
}
if (priv->cold_irq > 0) {
enable_irq(priv->cold_irq);
enable_irq_wake(priv->cold_irq);
}
return 0;
}
static int tsens_register(struct tsens_priv *priv)
{
int i, temp, ret;
@ -1122,14 +1208,15 @@ static int tsens_register(struct tsens_priv *priv)
priv->ops->enable(priv, i);
}
ret = tsens_register_irq(priv, "uplow", tsens_irq_thread);
ret = tsens_register_irq(priv, "uplow",
tsens_irq_thread, &priv->uplow_irq);
if (ret < 0)
return ret;
if (priv->feat->crit_int)
ret = tsens_register_irq(priv, "critical",
tsens_critical_irq_thread);
tsens_critical_irq_thread, &priv->crit_irq);
if (priv->feat->cold_int) {
priv->cold_sensor = devm_kzalloc(priv->dev,
@ -1144,13 +1231,11 @@ static int tsens_register(struct tsens_priv *priv)
priv->cold_sensor->hw_id,
priv->cold_sensor,
&tsens_cold_of_ops);
if (IS_ERR(tzd)) {
ret = 0;
return ret;
if (!IS_ERR_OR_NULL(tzd)) {
priv->cold_sensor->tzd = tzd;
tsens_register_irq(priv, "cold",
tsens_cold_irq_thread, &priv->cold_irq);
}
priv->cold_sensor->tzd = tzd;
ret = tsens_register_irq(priv, "cold", tsens_cold_irq_thread);
}
return ret;
}
@ -1242,6 +1327,13 @@ static int tsens_remove(struct platform_device *pdev)
return 0;
}
static const struct dev_pm_ops tsens_pm_ops = {
.freeze = tsens_freeze,
.restore = tsens_restore,
.suspend = tsens_suspend,
.resume = tsens_resume,
};
static struct platform_driver tsens_driver = {
.probe = tsens_probe,
.remove = tsens_remove,


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_TSENS_H__
@ -610,6 +610,11 @@ struct tsens_priv {
const struct reg_field *fields;
const struct tsens_ops *ops;
/* Saved IRQ numbers so they can be re-used at runtime */
int uplow_irq;
int crit_irq;
int cold_irq;
struct dentry *debug_root;
struct dentry *debug;
struct tsens_sensor *cold_sensor;
@ -626,6 +631,8 @@ int init_common(struct tsens_priv *priv);
int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp);
int get_temp_common(const struct tsens_sensor *s, int *temp);
int get_cold_int_status(const struct tsens_sensor *s, bool *cold_status);
int tsens_v2_tsens_suspend(struct tsens_priv *priv);
int tsens_v2_tsens_resume(struct tsens_priv *priv);
/* TSENS target */
extern struct tsens_plat_data data_8960;


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitmap.h>
@ -4210,6 +4210,14 @@ static int msm_geni_serial_sys_hib_resume(struct device *dev)
geni_write_reg_nolog(cfg1, uport->membase,
SE_GENI_TX_PACKING_CFG1);
disable_irq(uport->irq);
} else {
/*
* Peripheral register settings are lost during hibernation.
* Clear the setup flag so that port setup happens again during the
* next session. HS-UART clients close and reopen the port across
* hibernation.
*/
port->port_setup = false;
}
return 0;
}


@ -364,6 +364,8 @@ int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp);
int qti_hwkm_clocks(bool on);
int qti_hwkm_init(const struct ice_mmio_data *mmio_data);
int qti_hwkm_ice_init_sequence(const struct ice_mmio_data *mmio_data);
bool qti_hwkm_init_required(const struct ice_mmio_data *mmio_data);
bool qti_hwkm_is_ice_tpkey_set(const struct ice_mmio_data *mmio_data);
#else
static inline int qti_hwkm_add_req(struct hwkm_cmd *cmd,
struct hwkm_rsp *rsp)
@ -382,5 +384,13 @@ static inline int qti_hwkm_ice_init_sequence(const struct ice_mmio_data *mmio_da
{
return -EOPNOTSUPP;
}
static inline bool qti_hwkm_init_required(const struct ice_mmio_data *mmio_data)
{
return -EOPNOTSUPP;
}
static inline bool qti_hwkm_is_ice_tpkey_set(const struct ice_mmio_data *mmio_data)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_QTI_HW_KEY_MANAGER */
#endif /* __HWKM_H_ */


@ -205,6 +205,7 @@ extern int qcom_scm_ice_invalidate_key(u32 index);
extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
enum qcom_scm_ice_cipher cipher,
u32 data_unit_size);
extern int qcom_scm_hibernate_exit(void);
extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
@ -391,6 +392,7 @@ static inline int qcom_scm_config_set_ice_key(uint32_t index, phys_addr_t paddr,
unsigned int food) { return -ENODEV; }
static inline int qcom_scm_clear_ice_key(uint32_t index, unsigned int food)
{ return -ENODEV; }
static inline int qcom_scm_hibernate_exit(uint32_t flag) { return -ENODEV; }
static inline int qcom_scm_derive_raw_secret(phys_addr_t paddr_key,
size_t size_key, phys_addr_t paddr_secret,
size_t size_secret) { return -ENODEV; }