From d703c71df41a2dd4c3674c532f7d44aa602317af Mon Sep 17 00:00:00 2001 From: Gerrit SelfHelp Service Account Date: Fri, 13 May 2022 10:25:42 -0700 Subject: [PATCH 001/146] Initial empty repository From b0ee7bc07666f7f9a10b1d874e4ef8bc5a14da1f Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Fri, 12 Aug 2022 17:39:48 -0700 Subject: [PATCH 002/146] adsprpc: Add initial src files to new target Add fastrpc driver files to vendor branch Change-Id: I852eee93cf6c0b2bc0833f9635f73b890de930da Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 7970 ++++++++++++++++++++++++++++++++++++++++++ dsp/adsprpc_compat.c | 1081 ++++++ dsp/adsprpc_compat.h | 17 + dsp/adsprpc_rpmsg.c | 276 ++ dsp/adsprpc_shared.h | 1140 ++++++ dsp/adsprpc_socket.c | 495 +++ 6 files changed, 10979 insertions(+) create mode 100644 dsp/adsprpc.c create mode 100644 dsp/adsprpc_compat.c create mode 100644 dsp/adsprpc_compat.h create mode 100644 dsp/adsprpc_rpmsg.c create mode 100644 dsp/adsprpc_shared.h create mode 100644 dsp/adsprpc_socket.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c new file mode 100644 index 0000000000..32b51572c3 --- /dev/null +++ b/dsp/adsprpc.c @@ -0,0 +1,7970 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +/* Uncomment this block to log an error on every VERIFY failure */ +/* + * #ifndef VERIFY_PRINT_ERROR + * #define VERIFY_PRINT_ERROR + * #endif + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adsprpc_compat.h" +#include "adsprpc_shared.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C +#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D +#define TZ_PIL_AUTH_QDSP6_PROC 1 + +#define FASTRPC_ENOSUCH 39 +#define DEBUGFS_SIZE 3072 +#define PID_SIZE 10 + +#define AUDIO_PDR_ADSP_DTSI_PROPERTY_NAME "qcom,fastrpc-adsp-audio-pdr" +#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc" +#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio" +#define ADSP_AUDIOPD_NAME "msm/adsp/audio_pd" + +#define SENSORS_PDR_ADSP_DTSI_PROPERTY_NAME "qcom,fastrpc-adsp-sensors-pdr" +#define SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc" +#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg" +#define ADSP_SENSORPD_NAME "msm/adsp/sensor_pd" + +#define SENSORS_PDR_SLPI_DTSI_PROPERTY_NAME "qcom,fastrpc-slpi-sensors-pdr" +#define SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_sdsprpc" +#define SENSORS_PDR_SLPI_SERVICE_NAME SENSORS_PDR_ADSP_SERVICE_NAME +#define SLPI_SENSORPD_NAME "msm/slpi/sensor_pd" + +#define FASTRPC_SECURE_WAKE_SOURCE_CLIENT_NAME "adsprpc-secure" +#define FASTRPC_NON_SECURE_WAKE_SOURCE_CLIENT_NAME "adsprpc-non_secure" + +#define RPC_TIMEOUT (5 * HZ) +#define BALIGN 128 +#define M_FDLIST (16) +#define M_CRCLIST (64) +#define M_KERNEL_PERF_LIST (PERF_KEY_MAX) +#define M_DSP_PERF_LIST (12) + +#define SESSION_ID_INDEX (30) +#define 
SESSION_ID_MASK (1 << SESSION_ID_INDEX) +#define PROCESS_ID_MASK ((2^SESSION_ID_INDEX) - 1) +#define FASTRPC_CTX_MAGIC (0xbeeddeed) + +/* Process status notifications from DSP will be sent with this unique context */ +#define FASTRPC_NOTIF_CTX_RESERVED 0xABCDABCD + +#define FASTRPC_CTX_JOB_TYPE_POS (4) +#define FASTRPC_CTX_TABLE_IDX_POS (6) +#define FASTRPC_CTX_JOBID_POS (16) +#define FASTRPC_CTX_TABLE_IDX_MASK \ + ((FASTRPC_CTX_MAX - 1) << FASTRPC_CTX_TABLE_IDX_POS) +#define FASTRPC_ASYNC_JOB_MASK (1) + +#define GET_TABLE_IDX_FROM_CTXID(ctxid) \ + ((ctxid & FASTRPC_CTX_TABLE_IDX_MASK) >> FASTRPC_CTX_TABLE_IDX_POS) + +/* Reserve few entries in context table for critical kernel and static RPC + * calls to avoid user invocations from exhausting all entries. + */ +#define NUM_KERNEL_AND_STATIC_ONLY_CONTEXTS (70) + +/* Maximum number of pending contexts per remote session */ +#define MAX_PENDING_CTX_PER_SESSION (64) + +#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */ +#define MINOR_NUM_DEV 0 +#define MINOR_NUM_SECURE_DEV 1 +#define NON_SECURE_CHANNEL 0 +#define SECURE_CHANNEL 1 + +#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0) +#ifndef ION_FLAG_CACHED +#define ION_FLAG_CACHED (1) +#endif + +/* + * ctxid of every message is OR-ed with fl->pd (0/1/2) before + * it is sent to DSP. 
So mask 2 LSBs to retrieve actual context + */ +#define CONTEXT_PD_CHECK (3) + +#define GET_CTXID_FROM_RSP_CTX(rsp_ctx) (rsp_ctx & ~CONTEXT_PD_CHECK) + +#define RH_CID ADSP_DOMAIN_ID + +#define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1) +#define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2) +#define FASTRPC_STATIC_HANDLE_LISTENER (3) +#define FASTRPC_STATIC_HANDLE_MAX (20) +#define FASTRPC_LATENCY_CTRL_ENB (1) + +/* Maximum PM timeout that can be voted through fastrpc */ +#define MAX_PM_TIMEOUT_MS 50 + +/* timeout in us for busy polling after early response from remote processor */ +#define FASTRPC_POLL_TIME (4000) + +/* timeout in us for polling until memory barrier */ +#define FASTRPC_POLL_TIME_MEM_UPDATE (500) + +/* timeout in us for polling completion signal after user early hint */ +#define FASTRPC_USER_EARLY_HINT_TIMEOUT (500) + +/* Early wake up poll completion number received from remote processor */ +#define FASTRPC_EARLY_WAKEUP_POLL (0xabbccdde) + +/* Poll response number from remote processor for call completion */ +#define FASTRPC_POLL_RESPONSE (0xdecaf) + +/* latency in us, early wake up signal used below this value */ +#define FASTRPC_EARLY_WAKEUP_LATENCY (200) + +/* response version number */ +#define FASTRPC_RSP_VERSION2 (2) + +/* CPU feature information to DSP */ +#define FASTRPC_CPUINFO_DEFAULT (0) +#define FASTRPC_CPUINFO_EARLY_WAKEUP (1) + +#define INIT_FILELEN_MAX (2*1024*1024) +#define INIT_MEMLEN_MAX (8*1024*1024) +#define MAX_CACHE_BUF_SIZE (8*1024*1024) + +/* Maximum buffers cached in cached buffer list */ +#define MAX_CACHED_BUFS (32) + +/* Max no. 
of persistent headers pre-allocated per process */ +#define MAX_PERSISTENT_HEADERS (25) + +#define PERF_CAPABILITY_SUPPORT (1 << 1) +#define KERNEL_ERROR_CODE_V1_SUPPORT 1 +#define USERSPACE_ALLOCATION_SUPPORT 1 +#define DSPSIGNAL_SUPPORT 1 + +#define MD_GMSG_BUFFER (1000) + +#define MINI_DUMP_DBG_SIZE (200*1024) + +/* Max number of region supported */ +#define MAX_UNIQUE_ID 5 + +/* Convert the 19.2MHz clock count to micro-seconds */ +#define CONVERT_CNT_TO_US(CNT) (CNT * 10ull / 192ull) + +/* Unique index flag used for mini dump */ +static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; + +/* Fastrpc remote process attributes */ +enum fastrpc_proc_attr { + /* Macro for Debug attr */ + FASTRPC_MODE_DEBUG = 1 << 0, + /* Macro for Ptrace */ + FASTRPC_MODE_PTRACE = 1 << 1, + /* Macro for CRC Check */ + FASTRPC_MODE_CRC = 1 << 2, + /* Macro for Unsigned PD */ + FASTRPC_MODE_UNSIGNED_MODULE = 1 << 3, + /* Macro for Adaptive QoS */ + FASTRPC_MODE_ADAPTIVE_QOS = 1 << 4, + /* Macro for System Process */ + FASTRPC_MODE_SYSTEM_PROCESS = 1 << 5, + /* Macro for Prvileged Process */ + FASTRPC_MODE_PRIVILEGED = (1 << 6), +}; + +#define PERF_END ((void)0) + +#define PERF(enb, cnt, ff) \ + {\ + struct timespec64 startT = {0};\ + uint64_t *counter = cnt;\ + if (enb && counter) {\ + ktime_get_real_ts64(&startT);\ + } \ + ff ;\ + if (enb && counter) {\ + *counter += getnstimediff(&startT);\ + } \ + } + +#define GET_COUNTER(perf_ptr, offset) \ + (perf_ptr != NULL ?\ + (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\ + (uint64_t *)(perf_ptr + offset)\ + : (uint64_t *)NULL) : (uint64_t *)NULL) + +/* Macro for comparing local client and PD names with those from callback */ +#define COMPARE_SERVICE_LOCATOR_NAMES(cb_client, local_client, \ + cb_pdname, local_pdname) \ + ((!strcmp(cb_client, local_client)) \ + && (!strcmp(cb_pdname, local_pdname))) + +#define IS_ASYNC_FASTRPC_AVAILABLE (1) + +/* Use the second definition to enable additional dspsignal debug logging */ +#define 
DSPSIGNAL_VERBOSE(x, ...) +/*#define DSPSIGNAL_VERBOSE ADSPRPC_INFO*/ + +static struct dentry *debugfs_root; +static struct dentry *debugfs_global_file; + +static inline uint64_t buf_page_start(uint64_t buf) +{ + uint64_t start = (uint64_t) buf & PAGE_MASK; + return start; +} + +static inline uint64_t buf_page_offset(uint64_t buf) +{ + uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1); + return offset; +} + +static inline uint64_t buf_num_pages(uint64_t buf, size_t len) +{ + uint64_t start = buf_page_start(buf) >> PAGE_SHIFT; + uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT; + uint64_t nPages = end - start + 1; + return nPages; +} + +static inline uint64_t buf_page_size(uint32_t size) +{ + uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK; + + return sz > PAGE_SIZE ? sz : PAGE_SIZE; +} + +static inline void *uint64_to_ptr(uint64_t addr) +{ + void *ptr = (void *)((uintptr_t)addr); + + return ptr; +} + +static inline uint64_t ptr_to_uint64(void *ptr) +{ + uint64_t addr = (uint64_t)((uintptr_t)ptr); + + return addr; +} + +static struct fastrpc_apps gfa; + +static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { + { + .name = "adsprpc-smd", + .subsys = "lpass", + .spd = { + { + .servloc_name = + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, + .spdname = ADSP_AUDIOPD_NAME, + .cid = ADSP_DOMAIN_ID, + }, + { + .servloc_name = + SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME, + .spdname = ADSP_SENSORPD_NAME, + .cid = ADSP_DOMAIN_ID, + } + }, + .cpuinfo_todsp = FASTRPC_CPUINFO_DEFAULT, + .cpuinfo_status = false, + }, + { + .name = "mdsprpc-smd", + .subsys = "mpss", + .spd = { + { + .cid = MDSP_DOMAIN_ID, + } + }, + .cpuinfo_todsp = FASTRPC_CPUINFO_DEFAULT, + .cpuinfo_status = false, + }, + { + .name = "sdsprpc-smd", + .subsys = "dsps", + .spd = { + { + .servloc_name = + SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME, + .spdname = SLPI_SENSORPD_NAME, + .cid = SDSP_DOMAIN_ID, + } + }, + .cpuinfo_todsp = FASTRPC_CPUINFO_DEFAULT, + .cpuinfo_status = false, + 
}, + { + .name = "cdsprpc-smd", + .subsys = "cdsp", + .spd = { + { + .cid = CDSP_DOMAIN_ID, + } + }, + .cpuinfo_todsp = FASTRPC_CPUINFO_EARLY_WAKEUP, + .cpuinfo_status = false, + }, +}; + +static int hlosvm[1] = {VMID_HLOS}; +static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + +static uint32_t kernel_capabilities[FASTRPC_MAX_ATTRIBUTES - + FASTRPC_MAX_DSP_ATTRIBUTES] = { + PERF_CAPABILITY_SUPPORT, + /* PERF_LOGGING_V2_SUPPORT feature is supported, unsupported = 0 */ + KERNEL_ERROR_CODE_V1_SUPPORT, + /* Fastrpc Driver error code changes present */ + USERSPACE_ALLOCATION_SUPPORT, + /* Userspace allocation allowed for DSP memory request*/ + DSPSIGNAL_SUPPORT + /* Lightweight driver-based signaling */ +}; + +static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type); +static int fastrpc_mem_map_to_dsp(struct fastrpc_file *fl, int fd, int offset, + uint32_t flags, uintptr_t va, uint64_t phys, + size_t size, uintptr_t *raddr); +static inline void fastrpc_update_rxmsg_buf(struct fastrpc_channel_ctx *chan, + uint64_t ctx, int retval, uint32_t rsp_flags, + uint32_t early_wake_time, uint32_t ver, int64_t ns, uint64_t xo_time_in_us); + +/** + * fastrpc_device_create - Create device for the fastrpc process file + * @fl : Fastrpc process file + * Returns: 0 on Success + */ +static int fastrpc_device_create(struct fastrpc_file *fl); + +static inline int64_t getnstimediff(struct timespec64 *start) +{ + int64_t ns; + struct timespec64 ts, b; + + ktime_get_real_ts64(&ts); + b = timespec64_sub(ts, *start); + ns = timespec64_to_ns(&b); + return ns; +} + +/** + * get_timestamp_in_ns - Gets time of day in nanoseconds + * + * Returns: Timestamp in nanoseconds + */ +static inline int64_t get_timestamp_in_ns(void) +{ + int64_t ns = 0; + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + ns = timespec64_to_ns(&ts); + return ns; +} + +static inline int poll_for_remote_response(struct smq_invoke_ctx *ctx, uint32_t timeout) +{ + int err = -EIO; + uint32_t 
sc = ctx->sc, ii = 0, jj = 0; + struct smq_invoke_buf *list; + struct smq_phy_page *pages; + uint64_t *fdlist = NULL; + uint32_t *crclist = NULL, *poll = NULL; + unsigned int inbufs, outbufs, handles; + + /* calculate poll memory location */ + inbufs = REMOTE_SCALARS_INBUFS(sc); + outbufs = REMOTE_SCALARS_OUTBUFS(sc); + handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc); + list = smq_invoke_buf_start(ctx->rpra, sc); + pages = smq_phy_page_start(sc, list); + fdlist = (uint64_t *)(pages + inbufs + outbufs + handles); + crclist = (uint32_t *)(fdlist + M_FDLIST); + poll = (uint32_t *)(crclist + M_CRCLIST); + + /* poll on memory for DSP response. Return failure on timeout */ + for (ii = 0, jj = 0; ii < timeout; ii++, jj++) { + if (*poll == FASTRPC_EARLY_WAKEUP_POLL) { + /* Remote processor sent early response */ + err = 0; + break; + } else if (*poll == FASTRPC_POLL_RESPONSE) { + /* Remote processor sent poll response to complete the call */ + err = 0; + ctx->is_work_done = true; + ctx->retval = 0; + /* Update DSP response history */ + fastrpc_update_rxmsg_buf(&gfa.channel[ctx->fl->cid], + ctx->msg.invoke.header.ctx, 0, POLL_MODE, 0, + FASTRPC_RSP_VERSION2, get_timestamp_in_ns(), + CONVERT_CNT_TO_US(__arch_counter_get_cntvct())); + break; + } + if (jj == FASTRPC_POLL_TIME_MEM_UPDATE) { + /* Wait for DSP to finish updating poll memory */ + rmb(); + jj = 0; + } + udelay(1); + } + return err; +} + +/** + * fastrpc_update_txmsg_buf - Update history of sent glink messages + * @chan : Channel context + * @msg : Pointer to RPC message to remote subsystem + * @transport_send_err : Error from transport + * @ns : Timestamp (in ns) of sent message + * @xo_time_in_us : XO Timestamp (in us) of sent message + * + * Returns none + */ +static inline void fastrpc_update_txmsg_buf(struct fastrpc_channel_ctx *chan, + struct smq_msg *msg, int transport_send_err, int64_t ns, uint64_t xo_time_in_us) +{ + unsigned long flags = 0; + unsigned int tx_index = 0; + struct 
fastrpc_tx_msg *tx_msg = NULL; + + spin_lock_irqsave(&chan->gmsg_log.lock, flags); + + tx_index = chan->gmsg_log.tx_index; + tx_msg = &chan->gmsg_log.tx_msgs[tx_index]; + + memcpy(&tx_msg->msg, msg, sizeof(struct smq_msg)); + tx_msg->transport_send_err = transport_send_err; + tx_msg->ns = ns; + tx_msg->xo_time_in_us = xo_time_in_us; + + tx_index++; + chan->gmsg_log.tx_index = + (tx_index > (GLINK_MSG_HISTORY_LEN - 1)) ? 0 : tx_index; + + spin_unlock_irqrestore(&chan->gmsg_log.lock, flags); +} + +/** + * fastrpc_update_rxmsg_buf - Update history of received glink responses + * @chan : Channel context + * @ctx : Context of received response from DSP + * @retval : Return value for RPC call + * @rsp_flags : Response type + * @early_wake_time : Poll time for early wakeup + * @ver : Version of response + * @ns : Timestamp (in ns) of response + * @xo_time_in_us : XO Timestamp (in us) of response + * + * Returns none + */ +static inline void fastrpc_update_rxmsg_buf(struct fastrpc_channel_ctx *chan, + uint64_t ctx, int retval, uint32_t rsp_flags, + uint32_t early_wake_time, uint32_t ver, int64_t ns, uint64_t xo_time_in_us) +{ + unsigned long flags = 0; + unsigned int rx_index = 0; + struct fastrpc_rx_msg *rx_msg = NULL; + struct smq_invoke_rspv2 *rsp = NULL; + + spin_lock_irqsave(&chan->gmsg_log.lock, flags); + + rx_index = chan->gmsg_log.rx_index; + rx_msg = &chan->gmsg_log.rx_msgs[rx_index]; + rsp = &rx_msg->rsp; + + rsp->ctx = ctx; + rsp->retval = retval; + rsp->flags = rsp_flags; + rsp->early_wake_time = early_wake_time; + rsp->version = ver; + rx_msg->ns = ns; + rx_msg->xo_time_in_us = xo_time_in_us; + + rx_index++; + chan->gmsg_log.rx_index = + (rx_index > (GLINK_MSG_HISTORY_LEN - 1)) ? 
0 : rx_index; + + spin_unlock_irqrestore(&chan->gmsg_log.lock, flags); +} + +static inline int get_unique_index(void) +{ + int index = -1; + + mutex_lock(&gfa.mut_uid); + for (index = 0; index < MAX_UNIQUE_ID; index++) { + if (md_unique_index_flag[index] == 0) { + md_unique_index_flag[index] = 1; + mutex_unlock(&gfa.mut_uid); + return index; + } + } + mutex_unlock(&gfa.mut_uid); + return index; +} + +static inline void reset_unique_index(int index) +{ + mutex_lock(&gfa.mut_uid); + if (index > -1 && index < MAX_UNIQUE_ID) + md_unique_index_flag[index] = 0; + mutex_unlock(&gfa.mut_uid); +} + +/** + * fastrpc_minidump_add_region - Add mini dump region + * @fastrpc_mmap : Input structure mmap + * + * Returns int + */ +static int fastrpc_minidump_add_region(struct fastrpc_mmap *map) +{ + int err = 0, ret_val = 0, md_index = 0; + struct md_region md_entry; + + md_index = get_unique_index(); + if (md_index > -1 && md_index < MAX_UNIQUE_ID) { + scnprintf(md_entry.name, MAX_NAME_LENGTH, "FRPC_%d", md_index); + md_entry.virt_addr = map->va; + md_entry.phys_addr = map->phys; + md_entry.size = map->size; + ret_val = msm_minidump_add_region(&md_entry); + if (ret_val < 0) { + ADSPRPC_ERR( + "Failed to add/update CMA to Minidump for phys: 0x%llx, size: %zu, md_index %d, md_entry.name %s\n", + map->phys, + map->size, md_index, + md_entry.name); + reset_unique_index(md_index); + err = ret_val; + } else { + map->frpc_md_index = md_index; + } + } else { + pr_warn("failed to generate valid unique id for mini dump : %d\n", md_index); + } + return err; +} + +/** + * fastrpc_minidump_remove_region - Remove mini dump region if added + * @fastrpc_mmap : Input structure mmap + * + * Returns int + */ +static int fastrpc_minidump_remove_region(struct fastrpc_mmap *map) +{ + int err = -EINVAL; + struct md_region md_entry; + + if (map->frpc_md_index > -1 && map->frpc_md_index < MAX_UNIQUE_ID) { + scnprintf(md_entry.name, MAX_NAME_LENGTH, "FRPC_%d", + map->frpc_md_index); + md_entry.virt_addr = 
map->va; + md_entry.phys_addr = map->phys; + md_entry.size = map->size; + err = msm_minidump_remove_region(&md_entry); + if (err < 0) { + ADSPRPC_ERR( + "Failed to remove CMA from Minidump for phys: 0x%llx, size: %zu index = %d\n", + map->phys, map->size, map->frpc_md_index); + } else { + reset_unique_index(map->frpc_md_index); + map->frpc_md_index = -1; + } + } else { + ADSPRPC_ERR("mini-dump enabled with invalid unique id: %d\n", map->frpc_md_index); + } + return err; +} + +static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache) +{ + struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl; + int vmid, err = 0, cid = -1; + + if (!fl) + return; + if (buf->in_use) { + /* Don't free persistent header buf. Just mark as available */ + spin_lock(&fl->hlock); + buf->in_use = false; + spin_unlock(&fl->hlock); + return; + } + if (cache && buf->size < MAX_CACHE_BUF_SIZE) { + spin_lock(&fl->hlock); + if (fl->num_cached_buf > MAX_CACHED_BUFS) { + spin_unlock(&fl->hlock); + goto skip_buf_cache; + } + hlist_add_head(&buf->hn, &fl->cached_bufs); + fl->num_cached_buf++; + spin_unlock(&fl->hlock); + buf->type = -1; + return; + } +skip_buf_cache: + if (buf->type == USERHEAP_BUF) { + spin_lock(&fl->hlock); + hlist_del_init(&buf->hn_rem); + spin_unlock(&fl->hlock); + buf->raddr = 0; + } + if (!IS_ERR_OR_NULL(buf->virt)) { + int destVM[1] = {VMID_HLOS}; + int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + + VERIFY(err, fl->sctx != NULL); + if (err) + goto bail; + if (fl->sctx->smmu.cb) + buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32); + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + ADSPRPC_ERR( + "invalid channel 0x%zx set for session\n", + cid); + goto bail; + } + vmid = fl->apps->channel[cid].vmid; + if (vmid) { + int srcVM[2] = {VMID_HLOS, vmid}; + int hyp_err = 0; + + hyp_err = hyp_assign_phys(buf->phys, + buf_page_size(buf->size), + srcVM, 2, destVM, destVMperm, 1); + if (hyp_err) { + ADSPRPC_ERR( + "rh hyp unassign 
failed with %d for phys 0x%llx, size %zu\n", + hyp_err, buf->phys, buf->size); + } + } + trace_fastrpc_dma_free(cid, buf->phys, buf->size); + dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt, + buf->phys, buf->dma_attr); + } +bail: + kfree(buf); +} + +static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl) +{ + struct fastrpc_buf *buf, *free; + + do { + struct hlist_node *n; + + free = NULL; + spin_lock(&fl->hlock); + hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) { + hlist_del_init(&buf->hn); + fl->num_cached_buf--; + free = buf; + break; + } + spin_unlock(&fl->hlock); + if (free) + fastrpc_buf_free(free, 0); + } while (free); +} + +static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl) +{ + struct fastrpc_buf *buf, *free; + + do { + struct hlist_node *n; + + free = NULL; + spin_lock(&fl->hlock); + hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) { + free = buf; + break; + } + spin_unlock(&fl->hlock); + if (free) + fastrpc_buf_free(free, 0); + } while (free); +} + +static void fastrpc_mmap_add(struct fastrpc_mmap *map) +{ + if (map->flags == ADSP_MMAP_HEAP_ADDR || + map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_add_head(&map->hn, &me->maps); + spin_unlock_irqrestore(&me->hlock, irq_flags); + } else { + struct fastrpc_file *fl = map->fl; + + hlist_add_head(&map->hn, &fl->maps); + } +} + +static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, + struct dma_buf *buf, uintptr_t va, size_t len, int mflags, int refs, + struct fastrpc_mmap **ppmap) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_mmap *match = NULL, *map = NULL; + struct hlist_node *n; + unsigned long irq_flags = 0; + + if ((va + len) < va) + return -EFAULT; + if (mflags == ADSP_MMAP_HEAP_ADDR || + mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(map, n, &me->maps, hn) 
{ + if (va >= map->va && + va + len <= map->va + map->len && + map->fd == fd) { + if (refs) { + if (map->refs + 1 == INT_MAX) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + return -ETOOMANYREFS; + } + map->refs++; + } + match = map; + break; + } + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + } else if (mflags == ADSP_MMAP_DMA_BUFFER) { + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + if (map->buf == buf) { + if (refs) { + if (map->refs + 1 == INT_MAX) + return -ETOOMANYREFS; + map->refs++; + } + match = map; + break; + } + } + } else { + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + if (va >= map->va && + va + len <= map->va + map->len && + map->fd == fd) { + if (refs) { + if (map->refs + 1 == INT_MAX) + return -ETOOMANYREFS; + map->refs++; + } + match = map; + break; + } + } + } + if (match) { + *ppmap = match; + return 0; + } + return -ENXIO; +} + +static int fastrpc_alloc_cma_memory(dma_addr_t *region_phys, void **vaddr, + size_t size, unsigned long dma_attr) +{ + int err = 0; + struct fastrpc_apps *me = &gfa; + + if (me->dev == NULL) { + ADSPRPC_ERR( + "failed to allocate CMA memory, device adsprpc-mem is not initialized\n"); + return -ENODEV; + } + VERIFY(err, size > 0 && size < me->max_size_limit); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid allocation size 0x%zx\n", + __func__, size); + return err; + } + *vaddr = dma_alloc_attrs(me->dev, size, region_phys, + GFP_KERNEL, dma_attr); + if (IS_ERR_OR_NULL(*vaddr)) { + ADSPRPC_ERR( + "dma_alloc_attrs failed for device %s size 0x%zx dma_attr %lu, returned %ld\n", + dev_name(me->dev), size, dma_attr, PTR_ERR(*vaddr)); + return -ENOBUFS; + } + return 0; +} + +static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, + size_t len, struct fastrpc_mmap **ppmap) +{ + struct fastrpc_mmap *match = NULL, *map; + struct hlist_node *n; + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + /* + * Search for a mapping by matching fd, remote address 
and length. + * For backward compatibility, search for a mapping by matching is + * limited to remote address and length when passed fd < 0. + */ + + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + if ((fd < 0 || map->fd == fd) && map->raddr == va && + map->raddr + map->len == va + len && + map->refs == 1 && + /* Skip unmap if it is fastrpc shell memory */ + !map->is_filemap) { + match = map; + hlist_del_init(&map->hn); + break; + } + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + if (match) { + *ppmap = match; + return 0; + } + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + if ((fd < 0 || map->fd == fd) && map->raddr == va && + map->raddr + map->len == va + len && + map->refs == 1 && + /* Skip unmap if it is fastrpc shell memory */ + !map->is_filemap) { + match = map; + hlist_del_init(&map->hn); + break; + } + } + if (match) { + *ppmap = match; + return 0; + } + return -ETOOMANYREFS; +} + +static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_file *fl; + int vmid, cid = -1, err = 0; + struct fastrpc_session_ctx *sess; + unsigned long irq_flags = 0; + + if (!map) + return; + fl = map->fl; + if (fl && !(map->flags == ADSP_MMAP_HEAP_ADDR || + map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) { + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d\n", + __func__, cid, err); + return; + } + } + if (map->flags == ADSP_MMAP_HEAP_ADDR || + map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + spin_lock_irqsave(&me->hlock, irq_flags); + map->refs--; + if (!map->refs) + hlist_del_init(&map->hn); + spin_unlock_irqrestore(&me->hlock, irq_flags); + if (map->refs > 0) { + ADSPRPC_WARN( + "multiple references for remote heap size %zu va 0x%lx ref count is %d\n", + map->size, map->va, map->refs); + return; + } + } else { + map->refs--; + if (!map->refs) + 
hlist_del_init(&map->hn); + if (map->refs > 0 && !flags) + return; + } + if (map->flags == ADSP_MMAP_HEAP_ADDR || + map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + + if (me->dev == NULL) { + ADSPRPC_ERR( + "failed to free remote heap allocation, device is not initialized\n"); + return; + } + + if (msm_minidump_enabled()) { + err = fastrpc_minidump_remove_region(map); + } + trace_fastrpc_dma_free(-1, map->phys, map->size); + if (map->phys) { + dma_free_attrs(me->dev, map->size, (void *)map->va, + (dma_addr_t)map->phys, (unsigned long)map->attr); + } + } else if (map->flags == FASTRPC_MAP_FD_NOMAP) { + trace_fastrpc_dma_unmap(cid, map->phys, map->size); + if (!IS_ERR_OR_NULL(map->table)) + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + if (!IS_ERR_OR_NULL(map->attach)) + dma_buf_detach(map->buf, map->attach); + if (!IS_ERR_OR_NULL(map->buf)) + dma_buf_put(map->buf); + } else { + int destVM[1] = {VMID_HLOS}; + int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + + if (map->secure) + sess = fl->secsctx; + else + sess = fl->sctx; + + vmid = fl->apps->channel[cid].vmid; + if (vmid && map->phys) { + int hyp_err = 0; + int srcVM[2] = {VMID_HLOS, vmid}; + + hyp_err = hyp_assign_phys(map->phys, + buf_page_size(map->size), + srcVM, 2, destVM, destVMperm, 1); + if (hyp_err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + hyp_err, map->phys, map->size); + } + } + trace_fastrpc_dma_unmap(cid, map->phys, map->size); + if (!IS_ERR_OR_NULL(map->table)) + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + if (!IS_ERR_OR_NULL(map->attach)) + dma_buf_detach(map->buf, map->attach); + if (!IS_ERR_OR_NULL(map->buf)) + dma_buf_put(map->buf); + } + kfree(map); +} + +static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, + struct fastrpc_session_ctx **session); + +static int fastrpc_mmap_create_remote_heap(struct fastrpc_file *fl, + struct fastrpc_mmap *map, size_t len, int 
mflags) +{ + int err = 0; + struct fastrpc_apps *me = &gfa; + dma_addr_t region_phys = 0; + void *region_vaddr = NULL; + + map->apps = me; + map->fl = NULL; + map->attr |= DMA_ATTR_NO_KERNEL_MAPPING; + err = fastrpc_alloc_cma_memory(®ion_phys, ®ion_vaddr, + len, (unsigned long) map->attr); + if (err) + goto bail; + trace_fastrpc_dma_alloc(fl->cid, (uint64_t)region_phys, len, + (unsigned long)map->attr, mflags); + map->phys = (uintptr_t)region_phys; + map->size = len; + map->va = (uintptr_t)region_vaddr; +bail: + return err; +} + +static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf *buf, + unsigned int attr, uintptr_t va, size_t len, int mflags, + struct fastrpc_mmap **ppmap) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_session_ctx *sess; + struct fastrpc_apps *apps = NULL; + int cid = -1; + struct fastrpc_channel_ctx *chan = NULL; + struct fastrpc_mmap *map = NULL; + int err = 0, vmid, sgl_index = 0; + struct scatterlist *sgl = NULL; + + if (!fl) { + err = -EBADF; + goto bail; + } + apps = fl->apps; + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + chan = &apps->channel[cid]; + + if (!fastrpc_mmap_find(fl, fd, NULL, va, len, mflags, 1, ppmap)) + return 0; + map = kzalloc(sizeof(*map), GFP_KERNEL); + VERIFY(err, !IS_ERR_OR_NULL(map)); + if (err) { + err = -ENOMEM; + goto bail; + } + INIT_HLIST_NODE(&map->hn); + map->flags = mflags; + map->refs = 1; + map->fl = fl; + map->fd = fd; + map->attr = attr; + map->buf = buf; + map->frpc_md_index = -1; + map->is_filemap = false; + ktime_get_real_ts64(&map->map_start_time); + if (mflags == ADSP_MMAP_HEAP_ADDR || + mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + VERIFY(err, 0 == (err = fastrpc_mmap_create_remote_heap(fl, map, + len, mflags))); + if (err) + goto bail; + if (msm_minidump_enabled()) { + err = fastrpc_minidump_add_region(map); + if (err) + goto bail; + } + } else if (mflags == FASTRPC_MAP_FD_NOMAP) { + VERIFY(err, 
!IS_ERR_OR_NULL(map->buf = dma_buf_get(fd))); + if (err) { + ADSPRPC_ERR("dma_buf_get failed for fd %d ret %ld\n", + fd, PTR_ERR(map->buf)); + err = -EBADFD; + goto bail; + } + map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; + map->va = 0; + map->phys = 0; + + VERIFY(err, !IS_ERR_OR_NULL(map->attach = + dma_buf_attach(map->buf, me->dev))); + if (err) { + ADSPRPC_ERR( + "dma_buf_attach for fd %d failed to map buffer on SMMU device %s ret %ld\n", + fd, dev_name(me->dev), PTR_ERR(map->attach)); + err = -EFAULT; + goto bail; + } + + map->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + VERIFY(err, !IS_ERR_OR_NULL(map->table = + dma_buf_map_attachment(map->attach, + DMA_BIDIRECTIONAL))); + if (err) { + ADSPRPC_ERR( + "dma_buf_map_attachment for fd %d failed on device %s ret %ld\n", + fd, dev_name(me->dev), PTR_ERR(map->table)); + err = -EFAULT; + goto bail; + } + VERIFY(err, map->table->nents == 1); + if (err) { + ADSPRPC_ERR( + "multiple scatter-gather entries (%u) present for NOMAP fd %d\n", + map->table->nents, fd); + err = -EFAULT; + goto bail; + } + map->phys = sg_dma_address(map->table->sgl); + map->size = len; + map->flags = FASTRPC_MAP_FD_DELAYED; + trace_fastrpc_dma_map(cid, fd, map->phys, map->size, + len, mflags, map->attach->dma_map_attrs); + } else { + if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) { + ADSPRPC_INFO("buffer mapped with persist attr 0x%x\n", + (unsigned int)map->attr); + map->refs = 2; + } + if (mflags == ADSP_MMAP_DMA_BUFFER) { + VERIFY(err, !IS_ERR_OR_NULL(map->buf)); + if (err) { + ADSPRPC_ERR("Invalid DMA buffer address %pK\n", + map->buf); + err = -EFAULT; + goto bail; + } + /* Increment DMA buffer ref count, + * so that client cannot unmap DMA buffer, before freeing buffer + */ + get_dma_buf(map->buf); + } else { + VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd))); + if (err) { + ADSPRPC_ERR("dma_buf_get failed for fd %d ret %ld\n", + fd, PTR_ERR(map->buf)); + err = -EBADFD; + goto bail; + } + } + 
map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; + if (map->secure) { + if (!fl->secsctx) + err = fastrpc_session_alloc(chan, 1, + &fl->secsctx); + if (err) { + ADSPRPC_ERR( + "fastrpc_session_alloc failed for fd %d ret %d\n", + fd, err); + err = -ENOSR; + goto bail; + } + } + if (map->secure) + sess = fl->secsctx; + else + sess = fl->sctx; + + VERIFY(err, !IS_ERR_OR_NULL(sess)); + if (err) { + ADSPRPC_ERR( + "session is invalid for fd %d, secure flag %d\n", + fd, map->secure); + err = -EBADR; + goto bail; + } + + VERIFY(err, !IS_ERR_OR_NULL(map->attach = + dma_buf_attach(map->buf, sess->smmu.dev))); + if (err) { + ADSPRPC_ERR( + "dma_buf_attach for fd %d failed to map buffer on SMMU device %s ret %ld\n", + fd, dev_name(sess->smmu.dev), + PTR_ERR(map->attach)); + err = -EFAULT; + goto bail; + } + + map->attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP; + + /* + * Skip CPU sync if IO Cohernecy is not supported + */ + if (!sess->smmu.coherent) + map->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + + VERIFY(err, !IS_ERR_OR_NULL(map->table = + dma_buf_map_attachment(map->attach, + DMA_BIDIRECTIONAL))); + if (err) { + ADSPRPC_ERR( + "dma_buf_map_attachment for fd %d failed on device %s ret %ld\n", + fd, dev_name(sess->smmu.dev), + PTR_ERR(map->table)); + err = -EFAULT; + goto bail; + } + if (!sess->smmu.enabled) { + VERIFY(err, map->table->nents == 1); + if (err) { + ADSPRPC_ERR( + "multiple scatter-gather entries (%u) present for fd %d mapped on SMMU disabled device\n", + map->table->nents, fd); + err = -EFAULT; + goto bail; + } + } + map->phys = sg_dma_address(map->table->sgl); + + if (sess->smmu.cb) { + map->phys += ((uint64_t)sess->smmu.cb << 32); + for_each_sg(map->table->sgl, sgl, map->table->nents, + sgl_index) + map->size += sg_dma_len(sgl); + } else { + map->size = buf_page_size(len); + } + trace_fastrpc_dma_map(cid, fd, map->phys, map->size, + len, mflags, map->attach->dma_map_attrs); + + VERIFY(err, map->size >= len && map->size < 
me->max_size_limit); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid map size 0x%zx len 0x%zx\n", + __func__, map->size, len); + goto bail; + } + + vmid = fl->apps->channel[cid].vmid; + if (vmid) { + int srcVM[1] = {VMID_HLOS}; + int destVM[2] = {VMID_HLOS, vmid}; + int destVMperm[2] = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE | PERM_EXEC}; + + err = hyp_assign_phys(map->phys, + buf_page_size(map->size), + srcVM, 1, destVM, destVMperm, 2); + if (err) { + ADSPRPC_ERR( + "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", + err, map->phys, map->size); + err = -EADDRNOTAVAIL; + goto bail; + } + } + map->va = va; + } + map->len = len; + + fastrpc_mmap_add(map); + *ppmap = map; + +bail: + if (map) + ktime_get_real_ts64(&map->map_end_time); + if (err && map) + fastrpc_mmap_free(map, 0); + return err; +} + +static inline bool fastrpc_get_cached_buf(struct fastrpc_file *fl, + size_t size, int buf_type, struct fastrpc_buf **obuf) +{ + bool found = false; + struct fastrpc_buf *buf = NULL, *fr = NULL; + struct hlist_node *n = NULL; + + if (buf_type == USERHEAP_BUF) + goto bail; + + /* find the smallest buffer that fits in the cache */ + spin_lock(&fl->hlock); + hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) { + if (buf->size >= size && (!fr || fr->size > buf->size)) + fr = buf; + } + if (fr) { + hlist_del_init(&fr->hn); + fl->num_cached_buf--; + } + spin_unlock(&fl->hlock); + if (fr) { + fr->type = buf_type; + *obuf = fr; + found = true; + } +bail: + return found; +} + +static inline bool fastrpc_get_persistent_buf(struct fastrpc_file *fl, + size_t size, int buf_type, struct fastrpc_buf **obuf) +{ + unsigned int i = 0; + bool found = false; + struct fastrpc_buf *buf = NULL; + + spin_lock(&fl->hlock); + if (!fl->num_pers_hdrs) + goto bail; + + /* + * Persistent header buffer can be used only if + * metadata length is less than 1 page size. 
+ */ + if (buf_type != METADATA_BUF || size > PAGE_SIZE) + goto bail; + + for (i = 0; i < fl->num_pers_hdrs; i++) { + buf = &fl->hdr_bufs[i]; + /* If buffer not in use, then assign it for requested alloc */ + if (!buf->in_use) { + buf->in_use = true; + *obuf = buf; + found = true; + break; + } + } +bail: + spin_unlock(&fl->hlock); + return found; +} + +static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size, + unsigned long dma_attr, uint32_t rflags, + int buf_type, struct fastrpc_buf **obuf) +{ + int err = 0, vmid; + struct fastrpc_apps *me = &gfa; + struct fastrpc_buf *buf = NULL; + int cid = -1; + + VERIFY(err, fl && fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + + VERIFY(err, size > 0 && size < me->max_size_limit); + if (err) { + err = -EFAULT; + pr_err("adsprpc: %s: invalid allocation size 0x%zx\n", + __func__, size); + goto bail; + } + + VERIFY(err, size > 0 && fl->sctx->smmu.dev); + if (err) { + err = (fl->sctx->smmu.dev == NULL) ? 
-ENODEV : err; + goto bail; + } + if (fastrpc_get_persistent_buf(fl, size, buf_type, obuf)) + return err; + if (fastrpc_get_cached_buf(fl, size, buf_type, obuf)) + return err; + + /* If unable to get persistent or cached buf, allocate new buffer */ + VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL))); + if (err) { + err = -ENOMEM; + goto bail; + } + INIT_HLIST_NODE(&buf->hn); + buf->fl = fl; + buf->virt = NULL; + buf->phys = 0; + buf->size = size; + buf->dma_attr = dma_attr; + buf->flags = rflags; + buf->raddr = 0; + buf->type = buf_type; + ktime_get_real_ts64(&buf->buf_start_time); + + buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size, + (dma_addr_t *)&buf->phys, + GFP_KERNEL, buf->dma_attr); + if (IS_ERR_OR_NULL(buf->virt)) { + /* free cache and retry */ + fastrpc_cached_buf_list_free(fl); + buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size, + (dma_addr_t *)&buf->phys, GFP_KERNEL, + buf->dma_attr); + VERIFY(err, !IS_ERR_OR_NULL(buf->virt)); + } + if (err) { + ADSPRPC_ERR( + "dma_alloc_attrs failed for size 0x%zx, returned %pK\n", + size, buf->virt); + err = -ENOBUFS; + goto bail; + } + if (fl->sctx->smmu.cb) + buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32); + trace_fastrpc_dma_alloc(cid, buf->phys, size, + dma_attr, (int)rflags); + + vmid = fl->apps->channel[cid].vmid; + if (vmid) { + int srcVM[1] = {VMID_HLOS}; + int destVM[2] = {VMID_HLOS, vmid}; + int destVMperm[2] = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE | PERM_EXEC}; + + err = hyp_assign_phys(buf->phys, buf_page_size(size), + srcVM, 1, destVM, destVMperm, 2); + if (err) { + ADSPRPC_DEBUG( + "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", + err, buf->phys, size); + err = -EADDRNOTAVAIL; + goto bail; + } + } + + if (buf_type == USERHEAP_BUF) { + INIT_HLIST_NODE(&buf->hn_rem); + spin_lock(&fl->hlock); + hlist_add_head(&buf->hn_rem, &fl->remote_bufs); + spin_unlock(&fl->hlock); + } + *obuf = buf; + bail: + if (buf) + ktime_get_real_ts64(&buf->buf_end_time); + 
if (err && buf) + fastrpc_buf_free(buf, 0); + return err; +} + + +static int context_restore_interrupted(struct fastrpc_file *fl, + struct fastrpc_ioctl_invoke_async *inv, + struct smq_invoke_ctx **po) +{ + int err = 0; + struct smq_invoke_ctx *ctx = NULL, *ictx = NULL; + struct hlist_node *n; + struct fastrpc_ioctl_invoke *invoke = &inv->inv; + + spin_lock(&fl->hlock); + hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) { + if (ictx->pid == current->pid) { + if (invoke->sc != ictx->sc || ictx->fl != fl) { + err = -EINVAL; + ictx->sc_interrupted = invoke->sc; + ictx->fl_interrupted = fl; + ictx->handle_interrupted = invoke->handle; + ADSPRPC_ERR( + "interrupted sc (0x%x) or fl (%pK) does not match with invoke sc (0x%x) or fl (%pK)\n", + ictx->sc, ictx->fl, invoke->sc, fl); + } else { + ctx = ictx; + hlist_del_init(&ctx->hn); + hlist_add_head(&ctx->hn, &fl->clst.pending); + } + break; + } + } + spin_unlock(&fl->hlock); + if (ctx) + *po = ctx; + return err; +} + +static unsigned int sorted_lists_intersection(unsigned int *listA, + unsigned int lenA, unsigned int *listB, unsigned int lenB) +{ + unsigned int i = 0, j = 0; + + while (i < lenA && j < lenB) { + if (listA[i] < listB[j]) + i++; + else if (listA[i] > listB[j]) + j++; + else + return listA[i]; + } + return 0; +} + +#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1) + +static int uint_cmp_func(const void *p1, const void *p2) +{ + unsigned int a1 = *((unsigned int *)p1); + unsigned int a2 = *((unsigned int *)p2); + + return CMP(a1, a2); +} + +static int overlap_ptr_cmp(const void *a, const void *b) +{ + struct overlap *pa = *((struct overlap **)a); + struct overlap *pb = *((struct overlap **)b); + /* sort with lowest starting buffer first */ + int st = CMP(pa->start, pb->start); + /* sort with highest ending buffer first */ + int ed = CMP(pb->end, pa->end); + return st == 0 ? 
ed : st; +} + +static int context_build_overlap(struct smq_invoke_ctx *ctx) +{ + int i, err = 0; + remote_arg_t *lpra = ctx->lpra; + int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); + int nbufs = inbufs + outbufs; + struct overlap max; + + for (i = 0; i < nbufs; ++i) { + ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv; + ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len; + if (lpra[i].buf.len) { + VERIFY(err, ctx->overs[i].end > ctx->overs[i].start); + if (err) { + err = -EFAULT; + ADSPRPC_ERR( + "Invalid address 0x%llx and size %zu\n", + (uintptr_t)lpra[i].buf.pv, + lpra[i].buf.len); + goto bail; + } + } + ctx->overs[i].raix = i; + ctx->overps[i] = &ctx->overs[i]; + } + sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL); + max.start = 0; + max.end = 0; + for (i = 0; i < nbufs; ++i) { + if (ctx->overps[i]->start < max.end) { + ctx->overps[i]->mstart = max.end; + ctx->overps[i]->mend = ctx->overps[i]->end; + ctx->overps[i]->offset = max.end - + ctx->overps[i]->start; + if (ctx->overps[i]->end > max.end) { + max.end = ctx->overps[i]->end; + } else { + if ((max.raix < inbufs && + ctx->overps[i]->raix + 1 > inbufs) || + (ctx->overps[i]->raix < inbufs && + max.raix + 1 > inbufs)) + ctx->overps[i]->do_cmo = 1; + ctx->overps[i]->mend = 0; + ctx->overps[i]->mstart = 0; + } + } else { + ctx->overps[i]->mend = ctx->overps[i]->end; + ctx->overps[i]->mstart = ctx->overps[i]->start; + ctx->overps[i]->offset = 0; + max = *ctx->overps[i]; + } + } +bail: + return err; +} + +#define K_COPY_FROM_USER(err, kernel, dst, src, size) \ + do {\ + if (!(kernel))\ + err = copy_from_user((dst),\ + (void const __user *)(src),\ + (size));\ + else\ + memmove((dst), (src), (size));\ + } while (0) + +#define K_COPY_TO_USER(err, kernel, dst, src, size) \ + do {\ + if (!(kernel))\ + err = copy_to_user((void __user *)(dst),\ + (src), (size));\ + else\ + memmove((dst), (src), (size));\ + } while (0) + + +static void 
context_free(struct smq_invoke_ctx *ctx); + +static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, + struct fastrpc_ioctl_invoke_async *invokefd, + struct smq_invoke_ctx **po) +{ + struct fastrpc_apps *me = &gfa; + int err = 0, bufs, ii, size = 0, cid = fl->cid; + struct smq_invoke_ctx *ctx = NULL; + struct fastrpc_ctx_lst *clst = &fl->clst; + struct fastrpc_ioctl_invoke *invoke = &invokefd->inv; + struct fastrpc_channel_ctx *chan = NULL; + unsigned long irq_flags = 0; + uint32_t is_kernel_memory = 0; + + spin_lock(&fl->hlock); + if (fl->clst.num_active_ctxs > MAX_PENDING_CTX_PER_SESSION && + !(kernel || invoke->handle < FASTRPC_STATIC_HANDLE_MAX)) { + err = -EDQUOT; + spin_unlock(&fl->hlock); + goto bail; + } + spin_unlock(&fl->hlock); + bufs = REMOTE_SCALARS_LENGTH(invoke->sc); + size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) + + sizeof(*ctx->fds) * (bufs) + + sizeof(*ctx->attrs) * (bufs) + + sizeof(*ctx->overs) * (bufs) + + sizeof(*ctx->overps) * (bufs); + + VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL))); + if (err) { + err = -ENOMEM; + goto bail; + } + + INIT_HLIST_NODE(&ctx->hn); + INIT_LIST_HEAD(&ctx->asyncn); + hlist_add_fake(&ctx->hn); + ctx->fl = fl; + ctx->maps = (struct fastrpc_mmap **)(&ctx[1]); + ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]); + ctx->fds = (int *)(&ctx->lpra[bufs]); + ctx->attrs = (unsigned int *)(&ctx->fds[bufs]); + ctx->overs = (struct overlap *)(&ctx->attrs[bufs]); + ctx->overps = (struct overlap **)(&ctx->overs[bufs]); + + /* If user message, do not use copy_from_user to copy buffers for + * compat driver,as memory is already copied to kernel memory + * for compat driver + */ + is_kernel_memory = ((kernel == USER_MSG) ? 
(fl->is_compat) : kernel); + K_COPY_FROM_USER(err, is_kernel_memory, (void *)ctx->lpra, invoke->pra, + bufs * sizeof(*ctx->lpra)); + if (err) { + ADSPRPC_ERR( + "copy from user failed with %d for remote arguments list\n", + err); + err = -EFAULT; + goto bail; + } + + if (invokefd->fds) { + K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds, + bufs * sizeof(*ctx->fds)); + if (err) { + ADSPRPC_ERR( + "copy from user failed with %d for fd list\n", + err); + err = -EFAULT; + goto bail; + } + } else { + ctx->fds = NULL; + } + if (invokefd->attrs) { + K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs, + bufs * sizeof(*ctx->attrs)); + if (err) { + ADSPRPC_ERR( + "copy from user failed with %d for attribute list\n", + err); + err = -EFAULT; + goto bail; + } + } + ctx->crc = (uint32_t *)invokefd->crc; + ctx->perf_dsp = (uint64_t *)invokefd->perf_dsp; + ctx->perf_kernel = (uint64_t *)invokefd->perf_kernel; + ctx->handle = invoke->handle; + ctx->sc = invoke->sc; + if (bufs) { + VERIFY(err, 0 == (err = context_build_overlap(ctx))); + if (err) + goto bail; + } + ctx->retval = -1; + ctx->pid = current->pid; + ctx->tgid = fl->tgid; + init_completion(&ctx->work); + ctx->magic = FASTRPC_CTX_MAGIC; + ctx->rsp_flags = NORMAL_RESPONSE; + ctx->is_work_done = false; + ctx->copybuf = NULL; + ctx->is_early_wakeup = false; + + if (ctx->fl->profile) { + ctx->perf = kzalloc(sizeof(*(ctx->perf)), GFP_KERNEL); + VERIFY(err, !IS_ERR_OR_NULL(ctx->perf)); + if (err) { + kfree(ctx->perf); + err = -ENOMEM; + goto bail; + } + memset(ctx->perf, 0, sizeof(*(ctx->perf))); + ctx->perf->tid = fl->tgid; + } + if (invokefd->job) { + K_COPY_FROM_USER(err, kernel, &ctx->asyncjob, invokefd->job, + sizeof(ctx->asyncjob)); + if (err) + goto bail; + } + + chan = &me->channel[cid]; + + spin_lock_irqsave(&chan->ctxlock, irq_flags); + me->jobid[cid]++; + for (ii = ((kernel || ctx->handle < FASTRPC_STATIC_HANDLE_MAX) + ? 
0 : NUM_KERNEL_AND_STATIC_ONLY_CONTEXTS); + ii < FASTRPC_CTX_MAX; ii++) { + if (!chan->ctxtable[ii]) { + chan->ctxtable[ii] = ctx; + ctx->ctxid = (me->jobid[cid] << FASTRPC_CTX_JOBID_POS) + | (ii << FASTRPC_CTX_TABLE_IDX_POS) + | ((ctx->asyncjob.isasyncjob && + FASTRPC_ASYNC_JOB_MASK) << FASTRPC_CTX_JOB_TYPE_POS); + break; + } + } + spin_unlock_irqrestore(&chan->ctxlock, irq_flags); + VERIFY(err, ii < FASTRPC_CTX_MAX); + if (err) { + ADSPRPC_ERR( + "adsprpc: out of context table entries for handle 0x%x, sc 0x%x\n", + ctx->handle, ctx->sc); + err = -ENOKEY; + goto bail; + } + spin_lock(&fl->hlock); + hlist_add_head(&ctx->hn, &clst->pending); + clst->num_active_ctxs++; + spin_unlock(&fl->hlock); + + trace_fastrpc_context_alloc((uint64_t)ctx, + ctx->ctxid | fl->pd, ctx->handle, ctx->sc); + *po = ctx; +bail: + if (ctx && err) + context_free(ctx); + return err; +} + +static void context_save_interrupted(struct smq_invoke_ctx *ctx) +{ + struct fastrpc_ctx_lst *clst = &ctx->fl->clst; + + spin_lock(&ctx->fl->hlock); + hlist_del_init(&ctx->hn); + hlist_add_head(&ctx->hn, &clst->interrupted); + spin_unlock(&ctx->fl->hlock); +} + +static void context_free(struct smq_invoke_ctx *ctx) +{ + uint32_t i = 0; + struct fastrpc_apps *me = &gfa; + int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) + + REMOTE_SCALARS_OUTBUFS(ctx->sc); + int cid = ctx->fl->cid; + struct fastrpc_channel_ctx *chan = NULL; + unsigned long irq_flags = 0; + int err = 0; + + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + ADSPRPC_ERR( + "invalid channel 0x%zx set for session\n", + cid); + return; + } + chan = &me->channel[cid]; + i = (uint32_t)GET_TABLE_IDX_FROM_CTXID(ctx->ctxid); + + spin_lock_irqsave(&chan->ctxlock, irq_flags); + if (i < FASTRPC_CTX_MAX && chan->ctxtable[i] == ctx) { + chan->ctxtable[i] = NULL; + } else { + for (i = 0; i < FASTRPC_CTX_MAX; i++) { + if (chan->ctxtable[i] == ctx) { + chan->ctxtable[i] = NULL; + break; + } + } + } + spin_unlock_irqrestore(&chan->ctxlock, irq_flags); + + 
	/* drop the context from its file's pending/interrupted list */
	spin_lock(&ctx->fl->hlock);
	if (!hlist_unhashed(&ctx->hn)) {
		hlist_del_init(&ctx->hn);
		ctx->fl->clst.num_active_ctxs--;
	}
	spin_unlock(&ctx->fl->hlock);

	/* release per-argument mappings and scratch buffers */
	mutex_lock(&ctx->fl->map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	mutex_unlock(&ctx->fl->map_mutex);

	fastrpc_buf_free(ctx->buf, 1);
	if (ctx->copybuf != ctx->buf)
		fastrpc_buf_free(ctx->copybuf, 1);
	kfree(ctx->lrpra);
	ctx->lrpra = NULL;
	/* poison identifying fields before freeing */
	ctx->magic = 0;
	ctx->ctxid = 0;
	if (ctx->fl->profile)
		kfree(ctx->perf);

	trace_fastrpc_context_free((uint64_t)ctx,
		ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
	kfree(ctx);
}

/*
 * Queue a completed async context on its file's async queue and wake
 * any waiter; a context already woken early is not queued again.
 */
static void fastrpc_queue_completed_async_job(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_file *fl = ctx->fl;
	unsigned long flags;

	spin_lock_irqsave(&fl->aqlock, flags);
	if (ctx->is_early_wakeup)
		goto bail;
	list_add_tail(&ctx->asyncn, &fl->clst.async_queue);
	atomic_add(1, &fl->async_queue_job_count);
	ctx->is_early_wakeup = true;
	wake_up_interruptible(&fl->async_wait_queue);
bail:
	spin_unlock_irqrestore(&fl->aqlock, flags);
}

/*
 * Post a DSP process-state notification (status/domain/session) on the
 * file's notif queue and wake any listener. Uses GFP_ATOMIC since it
 * may run in atomic context.
 */
static void fastrpc_queue_pd_status(struct fastrpc_file *fl, int domain, int status, int sessionid)
{
	struct smq_notif_rsp *notif_rsp = NULL;
	unsigned long flags;
	int err = 0;

	VERIFY(err, NULL != (notif_rsp = kzalloc(sizeof(*notif_rsp), GFP_ATOMIC)));
	if (err) {
		ADSPRPC_ERR(
			"allocation failed for size 0x%zx\n",
			sizeof(*notif_rsp));
		return;
	}
	notif_rsp->status = status;
	notif_rsp->domain = domain;
	notif_rsp->session = sessionid;

	spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
	list_add_tail(&notif_rsp->notifn, &fl->clst.notif_queue);
	atomic_add(1, &fl->proc_state_notif.notif_queue_count);
	wake_up_interruptible(&fl->proc_state_notif.notif_wait_queue);
	spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags);
}

/*
 * Route a DSP status notification to the fastrpc file whose tgid
 * matches the notified pid (with or without the session bits).
 */
static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = NULL;
	struct hlist_node *n;
	bool is_process_found = false;
	int sessionid = 0;
	unsigned long irq_flags = 0;

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->tgid == notif->pid ||
			(fl->tgid == (notif->pid & PROCESS_ID_MASK))) {
			is_process_found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);

	if (!is_process_found)
		return;
	if (notif->pid & SESSION_ID_MASK)
		sessionid = 1;
	/* NOTE(review): fl is used after me->hlock is dropped; presumably
	 * file teardown cannot race here — confirm against fastrpc_file_free
	 */
	fastrpc_queue_pd_status(fl, domain, notif->status, sessionid);
}

/*
 * Deliver a DSP response to the waiting context. Normal/complete
 * responses finish the work; early signals either record the hint
 * (async) or wake the waiter ahead of completion.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx,
		int retval, uint32_t rsp_flags, uint32_t early_wake_time)
{
	fastrpc_pm_awake(ctx->fl, gcinfo[ctx->fl->cid].secure);
	ctx->retval = retval;
	ctx->rsp_flags = (enum fastrpc_response_flags)rsp_flags;
	trace_fastrpc_context_complete(ctx->fl->cid, (uint64_t)ctx, retval,
				ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
	switch (rsp_flags) {
	case NORMAL_RESPONSE:
	case COMPLETE_SIGNAL:
		/* normal and complete response with return value */
		ctx->is_work_done = true;
		if (ctx->asyncjob.isasyncjob)
			fastrpc_queue_completed_async_job(ctx);
		trace_fastrpc_msg("wakeup_task: begin");
		complete(&ctx->work);
		trace_fastrpc_msg("wakeup_task: end");
		break;
	case USER_EARLY_SIGNAL:
		/* user hint of approximate time of completion */
		ctx->early_wake_time = early_wake_time;
		if (ctx->asyncjob.isasyncjob)
			break;
		/* deliberate fallthrough to EARLY_RESPONSE for sync jobs */
	case EARLY_RESPONSE:
		/* rpc framework early response with return value */
		if (ctx->asyncjob.isasyncjob)
			fastrpc_queue_completed_async_job(ctx);
		else {
			trace_fastrpc_msg("wakeup_task: begin");
			complete(&ctx->work);
			trace_fastrpc_msg("wakeup_task: end");
		}
		break;
	default:
		break;
	}
}

/*
 * Fail every pending and interrupted context of @me with -ECONNRESET
 * and wake its waiter — used when the remote subsystem goes away.
 */
static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;
	unsigned long irq_flags = 0;

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		ictx->is_work_done = true;
		ictx->retval = -ECONNRESET;
		trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
			ictx->retval, ictx->msg.invoke.header.ctx,
			ictx->handle, ictx->sc);
		if (ictx->asyncjob.isasyncjob)
			fastrpc_queue_completed_async_job(ictx);
		else
			complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		ictx->is_work_done = true;
		ictx->retval = -ECONNRESET;
		trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
			ictx->retval, ictx->msg.invoke.header.ctx,
			ictx->handle, ictx->sc);
		complete(&ictx->work);
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);
}

/*
 * Same as fastrpc_notify_users() but only for contexts already sent to
 * the DSP (msg.pid set) — used on static-PD protection-domain restart.
 */
static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;
	unsigned long irq_flags = 0;

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		if (ictx->msg.pid) {
			ictx->is_work_done = true;
			ictx->retval = -ECONNRESET;
			trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
				ictx->retval, ictx->msg.invoke.header.ctx,
				ictx->handle, ictx->sc);
			if (ictx->asyncjob.isasyncjob)
				fastrpc_queue_completed_async_job(ictx);
			else
				complete(&ictx->work);
		}
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		if (ictx->msg.pid) {
			ictx->is_work_done = true;
			ictx->retval = -ECONNRESET;
			trace_fastrpc_context_complete(me->cid, (uint64_t)ictx,
				ictx->retval, ictx->msg.invoke.header.ctx,
				ictx->handle, ictx->sc);
			complete(&ictx->work);
		}
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);
}

/*
 * On subsystem restart, collect an ELF dump of every live process's
 * init memory (and the channel buffer) for the given channel id.
 */
static void fastrpc_ramdump_collection(int cid)
{
	struct fastrpc_file *fl = NULL;
	struct hlist_node *n = NULL;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = &me->channel[cid];
	struct qcom_dump_segment ramdump_entry;
	struct fastrpc_buf *buf = NULL;
	int ret = 0;
	unsigned long irq_flags = 0;
	struct list_head head;

	/* gather candidate buffers under the driver lock */
	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid && fl->init_mem &&
			fl->file_close < FASTRPC_PROCESS_DSP_EXIT_COMPLETE &&
			fl->dsp_proc_init) {
			hlist_add_head(&fl->init_mem->hn_init, &chan->initmems);
			fl->is_ramdump_pend = true;
		}
	}
	if (chan->buf)
		hlist_add_head(&chan->buf->hn_init, &chan->initmems);
	spin_unlock_irqrestore(&me->hlock, irq_flags);

	/* dump each collected buffer outside the lock */
	hlist_for_each_entry_safe(buf, n, &chan->initmems, hn_init) {
		fl = buf->fl;
		memset(&ramdump_entry, 0, sizeof(ramdump_entry));
		ramdump_entry.da = buf->phys;
		ramdump_entry.va = (void *)buf->virt;
		ramdump_entry.size = buf->size;
		INIT_LIST_HEAD(&head);
		list_add(&ramdump_entry.node, &head);

		if (fl && fl->sctx && fl->sctx->smmu.dev)
			ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS);
		else {
			if (me->dev != NULL)
				ret = qcom_elf_dump(&head, me->dev, ELF_CLASS);
		}
		if (ret < 0)
			ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n",
				__func__, ret);

		hlist_del_init(&buf->hn_init);
		if (fl) {
			spin_lock_irqsave(&me->hlock, irq_flags);
			if (fl->file_close)
				complete(&fl->work);
			fl->is_ramdump_pend = false;
			spin_unlock_irqrestore(&me->hlock, irq_flags);
		}
	}
}

/* Notify every file on channel @cid of an SSR and fail its contexts. */
static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;
	unsigned long irq_flags = 0;

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid) {
			fastrpc_queue_pd_status(fl, cid, FASTRPC_DSP_SSR, 0);
			fastrpc_notify_users(fl);
		}
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);
}

/* Fail in-flight contexts of every file bound to the restarted
 * service location (PDR).
 */
static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me,
		char *servloc_name)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;
	unsigned long irq_flags = 0;

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if
(fl->servloc_name && !strcmp(servloc_name, fl->servloc_name))
			fastrpc_notify_users_staticpd_pdr(fl);
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);
}

/* Initialize the per-file context-list bookkeeping. */
static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
	me->num_active_ctxs = 0;
	INIT_LIST_HEAD(&me->async_queue);
	INIT_LIST_HEAD(&me->notif_queue);
}

/*
 * Free every interrupted and pending context of @fl. Each context is
 * unlinked under fl->hlock but freed outside it, one at a time, because
 * context_free() takes locks of its own.
 */
static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			clst->num_active_ctxs--;
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			clst->num_active_ctxs--;
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
/*
 * Free every fastrpc file on the driver list; same unlink-under-lock,
 * free-outside-lock pattern as fastrpc_context_list_dtor().
 */
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;
	unsigned long irq_flags = 0;

	do {
		free = NULL;
		spin_lock_irqsave(&me->hlock, irq_flags);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock_irqrestore(&me->hlock, irq_flags);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

/*
 * Marshal the invoke arguments for the DSP: map ion-backed buffers,
 * build the metadata (page lists, fd/crc/perf/early-hint slots) in a
 * DMA buffer, and copy non-ion input buffers into a copy buffer.
 * @kernel distinguishes kernel- vs user-memory argument sources.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra, *lrpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args = 0;
	size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0, templen = 0;
	size_t totallen = 0; //header and non ion copy buf len
	int i, oix;
	int err = 0, j = 0;
	int mflags = 0;
	uint64_t *fdlist = NULL;
	uint32_t *crclist = NULL;
	uint32_t early_hint;
	uint64_t *perf_counter = NULL;
	struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL;

	if (ctx->fl->profile)
		perf_counter = (uint64_t *)ctx->perf + PERF_COUNT;

	/* calculate size of the metadata: with rpra == NULL the helpers
	 * yield offsets from 0, i.e. sizes rather than addresses
	 */
	rpra = NULL;
	lrpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map each fd-backed in/out buffer */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		size_t len = lpra[i].buf.len;

		mutex_lock(&ctx->fl->map_mutex);
		if (ctx->fds && (ctx->fds[i] != -1))
			err = fastrpc_mmap_create(ctx->fl, ctx->fds[i], NULL,
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		mutex_unlock(&ctx->fl->map_mutex);
		if (err)
			goto bail;
		ipage += 1;
	}
	PERF_END);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* map dma-handle arguments */
	mutex_lock(&ctx->fl->map_mutex);
	for (i = bufs; i < bufs + handles; i++) {
		int dmaflags = 0;

		if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
			dmaflags = FASTRPC_MAP_FD_NOMAP;
		VERIFY(err, VALID_FASTRPC_CID(ctx->fl->cid));
		if (err) {
			err = -ECHRNG;
			mutex_unlock(&ctx->fl->map_mutex);
			goto bail;
		}
		dsp_cap_ptr = &gcinfo[ctx->fl->cid].dsp_cap_kernel;
		// Skip cpu mapping if DMA_HANDLE_REVERSE_RPC_CAP is true.
		if (!dsp_cap_ptr->dsp_attributes[DMA_HANDLE_REVERSE_RPC_CAP] &&
			ctx->fds && (ctx->fds[i] != -1))
			err = fastrpc_mmap_create(ctx->fl, ctx->fds[i], NULL,
					FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
					&ctx->maps[i]);
		if (err) {
			/* unwind the handle maps created so far */
			for (j = bufs; j < i; j++)
				fastrpc_mmap_free(ctx->maps[j], 0);
			mutex_unlock(&ctx->fl->map_mutex);
			goto bail;
		}
		ipage += 1;
	}
	mutex_unlock(&ctx->fl->map_mutex);

	/* metalen includes meta data, fds, crc, dsp perf and early wakeup hint */
	metalen = totallen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
			(sizeof(uint32_t) * M_CRCLIST) + (sizeof(uint64_t) * M_DSP_PERF_LIST) +
			sizeof(early_hint);

	if (metalen) {
		err = fastrpc_buf_alloc(ctx->fl, metalen, 0, 0,
				METADATA_BUF, &ctx->buf);
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(ctx->buf->virt));
		if (err)
			goto bail;
		memset(ctx->buf->virt, 0, metalen);
	}
	ctx->used = metalen;

	/* allocate new local rpra buffer (kernel-side copy of the arg list) */
	lrpralen = (size_t)&list[0];
	if (lrpralen) {
		lrpra = kzalloc(lrpralen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(lrpra));
		if (err) {
			err = -ENOMEM;
			goto bail;
		}
	}
	ctx->lrpra = lrpra;

	/* calculate len required for copying the unmapped (non-ion) buffers */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		templen = mend - mstart;
		/* guard against overflow of the running total */
		VERIFY(err, ((templen <= LONG_MAX) && (copylen <= (LONG_MAX - templen))));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		copylen += templen;
	}
	totallen = ALIGN(totallen, BALIGN) + copylen;

	/* allocate non -ion copy buffer */
	/* Checking if copylen can be accomodated in metalen*/
	/*if not allocating new buffer */
	if (totallen <= (size_t)buf_page_size(metalen)) {
		/* copy data fits in the slack of the metadata buffer */
		args = (uintptr_t)ctx->buf->virt + metalen;
		ctx->copybuf = ctx->buf;
		rlen = totallen - metalen;
	} else if (copylen) {
		err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, COPYDATA_BUF,
				&ctx->copybuf);
		if (err)
			goto bail;
		memset(ctx->copybuf->virt, 0, copylen);
		args = (uintptr_t)ctx->copybuf->virt;
		rlen = copylen;
		totallen = copylen;
	}

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		size_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			uint64_t num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				/* validate the user VA against its VMA and
				 * compute the offset into the mapping
				 */
				down_read(&current->mm->mmap_lock);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_lock);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_lock);
				VERIFY(err, offset + len <= (uintptr_t)map->size);
				if (err) {
					ADSPRPC_ERR(
						"buffer address is invalid for the fd passed for %d address 0x%llx and size %zu\n",
						i, (uintptr_t)lpra[i].buf.pv,
						lpra[i].buf.len);
					err = -EFAULT;
					goto bail;
				}
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map) {
			pages[i].addr = map->phys;
			pages[i].size = map->size;
		}
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	/* reset fds, crc and early wakeup hint memory */
	/* remote process updates these values before responding */
	memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST + sizeof(uint32_t)*M_CRCLIST +
			(sizeof(uint64_t) * M_DSP_PERF_LIST) + sizeof(early_hint));

	/* copy non ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		size_t mlen;
		uint64_t buf;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* start of a fresh region: realign to BALIGN */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		rpra[i].buf.pv =
			(args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->copybuf->phys -
			ctx->overps[oix]->offset +
			(totallen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err) {
				ADSPRPC_ERR(
					"copy from user failed with %d for dst 0x%llx, src %pK, size 0x%zx, arg %d\n",
					err, buf, lpra[i].buf.pv, len, i+1);
				err = -EFAULT;
				goto bail;
			}
		}
		if (len > DEBUG_PRINT_SIZE_LIMIT)
			ADSPRPC_DEBUG(
				"copied non ion buffer sc 0x%x pv 0x%llx, mend 0x%llx mstart 0x%llx, len %zu\n",
				sc, rpra[i].buf.pv,
				ctx->overps[oix]->mend,
				ctx->overps[oix]->mstart, len);
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* cache-flush mapped input buffers before handing them to the DSP */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (i+1 > inbufs) /* Avoiding flush for outbufs */
			continue;
		if (ctx->fl->sctx && ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && (map->attr & FASTRPC_ATTR_FORCE_NOFLUSH))
			continue;

		if (rpra && rpra[i].buf.len && (ctx->overps[oix]->mstart ||
		ctx->overps[oix]->do_cmo == 1)) {
			if (map && map->buf) {
				if (((buf_page_size(ctx->overps[oix]->mend -
				ctx->overps[oix]->mstart)) == map->size) ||
				ctx->overps[oix]->do_cmo) {
					/* whole-buffer cache maintenance */
					dma_buf_begin_cpu_access(map->buf,
						DMA_TO_DEVICE);
					dma_buf_end_cpu_access(map->buf,
						DMA_TO_DEVICE);
					ADSPRPC_DEBUG(
						"sc 0x%x pv 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n",
						sc, rpra[i].buf.pv,
						ctx->overps[oix]->mend,
						ctx->overps[oix]->mstart,
						rpra[i].buf.len, map->size);
				} else {
					uintptr_t offset;
					uint64_t flush_len;
					struct vm_area_struct *vma;

					down_read(&current->mm->mmap_lock);
					VERIFY(err, NULL != (vma = find_vma(
						current->mm, rpra[i].buf.pv)));
					if (err) {
						up_read(&current->mm->mmap_lock);
						goto bail;
					}
					if (ctx->overps[oix]->do_cmo) {
						offset = rpra[i].buf.pv -
							vma->vm_start;
						flush_len = rpra[i].buf.len;
					} else {
						offset =
							ctx->overps[oix]->mstart
							- vma->vm_start;
						flush_len =
							ctx->overps[oix]->mend -
							ctx->overps[oix]->mstart;
					}
					up_read(&current->mm->mmap_lock);
					/* partial cache maintenance only on
					 * the overlapping sub-range
					 */
					dma_buf_begin_cpu_access_partial(
						map->buf, DMA_TO_DEVICE, offset,
						flush_len);
					dma_buf_end_cpu_access_partial(
						map->buf, DMA_TO_DEVICE, offset,
						flush_len);
					ADSPRPC_DEBUG(
						"sc 0x%x vm_start 0x%llx pv 0x%llx, offset 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n",
						sc, vma->vm_start,
						rpra[i].buf.pv, offset,
						ctx->overps[oix]->mend,
						ctx->overps[oix]->mstart,
						rpra[i].buf.len, map->size);
				}
			}
		}
	}
	PERF_END);

	/* publish dma-handle descriptors to the remote side */
	for (i = bufs; ctx->fds && rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset =
			(uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* Copy rpra to local buffer */
	if (ctx->lrpra && rpra && lrpralen > 0)
		memcpy(ctx->lrpra, rpra,
lrpralen); + bail: + return err; +} + +static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, + remote_arg_t *upra) +{ + uint32_t sc = ctx->sc; + struct smq_invoke_buf *list; + struct smq_phy_page *pages; + struct fastrpc_mmap *mmap; + uint64_t *fdlist; + uint32_t *crclist = NULL, *poll = NULL; + uint64_t *perf_dsp_list = NULL; + + remote_arg64_t *rpra = ctx->lrpra; + int i, inbufs, outbufs, handles; + int err = 0, perfErr = 0; + + inbufs = REMOTE_SCALARS_INBUFS(sc); + outbufs = REMOTE_SCALARS_OUTBUFS(sc); + handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc); + list = smq_invoke_buf_start(ctx->rpra, sc); + pages = smq_phy_page_start(sc, list); + fdlist = (uint64_t *)(pages + inbufs + outbufs + handles); + crclist = (uint32_t *)(fdlist + M_FDLIST); + poll = (uint32_t *)(crclist + M_CRCLIST); + perf_dsp_list = (uint64_t *)(poll + 1); + + for (i = inbufs; i < inbufs + outbufs; ++i) { + if (!ctx->maps[i]) { + K_COPY_TO_USER(err, kernel, + ctx->lpra[i].buf.pv, + uint64_to_ptr(rpra[i].buf.pv), + rpra[i].buf.len); + if (err) { + ADSPRPC_ERR( + "Invalid size 0x%llx for output argument %d ret %ld\n", + rpra[i].buf.len, i+1, err); + err = -EFAULT; + goto bail; + } + } else { + mutex_lock(&ctx->fl->map_mutex); + fastrpc_mmap_free(ctx->maps[i], 0); + mutex_unlock(&ctx->fl->map_mutex); + ctx->maps[i] = NULL; + } + } + mutex_lock(&ctx->fl->map_mutex); + for (i = 0; i < M_FDLIST; i++) { + if (!fdlist[i]) + break; + if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], NULL, 0, 0, + 0, 0, &mmap)) + fastrpc_mmap_free(mmap, 0); + } + mutex_unlock(&ctx->fl->map_mutex); + if (ctx->crc && crclist && rpra) + K_COPY_TO_USER(err, kernel, ctx->crc, + crclist, M_CRCLIST*sizeof(uint32_t)); + if (ctx->perf_dsp && perf_dsp_list) { + K_COPY_TO_USER(perfErr, kernel, ctx->perf_dsp, + perf_dsp_list, M_DSP_PERF_LIST*sizeof(uint64_t)); + if (perfErr) + ADSPRPC_WARN("failed to copy perf data err %d\n", perfErr); + } + + bail: + return err; +} + +static void inv_args(struct 
smq_invoke_ctx *ctx) +{ + int i, inbufs, outbufs; + uint32_t sc = ctx->sc; + remote_arg64_t *rpra = ctx->lrpra; + int err = 0; + + inbufs = REMOTE_SCALARS_INBUFS(sc); + outbufs = REMOTE_SCALARS_OUTBUFS(sc); + for (i = 0; i < inbufs + outbufs; ++i) { + int over = ctx->overps[i]->raix; + struct fastrpc_mmap *map = ctx->maps[over]; + + if ((over + 1 <= inbufs)) + continue; + if (!rpra[over].buf.len) + continue; + if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent) + continue; + if (map && (map->attr & FASTRPC_ATTR_FORCE_NOINVALIDATE)) + continue; + + if (buf_page_start(ptr_to_uint64((void *)rpra)) == + buf_page_start(rpra[over].buf.pv)) { + continue; + } + if (ctx->overps[i]->mstart || ctx->overps[i]->do_cmo == 1) { + if (map && map->buf) { + if (((buf_page_size(ctx->overps[i]->mend - + ctx->overps[i]->mstart)) == map->size) || + ctx->overps[i]->do_cmo) { + dma_buf_begin_cpu_access(map->buf, + DMA_TO_DEVICE); + dma_buf_end_cpu_access(map->buf, + DMA_FROM_DEVICE); + ADSPRPC_DEBUG( + "sc 0x%x pv 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n", + sc, rpra[over].buf.pv, + ctx->overps[i]->mend, + ctx->overps[i]->mstart, + rpra[over].buf.len, map->size); + } else { + uintptr_t offset; + uint64_t inv_len; + struct vm_area_struct *vma; + + down_read(¤t->mm->mmap_lock); + VERIFY(err, NULL != (vma = find_vma( + current->mm, + rpra[over].buf.pv))); + if (err) { + up_read(¤t->mm->mmap_lock); + goto bail; + } + if (ctx->overps[i]->do_cmo) { + offset = rpra[over].buf.pv - + vma->vm_start; + inv_len = rpra[over].buf.len; + } else { + offset = + ctx->overps[i]->mstart - + vma->vm_start; + inv_len = + ctx->overps[i]->mend - + ctx->overps[i]->mstart; + } + up_read(¤t->mm->mmap_lock); + dma_buf_begin_cpu_access_partial( + map->buf, DMA_TO_DEVICE, offset, + inv_len); + dma_buf_end_cpu_access_partial(map->buf, + DMA_FROM_DEVICE, offset, + inv_len); + ADSPRPC_DEBUG( + "sc 0x%x vm_start 0x%llx pv 0x%llx, offset 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n", + sc, 
vma->vm_start, + rpra[over].buf.pv, + offset, ctx->overps[i]->mend, + ctx->overps[i]->mstart, + rpra[over].buf.len, map->size); + } + } + } + } +bail: + return; +} + +static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, + uint32_t kernel, uint32_t handle) +{ + struct smq_msg *msg = &ctx->msg; + struct smq_msg msg_temp; + struct fastrpc_file *fl = ctx->fl; + struct fastrpc_channel_ctx *channel_ctx = NULL; + int err = 0, cid = -1; + uint32_t sc = ctx->sc; + int64_t ns = 0; + uint64_t xo_time_in_us = 0; + int isasync = (ctx->asyncjob.isasyncjob ? true : false); + + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + + channel_ctx = &fl->apps->channel[cid]; + mutex_lock(&channel_ctx->smd_mutex); + msg->pid = fl->tgid; + msg->tid = current->pid; + if (fl->sessionid) + msg->tid |= SESSION_ID_MASK; + if (kernel == KERNEL_MSG_WITH_ZERO_PID) + msg->pid = 0; + msg->invoke.header.ctx = ctx->ctxid | fl->pd; + msg->invoke.header.handle = handle; + msg->invoke.header.sc = sc; + msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0; + msg->invoke.page.size = buf_page_size(ctx->used); + + if (fl->ssrcount != channel_ctx->ssrcount) { + err = -ECONNRESET; + mutex_unlock(&channel_ctx->smd_mutex); + goto bail; + } + mutex_unlock(&channel_ctx->smd_mutex); + + xo_time_in_us = CONVERT_CNT_TO_US(__arch_counter_get_cntvct()); + if (isasync) { + /* + * After message is sent to DSP, async response thread could immediately + * get the response and free context, which will result in a use-after-free + * in this function. So use a local variable for message. 
+ */ + memcpy(&msg_temp, msg, sizeof(struct smq_msg)); + msg = &msg_temp; + } + err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->trusted_vm); + trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx, + handle, sc, msg->invoke.page.addr, msg->invoke.page.size); + ns = get_timestamp_in_ns(); + fastrpc_update_txmsg_buf(channel_ctx, msg, err, ns, xo_time_in_us); + bail: + return err; +} + +/* + * fastrpc_get_dsp_status - Reads the property string from device node + * and updates the cdsp device avialbility status + * if the node belongs to cdsp device. + * @me : pointer to fastrpc_apps. + */ + +static void fastrpc_get_dsp_status(struct fastrpc_apps *me) +{ + int ret = -1; + struct device_node *node = NULL; + const char *name = NULL; + + do { + node = of_find_compatible_node(node, NULL, "qcom,pil-tz-generic"); + if (node) { + ret = of_property_read_string(node, "qcom,firmware-name", &name); + if (!strcmp(name, "cdsp")) { + ret = of_device_is_available(node); + me->remote_cdsp_status = ret; + ADSPRPC_INFO("cdsp node found with ret:%x\n", ret); + break; + } + } else { + ADSPRPC_ERR("cdsp node not found\n"); + break; + } + } while (1); +} + +static void fastrpc_init(struct fastrpc_apps *me) +{ + int i, jj; + + INIT_HLIST_HEAD(&me->drivers); + INIT_HLIST_HEAD(&me->maps); + spin_lock_init(&me->hlock); + me->channel = &gcinfo[0]; + mutex_init(&me->mut_uid); + for (i = 0; i < NUM_CHANNELS; i++) { + init_completion(&me->channel[i].work); + init_completion(&me->channel[i].workport); + me->channel[i].sesscount = 0; + /* All channels are secure by default except CDSP */ + me->channel[i].secure = SECURE_CHANNEL; + me->channel[i].unsigned_support = false; + mutex_init(&me->channel[i].smd_mutex); + fastrpc_transport_session_init(i, me->channel[i].subsys); + spin_lock_init(&me->channel[i].ctxlock); + spin_lock_init(&me->channel[i].gmsg_log.lock); + INIT_HLIST_HEAD(&me->channel[i].initmems); + for (jj = 0; jj < NUM_SESSIONS; jj++) + 
init_waitqueue_head(&me->channel[i].spd[jj].wait_for_pdup); + } + /* Set CDSP channel to non secure */ + me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL; + me->channel[CDSP_DOMAIN_ID].unsigned_support = true; +} + +static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type) +{ + struct fastrpc_apps *me = &gfa; + struct wakeup_source *wake_source = NULL; + + if (!fl->wake_enable) + return; + /* + * Vote with PM to abort any suspend in progress and + * keep system awake for specified timeout + */ + if (channel_type == SECURE_CHANNEL) + wake_source = me->wake_source_secure; + else if (channel_type == NON_SECURE_CHANNEL) + wake_source = me->wake_source; + + if (wake_source) + pm_wakeup_ws_event(wake_source, fl->ws_timeout, true); +} + +static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx, + uint32_t kernel) +{ + int interrupted = 0; + + if (kernel) + wait_for_completion(&ctx->work); + else + interrupted = wait_for_completion_interruptible(&ctx->work); + + return interrupted; +} + +static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx, + int *ptr_interrupted, uint32_t kernel, uint32_t async, + bool *ptr_isworkdone) +{ + int interrupted = 0, err = 0; + int jj; + bool wait_resp; + uint32_t wTimeout = FASTRPC_USER_EARLY_HINT_TIMEOUT; + uint32_t wakeTime = 0; + unsigned long flags; + + if (!ctx) { + /* This failure is not expected */ + err = *ptr_interrupted = EFAULT; + *ptr_isworkdone = false; + ADSPRPC_ERR("ctx is NULL, cannot wait for response err %d\n", + err); + return; + } + wakeTime = ctx->early_wake_time; + + do { + switch (ctx->rsp_flags) { + /* try polling on completion with timeout */ + case USER_EARLY_SIGNAL: + /* try wait if completion time is less than timeout */ + /* disable preempt to avoid context switch latency */ + preempt_disable(); + jj = 0; + wait_resp = false; + for (; wakeTime < wTimeout && jj < wTimeout; jj++) { + wait_resp = try_wait_for_completion(&ctx->work); + if (wait_resp) + break; + 
udelay(1); + } + preempt_enable(); + if (async) { + spin_lock_irqsave(&ctx->fl->aqlock, flags); + if (!ctx->is_work_done) { + ctx->is_early_wakeup = false; + *ptr_isworkdone = false; + } else + *ptr_isworkdone = true; + spin_unlock_irqrestore(&ctx->fl->aqlock, flags); + goto bail; + } else if (!wait_resp) { + interrupted = fastrpc_wait_for_response(ctx, + kernel); + *ptr_interrupted = interrupted; + if (interrupted || ctx->is_work_done) + goto bail; + } + break; + + /* busy poll on memory for actual job done */ + case EARLY_RESPONSE: + trace_fastrpc_msg("early_response: poll_begin"); + err = poll_for_remote_response(ctx, FASTRPC_POLL_TIME); + + /* Mark job done if poll on memory successful */ + /* Wait for completion if poll on memory timoeut */ + if (!err) { + ctx->is_work_done = true; + *ptr_isworkdone = true; + goto bail; + } + trace_fastrpc_msg("early_response: poll_timeout"); + ADSPRPC_INFO("early rsp poll timeout (%u us) for handle 0x%x, sc 0x%x\n", + FASTRPC_POLL_TIME, ctx->handle, ctx->sc); + if (async) { + spin_lock_irqsave(&ctx->fl->aqlock, flags); + if (!ctx->is_work_done) { + ctx->is_early_wakeup = false; + *ptr_isworkdone = false; + } else + *ptr_isworkdone = true; + spin_unlock_irqrestore(&ctx->fl->aqlock, flags); + goto bail; + } else if (!ctx->is_work_done) { + interrupted = fastrpc_wait_for_response(ctx, + kernel); + *ptr_interrupted = interrupted; + if (interrupted || ctx->is_work_done) + goto bail; + } + break; + + case COMPLETE_SIGNAL: + case NORMAL_RESPONSE: + if (!async) { + interrupted = fastrpc_wait_for_response(ctx, + kernel); + *ptr_interrupted = interrupted; + if (interrupted || ctx->is_work_done) + goto bail; + } else { + spin_lock_irqsave(&ctx->fl->aqlock, flags); + if (!ctx->is_work_done) { + ctx->is_early_wakeup = false; + *ptr_isworkdone = false; + } else + *ptr_isworkdone = true; + spin_unlock_irqrestore(&ctx->fl->aqlock, flags); + goto bail; + } + break; + case POLL_MODE: + trace_fastrpc_msg("poll_mode: begin"); + err = 
poll_for_remote_response(ctx, ctx->fl->poll_timeout); + + /* If polling timed out, move to normal response state */ + if (err) { + trace_fastrpc_msg("poll_mode: timeout"); + ADSPRPC_INFO("poll mode timeout (%u us) for handle 0x%x, sc 0x%x\n", + ctx->fl->poll_timeout, ctx->handle, ctx->sc); + ctx->rsp_flags = NORMAL_RESPONSE; + } else { + *ptr_interrupted = 0; + *ptr_isworkdone = true; + } + break; + default: + *ptr_interrupted = EBADR; + *ptr_isworkdone = false; + ADSPRPC_ERR( + "unsupported response flags 0x%x for handle 0x%x, sc 0x%x\n", + ctx->rsp_flags, ctx->handle, ctx->sc); + goto bail; + } /* end of switch */ + } while (!ctx->is_work_done); +bail: + return; +} + +static void fastrpc_update_invoke_count(uint32_t handle, uint64_t *perf_counter, + struct timespec64 *invoket) +{ + /* update invoke count for dynamic handles */ + if (handle != FASTRPC_STATIC_HANDLE_LISTENER) { + uint64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE); + + if (count) + *count += getnstimediff(invoket); + } + if (handle > FASTRPC_STATIC_HANDLE_MAX) { + uint64_t *count = GET_COUNTER(perf_counter, PERF_COUNT); + + if (count) + *count += 1; + } +} + +int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, + uint32_t kernel, + struct fastrpc_ioctl_invoke_async *inv) +{ + struct smq_invoke_ctx *ctx = NULL; + struct fastrpc_ioctl_invoke *invoke = &inv->inv; + int err = 0, interrupted = 0, cid = -1, perfErr = 0; + struct timespec64 invoket = {0}; + uint64_t *perf_counter = NULL; + bool isasyncinvoke = false, isworkdone = false; + + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid) && + fl->sctx != NULL); + if (err) { + ADSPRPC_ERR("kernel session not initialized yet for %s\n", + current->comm); + err = -EBADR; + goto bail; + } + + if (fl->profile) + ktime_get_real_ts64(&invoket); + + if (!kernel) { + VERIFY(err, invoke->handle != + FASTRPC_STATIC_HANDLE_PROCESS_GROUP); + VERIFY(err, invoke->handle != + FASTRPC_STATIC_HANDLE_DSP_UTILITIES); + if (err) { + err = -EINVAL; + 
ADSPRPC_ERR( + "user application %s trying to send a kernel RPC message to channel %d, handle 0x%x\n", + cid, invoke->handle); + goto bail; + } + } + + if (!kernel) { + VERIFY(err, 0 == (err = context_restore_interrupted(fl, + inv, &ctx))); + if (err) + goto bail; + if (fl->sctx->smmu.faults) + err = -FASTRPC_ENOSUCH; + if (err) + goto bail; + if (ctx) { + trace_fastrpc_context_restore(cid, (uint64_t)ctx, + ctx->msg.invoke.header.ctx, + ctx->handle, ctx->sc); + goto wait; + } + } + + trace_fastrpc_msg("context_alloc: begin"); + VERIFY(err, 0 == (err = context_alloc(fl, kernel, inv, &ctx))); + trace_fastrpc_msg("context_alloc: end"); + if (err) + goto bail; + isasyncinvoke = (ctx->asyncjob.isasyncjob ? true : false); + if (fl->profile) + perf_counter = (uint64_t *)ctx->perf + PERF_COUNT; + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS), + VERIFY(err, 0 == (err = get_args(kernel, ctx))); + PERF_END); + trace_fastrpc_msg("get_args: end"); + if (err) + goto bail; + + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS), + inv_args(ctx); + PERF_END); + trace_fastrpc_msg("inv_args_1: end"); + + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK), + VERIFY(err, 0 == (err = fastrpc_invoke_send(ctx, + kernel, invoke->handle))); + PERF_END); + trace_fastrpc_msg("invoke_send: end"); + + if (err) + goto bail; + if (isasyncinvoke) + goto invoke_end; + wait: + /* Poll mode allowed only for non-static handle calls to dynamic CDSP process */ + if (fl->poll_mode && (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) + && (cid == CDSP_DOMAIN_ID) + && (fl->proc_flags == FASTRPC_INIT_CREATE)) + ctx->rsp_flags = POLL_MODE; + + fastrpc_wait_for_completion(ctx, &interrupted, kernel, 0, &isworkdone); + trace_fastrpc_msg("wait_for_completion: end"); + VERIFY(err, 0 == (err = interrupted)); + if (err) + goto bail; + + if (!ctx->is_work_done) { + err = -ETIMEDOUT; + ADSPRPC_ERR( + "WorkDone state is invalid for handle 0x%x, sc 0x%x\n", + invoke->handle, ctx->sc); + goto bail; + } 
+ + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS), + inv_args(ctx); + PERF_END); + trace_fastrpc_msg("inv_args_2: end"); + + PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS), + VERIFY(err, 0 == (err = put_args(kernel, ctx, invoke->pra))); + PERF_END); + trace_fastrpc_msg("put_args: end"); + if (err) + goto bail; + + VERIFY(err, 0 == (err = ctx->retval)); + if (err) + goto bail; + bail: + if (ctx && interrupted == -ERESTARTSYS) { + trace_fastrpc_context_interrupt(cid, (uint64_t)ctx, + ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc); + context_save_interrupted(ctx); + } else if (ctx) { + if (fl->profile && !interrupted) + fastrpc_update_invoke_count(invoke->handle, + perf_counter, &invoket); + if (fl->profile && ctx->perf && ctx->handle > FASTRPC_STATIC_HANDLE_MAX) { + trace_fastrpc_perf_counters(ctx->handle, ctx->sc, + ctx->perf->count, ctx->perf->flush, ctx->perf->map, + ctx->perf->copy, ctx->perf->link, ctx->perf->getargs, + ctx->perf->putargs, ctx->perf->invargs, + ctx->perf->invoke, ctx->perf->tid); + if (ctx->perf_kernel) { + K_COPY_TO_USER(perfErr, kernel, ctx->perf_kernel, + ctx->perf, M_KERNEL_PERF_LIST*sizeof(uint64_t)); + if (perfErr) + ADSPRPC_WARN("failed to copy perf data err %d\n", perfErr); + } + } + context_free(ctx); + trace_fastrpc_msg("context_free: end"); + } + if (VALID_FASTRPC_CID(cid) + && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) + err = -ECONNRESET; + +invoke_end: + if (fl->profile && !interrupted && isasyncinvoke) + fastrpc_update_invoke_count(invoke->handle, perf_counter, + &invoket); + return err; +} + +static int fastrpc_wait_on_async_queue( + struct fastrpc_ioctl_async_response *async_res, + struct fastrpc_file *fl) +{ + int err = 0, ierr = 0, interrupted = 0, perfErr = 0; + struct smq_invoke_ctx *ctx = NULL, *ictx = NULL, *n = NULL; + unsigned long flags; + uint64_t *perf_counter = NULL; + bool isworkdone = false; + +read_async_job: + interrupted = wait_event_interruptible(fl->async_wait_queue, + 
atomic_read(&fl->async_queue_job_count)); + if (!fl || fl->file_close >= FASTRPC_PROCESS_EXIT_START) { + err = -EBADF; + goto bail; + } + VERIFY(err, 0 == (err = interrupted)); + if (err) + goto bail; + + spin_lock_irqsave(&fl->aqlock, flags); + list_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { + list_del_init(&ictx->asyncn); + atomic_sub(1, &fl->async_queue_job_count); + ctx = ictx; + break; + } + spin_unlock_irqrestore(&fl->aqlock, flags); + + if (ctx) { + if (fl->profile) + perf_counter = (uint64_t *)ctx->perf + PERF_COUNT; + fastrpc_wait_for_completion(ctx, &interrupted, 0, 1, + &isworkdone); + if (!isworkdone) {//In valid workdone state + ADSPRPC_DEBUG( + "Async early wake response did not reach on time for thread %d handle 0x%x, sc 0x%x\n", + ctx->pid, ctx->handle, ctx->sc); + goto read_async_job; + } + async_res->jobid = ctx->asyncjob.jobid; + async_res->result = ctx->retval; + async_res->handle = ctx->handle; + async_res->sc = ctx->sc; + async_res->perf_dsp = (uint64_t *)ctx->perf_dsp; + async_res->perf_kernel = (uint64_t *)ctx->perf_kernel; + + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS), + inv_args(ctx); + PERF_END); + if (ctx->retval != 0) + goto bail; + PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS), + VERIFY(ierr, 0 == (ierr = put_args(0, ctx, NULL))); + PERF_END); + if (ierr) + goto bail; + } else { // Go back to wait if ctx is invalid + ADSPRPC_ERR("Invalid async job wake up\n"); + goto read_async_job; + } +bail: + if (ierr) + async_res->result = ierr; + if (ctx) { + if (fl->profile && ctx->perf && ctx->handle > FASTRPC_STATIC_HANDLE_MAX) { + trace_fastrpc_perf_counters(ctx->handle, ctx->sc, + ctx->perf->count, ctx->perf->flush, ctx->perf->map, + ctx->perf->copy, ctx->perf->link, ctx->perf->getargs, + ctx->perf->putargs, ctx->perf->invargs, + ctx->perf->invoke, ctx->perf->tid); + if (ctx->perf_kernel) { + K_COPY_TO_USER(perfErr, 0, ctx->perf_kernel, + ctx->perf, M_KERNEL_PERF_LIST*sizeof(uint64_t)); 
				if (perfErr)
					ADSPRPC_WARN("failed to copy perf data err %d\n", perfErr);
			}
		}
		context_free(ctx);
	}
	return err;
}

/*
 * Block until a process-state notification is queued for this file,
 * dequeue one entry and copy its status/domain/session into @notif_rsp.
 * Returns 0 on success, -EBADF if the file is closing, or the
 * -ERESTARTSYS result of an interrupted wait.
 */
static int fastrpc_wait_on_notif_queue(
			struct fastrpc_ioctl_notif_rsp *notif_rsp,
			struct fastrpc_file *fl)
{
	int err = 0, interrupted = 0;
	unsigned long flags;
	struct smq_notif_rsp *notif = NULL, *inotif = NULL, *n = NULL;

read_notif_status:
	interrupted = wait_event_interruptible(fl->proc_state_notif.notif_wait_queue,
				atomic_read(&fl->proc_state_notif.notif_queue_count));
	/*
	 * NOTE(review): the "!fl" test below is dead — fl was already
	 * dereferenced by the wait_event above, so it can never be NULL
	 * here. Only the file_close check is effective; confirm intent.
	 */
	if (!fl || fl->file_close >= FASTRPC_PROCESS_EXIT_START) {
		err = -EBADF;
		goto bail;
	}
	VERIFY(err, 0 == (err = interrupted));
	if (err)
		goto bail;

	/* Pop exactly one notification off the queue under the queue lock */
	spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
	list_for_each_entry_safe(inotif, n, &fl->clst.notif_queue, notifn) {
		list_del_init(&inotif->notifn);
		atomic_sub(1, &fl->proc_state_notif.notif_queue_count);
		notif = inotif;
		break;
	}
	spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags);

	if (notif) {
		notif_rsp->status = notif->status;
		notif_rsp->domain = notif->domain;
		notif_rsp->session = notif->session;
	} else {/* Go back to wait if ctx is invalid */
		ADSPRPC_ERR("Invalid status notification response\n");
		goto read_notif_status;
	}
bail:
	/* kfree(NULL) is a no-op, safe on the error paths */
	kfree(notif);
	return err;
}

/*
 * Wait for one completed async job and copy its response descriptor
 * back to the userspace buffer @param.
 */
static int fastrpc_get_async_response(
		struct fastrpc_ioctl_async_response *async_res,
		void *param, struct fastrpc_file *fl)
{
	int err = 0;

	err = fastrpc_wait_on_async_queue(async_res, fl);
	if (err)
		goto bail;
	K_COPY_TO_USER(err, 0, param, async_res,
			sizeof(struct fastrpc_ioctl_async_response));
bail:
	return err;
}

/*
 * Wait for one DSP process-state notification and copy it back to the
 * userspace buffer @param.
 */
static int fastrpc_get_notif_response(
		struct fastrpc_ioctl_notif_rsp *notif,
		void *param, struct fastrpc_file *fl)
{
	int err = 0;

	err = fastrpc_wait_on_notif_queue(notif, fl);
	if (err)
		goto bail;
	K_COPY_TO_USER(err, 0, param, notif,
			sizeof(struct fastrpc_ioctl_notif_rsp));
bail:
	return err;
}

static int
fastrpc_create_persistent_headers(struct fastrpc_file *fl, + uint32_t user_concurrency) +{ + int err = 0, i = 0; + uint64_t virtb = 0; + struct fastrpc_buf *pers_hdr_buf = NULL, *hdr_bufs = NULL, *buf = NULL; + unsigned int num_pers_hdrs = 0; + size_t hdr_buf_alloc_len = 0; + + if (fl->pers_hdr_buf || !user_concurrency) + goto bail; + + /* + * Pre-allocate memory for persistent header buffers based + * on concurrency info passed by user. Upper limit enforced. + */ + num_pers_hdrs = (user_concurrency > MAX_PERSISTENT_HEADERS) ? + MAX_PERSISTENT_HEADERS : user_concurrency; + hdr_buf_alloc_len = num_pers_hdrs*PAGE_SIZE; + err = fastrpc_buf_alloc(fl, hdr_buf_alloc_len, 0, 0, + METADATA_BUF, &pers_hdr_buf); + if (err) + goto bail; + virtb = ptr_to_uint64(pers_hdr_buf->virt); + + /* Map entire buffer on remote subsystem in single RPC call */ + err = fastrpc_mem_map_to_dsp(fl, -1, 0, ADSP_MMAP_PERSIST_HDR, 0, + pers_hdr_buf->phys, pers_hdr_buf->size, + &pers_hdr_buf->raddr); + if (err) + goto bail; + + /* Divide and store as N chunks, each of 1 page size */ + hdr_bufs = kcalloc(num_pers_hdrs, sizeof(struct fastrpc_buf), + GFP_KERNEL); + if (!hdr_bufs) { + err = -ENOMEM; + goto bail; + } + spin_lock(&fl->hlock); + fl->pers_hdr_buf = pers_hdr_buf; + fl->num_pers_hdrs = num_pers_hdrs; + fl->hdr_bufs = hdr_bufs; + for (i = 0; i < num_pers_hdrs; i++) { + buf = &fl->hdr_bufs[i]; + buf->fl = fl; + buf->virt = uint64_to_ptr(virtb + (i*PAGE_SIZE)); + buf->phys = pers_hdr_buf->phys + (i*PAGE_SIZE); + buf->size = PAGE_SIZE; + buf->dma_attr = pers_hdr_buf->dma_attr; + buf->flags = pers_hdr_buf->flags; + buf->type = pers_hdr_buf->type; + buf->in_use = false; + } + spin_unlock(&fl->hlock); +bail: + if (err) { + ADSPRPC_ERR( + "failed to map len %zu, flags %d, user concurrency %u, num headers %u with err %d\n", + hdr_buf_alloc_len, ADSP_MMAP_PERSIST_HDR, + user_concurrency, num_pers_hdrs, err); + fl->pers_hdr_buf = NULL; + fl->hdr_bufs = NULL; + fl->num_pers_hdrs = 0; + if 
(!IS_ERR_OR_NULL(pers_hdr_buf)) + fastrpc_buf_free(pers_hdr_buf, 0); + if (!IS_ERR_OR_NULL(hdr_bufs)) + kfree(hdr_bufs); + } + return err; +} + +int fastrpc_internal_invoke2(struct fastrpc_file *fl, + struct fastrpc_ioctl_invoke2 *inv2) +{ + union { + struct fastrpc_ioctl_invoke_async inv; + struct fastrpc_ioctl_invoke_async_no_perf inv3; + struct fastrpc_ioctl_async_response async_res; + uint32_t user_concurrency; + struct fastrpc_ioctl_notif_rsp notif; + } p; + struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL; + uint32_t size = 0; + int err = 0, domain = fl->cid; + + if (inv2->req == FASTRPC_INVOKE2_ASYNC || + inv2->req == FASTRPC_INVOKE2_ASYNC_RESPONSE) { + VERIFY(err, domain == CDSP_DOMAIN_ID && fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + dsp_cap_ptr = &gcinfo[domain].dsp_cap_kernel; + VERIFY(err, + dsp_cap_ptr->dsp_attributes[ASYNC_FASTRPC_CAP] == 1); + if (err) { + err = -EPROTONOSUPPORT; + goto bail; + } + } + switch (inv2->req) { + case FASTRPC_INVOKE2_ASYNC: + size = sizeof(struct fastrpc_ioctl_invoke_async); + VERIFY(err, size >= inv2->size); + if (err) { + err = -EBADE; + goto bail; + } + if (size > inv2->size) { + K_COPY_FROM_USER(err, fl->is_compat, &p.inv3, (void *)inv2->invparam, + sizeof(struct fastrpc_ioctl_invoke_async_no_perf)); + if (err) + goto bail; + memcpy(&p.inv, &p.inv3, sizeof(struct fastrpc_ioctl_invoke_crc)); + memcpy(&p.inv.job, &p.inv3.job, sizeof(p.inv.job)); + } else { + K_COPY_FROM_USER(err, fl->is_compat, &p.inv, (void *)inv2->invparam, size); + if (err) + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode, + USER_MSG, &p.inv))); + if (err) + goto bail; + break; + case FASTRPC_INVOKE2_ASYNC_RESPONSE: + VERIFY(err, + sizeof(struct fastrpc_ioctl_async_response) >= inv2->size); + if (err) { + err = -EBADE; + goto bail; + } + err = fastrpc_get_async_response(&p.async_res, + (void *)inv2->invparam, fl); + break; + case FASTRPC_INVOKE2_KERNEL_OPTIMIZATIONS: + size = sizeof(uint32_t); 
		if (inv2->size != size) {
			err = -EBADE;
			goto bail;
		}
		K_COPY_FROM_USER(err, 0, &p.user_concurrency,
				(void *)inv2->invparam, size);
		if (err)
			goto bail;
		err = fastrpc_create_persistent_headers(fl,
				p.user_concurrency);
		break;
	case FASTRPC_INVOKE2_STATUS_NOTIF:
		/* Caller's struct may be older/smaller; only reject larger */
		VERIFY(err,
		sizeof(struct fastrpc_ioctl_notif_rsp) >= inv2->size);
		if (err) {
			err = -EBADE;
			goto bail;
		}
		err = fastrpc_get_notif_response(&p.notif,
						(void *)inv2->invparam, fl);
		break;
	default:
		err = -ENOTTY;
		break;
	}
bail:
	return err;
}

/*
 * Look up the channel id (*cid) and session index (*session) of the
 * static PD whose service-locator name matches @name.
 * Returns 0 on success, -EUSERS if no registered session matches.
 */
static int fastrpc_get_spd_session(char *name, int *session, int *cid)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i, j, match = 0;

	for (i = 0; i < NUM_CHANNELS; i++) {
		for (j = 0; j < NUM_SESSIONS; j++) {
			/* Slots without a registered service name are skipped */
			if (!me->channel[i].spd[j].servloc_name)
				continue;
			if (!strcmp(name, me->channel[i].spd[j].servloc_name)) {
				match = 1;
				break;
			}
		}
		if (match)
			break;
	}
	/* Loop indices double as the result; both must be in range */
	VERIFY(err, i < NUM_CHANNELS && j < NUM_SESSIONS);
	if (err) {
		err = -EUSERS;
		goto bail;
	}
	*cid = i;
	*session = j;
bail:
	return err;
}

static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags);
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked);

/*
 * This function makes a call to create a thread group in the root
 * process or static process on the remote subsystem.
 * Examples:
 * - guestOS daemons on all DSPs
 * - sensors daemon on sensorsPD on SLPI/ADSP
 */
static int fastrpc_init_attach_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0, tgid = fl->tgid;
	remote_arg_t ra[1];
	struct fastrpc_ioctl_invoke_async ioctl;

	/* Only the privileged device node may attach to guestOS/static PDs */
	if (fl->dev_minor == MINOR_NUM_DEV) {
		err = -ECONNREFUSED;
		ADSPRPC_ERR(
			"untrusted app trying to attach to privileged DSP PD\n");
		return err;
	}
	/*
	 * Prepare remote arguments for creating thread group
	 * in guestOS/staticPD on the remote subsystem.
	 */
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
	/* scalars: method 0, 1 input buffer, 0 output buffers */
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;

	if (init->flags == FASTRPC_INIT_ATTACH)
		fl->pd = 0;
	else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
		/* Service-locator client name depends on which DSP hosts sensors */
		if (fl->cid == ADSP_DOMAIN_ID)
			fl->servloc_name =
			SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME;
		else if (fl->cid == SDSP_DOMAIN_ID)
			fl->servloc_name =
			SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME;
		/* Setting to 2 will route the message to sensorsPD */
		fl->pd = 2;
	}

	err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl);
	if (err)
		goto bail;
bail:
	return err;
}

/*
 * This function makes a call to spawn a dynamic process
 * on the remote subsystem.
+ * Example: all compute offloads to CDSP + */ +static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, + struct fastrpc_ioctl_init_attrs *uproc) +{ + int err = 0, memlen = 0, mflags = 0, locked = 0; + struct fastrpc_ioctl_invoke_async ioctl; + struct fastrpc_ioctl_init *init = &uproc->init; + struct smq_phy_page pages[1]; + struct fastrpc_mmap *file = NULL; + struct fastrpc_buf *imem = NULL; + unsigned long imem_dma_attr = 0; + remote_arg_t ra[6]; + int fds[6]; + unsigned int gid = 0, one_mb = 1024*1024; + unsigned int dsp_userpd_memlen = 3 * one_mb; + struct fastrpc_buf *init_mem; + + struct { + int pgid; + unsigned int namelen; + unsigned int filelen; + unsigned int pageslen; + int attrs; + int siglen; + } inbuf; + + spin_lock(&fl->hlock); + if (fl->in_process_create) { + err = -EALREADY; + ADSPRPC_ERR("Already in create dynamic process\n"); + spin_unlock(&fl->hlock); + return err; + } + fl->in_process_create = true; + spin_unlock(&fl->hlock); + + inbuf.pgid = fl->tgid; + inbuf.namelen = strlen(current->comm) + 1; + inbuf.filelen = init->filelen; + fl->pd = 1; + + if (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE) + fl->is_unsigned_pd = true; + + /* Check if file memory passed by userspace is valid */ + VERIFY(err, access_ok((void __user *)init->file, init->filelen)); + if (err) + goto bail; + if (init->filelen) { + /* Map the shell file buffer to remote subsystem */ + mutex_lock(&fl->map_mutex); + err = fastrpc_mmap_create(fl, init->filefd, NULL, 0, + init->file, init->filelen, mflags, &file); + if (file) + file->is_filemap = true; + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + } + inbuf.pageslen = 1; + + /* Untrusted apps are not allowed to offload to signedPD on DSP. 
*/ + if (fl->untrusted_process) { + VERIFY(err, fl->is_unsigned_pd); + if (err) { + err = -ECONNREFUSED; + ADSPRPC_ERR( + "untrusted app trying to offload to signed remote process\n"); + goto bail; + } + } + + /* Disregard any privilege bits from userspace */ + uproc->attrs &= (~FASTRPC_MODE_PRIVILEGED); + + /* + * Check if the primary or supplementary group(s) of the process is + * one of the 'privileged' fastrpc GIDs stored in the device-tree. + */ + gid = sorted_lists_intersection(fl->gidlist.gids, + fl->gidlist.gidcount, gfa.gidlist.gids, gfa.gidlist.gidcount); + if (gid) { + ADSPRPC_INFO("PID %d, GID %u is a privileged process\n", + fl->tgid, gid); + uproc->attrs |= FASTRPC_MODE_PRIVILEGED; + } + + /* + * Userspace client should try to allocate the initial memory donated + * to remote subsystem as only the kernel and DSP should have access + * to that memory. + */ + VERIFY(err, !init->mem); + if (err) { + err = -EINVAL; + ADSPRPC_ERR("donated memory allocated in userspace\n"); + goto bail; + } + /* Free any previous donated memory */ + spin_lock(&fl->hlock); + locked = 1; + if (fl->init_mem) { + init_mem = fl->init_mem; + fl->init_mem = NULL; + spin_unlock(&fl->hlock); + locked = 0; + fastrpc_buf_free(init_mem, 0); + } + if (locked) { + spin_unlock(&fl->hlock); + locked = 0; + } + + /* Allocate DMA buffer in kernel for donating to remote process + * Unsigned PD requires additional memory because of the + * additional static heap initialized within the process. + */ + if (fl->is_unsigned_pd) + dsp_userpd_memlen += 2*one_mb; + memlen = ALIGN(max(dsp_userpd_memlen, init->filelen * 4), one_mb); + imem_dma_attr = DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_NO_KERNEL_MAPPING; + err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, + INITMEM_BUF, &imem); + if (err) + goto bail; + fl->init_mem = imem; + + /* + * Prepare remote arguments for dynamic process create + * call to remote subsystem. 
+ */ + inbuf.pageslen = 1; + ra[0].buf.pv = (void *)&inbuf; + ra[0].buf.len = sizeof(inbuf); + fds[0] = -1; + + ra[1].buf.pv = (void *)current->comm; + ra[1].buf.len = inbuf.namelen; + fds[1] = -1; + + ra[2].buf.pv = (void *)init->file; + ra[2].buf.len = inbuf.filelen; + fds[2] = init->filefd; + + pages[0].addr = imem->phys; + pages[0].size = imem->size; + ra[3].buf.pv = (void *)pages; + ra[3].buf.len = 1 * sizeof(*pages); + fds[3] = -1; + + inbuf.attrs = uproc->attrs; + ra[4].buf.pv = (void *)&(inbuf.attrs); + ra[4].buf.len = sizeof(inbuf.attrs); + fds[4] = -1; + + inbuf.siglen = uproc->siglen; + ra[5].buf.pv = (void *)&(inbuf.siglen); + ra[5].buf.len = sizeof(inbuf.siglen); + fds[5] = -1; + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + /* + * Choose appropriate remote method ID depending on whether the + * HLOS process has any attributes enabled (like unsignedPD, + * critical process, adaptive QoS, CRC checks etc). + */ + ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0); + if (uproc->attrs) + ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 4, 0); + ioctl.inv.pra = ra; + ioctl.fds = fds; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); + if (err) + goto bail; +bail: + /* + * Shell is loaded into the donated memory on remote subsystem. So, the + * original file buffer can be DMA unmapped. In case of a failure also, + * the mapping needs to be removed. 
+ */ + if (file) { + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(file, 0); + mutex_unlock(&fl->map_mutex); + } + if (err) { + spin_lock(&fl->hlock); + locked = 1; + if (!IS_ERR_OR_NULL(fl->init_mem)) { + init_mem = fl->init_mem; + fl->init_mem = NULL; + spin_unlock(&fl->hlock); + locked = 0; + fastrpc_buf_free(init_mem, 0); + } + if (locked) { + spin_unlock(&fl->hlock); + locked = 0; + } + } + spin_lock(&fl->hlock); + fl->in_process_create = false; + spin_unlock(&fl->hlock); + return err; +} + +/* + * This function makes a call to create a thread group in the static + * process on the remote subsystem. + * Example: audio daemon 'adsprpcd' on audioPD on ADSP + */ +static int fastrpc_init_create_static_process(struct fastrpc_file *fl, + struct fastrpc_ioctl_init *init) +{ + int err = 0, rh_hyp_done = 0; + struct fastrpc_apps *me = &gfa; + struct fastrpc_ioctl_invoke_async ioctl; + struct smq_phy_page pages[1]; + struct fastrpc_mmap *mem = NULL; + char *proc_name = NULL; + remote_arg_t ra[3]; + uint64_t phys = 0; + size_t size = 0; + int fds[3]; + struct secure_vm *rhvm = &me->channel[fl->cid].rhvm; + struct { + int pgid; + unsigned int namelen; + unsigned int pageslen; + } inbuf; + + if (fl->dev_minor == MINOR_NUM_DEV) { + err = -ECONNREFUSED; + ADSPRPC_ERR( + "untrusted app trying to attach to audio PD\n"); + return err; + } + + if (!init->filelen) + goto bail; + + proc_name = kzalloc(init->filelen + 1, GFP_KERNEL); + VERIFY(err, !IS_ERR_OR_NULL(proc_name)); + if (err) { + err = -ENOMEM; + goto bail; + } + err = copy_from_user((void *)proc_name, + (void __user *)init->file, init->filelen); + if (err) { + err = -EFAULT; + goto bail; + } + + fl->pd = 1; + inbuf.pgid = fl->tgid; + inbuf.namelen = init->filelen; + inbuf.pageslen = 0; + + if (!strcmp(proc_name, "audiopd")) { + fl->servloc_name = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME; + /* + * Remove any previous mappings in case process is trying + * to reconnect after a PD restart on remote subsystem. 
	 */
		err = fastrpc_mmap_remove_pdr(fl);
		if (err)
			goto bail;
	} else if (!strcmp(proc_name, "securepd")) {
		/* Secure static PD runs in a trusted VM; no remote-heap donation below */
		fl->trusted_vm = true;
	} else {
		/*
		 * NOTE(review): err is still 0 at this point, so an
		 * unrecognized proc_name bails out but the function
		 * returns success to the caller — likely needs
		 * err = -EINVAL before the goto; confirm intent.
		 */
		ADSPRPC_ERR(
			"Create static process is failed for proc_name %s",
			proc_name);
		goto bail;
	}

	/* Donate a remote heap to the static PD, once, for non-trusted-VM PDs */
	if (!fl->trusted_vm && (!me->staticpd_flags && !me->legacy_remote_heap)) {
		inbuf.pageslen = 1;
		/* Map the userspace-provided region (init->mem) as the remote heap */
		mutex_lock(&fl->map_mutex);
		err = fastrpc_mmap_create(fl, -1, NULL, 0, init->mem,
			 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem);
		mutex_unlock(&fl->map_mutex);
		if (err)
			goto bail;
		phys = mem->phys;
		size = mem->size;
		/*
		 * If remote-heap VMIDs are defined in DTSI, then do
		 * hyp_assign from HLOS to those VMs (LPASS, ADSP).
		 */
		if (rhvm->vmid && mem->refs == 1 && size) {
			err = hyp_assign_phys(phys, (uint64_t)size,
				hlosvm, 1,
				rhvm->vmid, rhvm->vmperm, rhvm->vmcount);
			if (err) {
				ADSPRPC_ERR(
					"rh hyp assign failed with %d for phys 0x%llx, size %zu\n",
					err, phys, size);
				err = -EADDRNOTAVAIL;
				goto bail;
			}
			/* Record the ownership transfer so the bail path can undo it */
			rh_hyp_done = 1;
		}
		me->staticpd_flags = 1;
	}

	/*
	 * Prepare remote arguments for static process create
	 * call to remote subsystem.
+ */ + ra[0].buf.pv = (void *)&inbuf; + ra[0].buf.len = sizeof(inbuf); + fds[0] = -1; + + ra[1].buf.pv = (void *)proc_name; + ra[1].buf.len = inbuf.namelen; + fds[1] = -1; + + pages[0].addr = phys; + pages[0].size = size; + + ra[2].buf.pv = (void *)pages; + ra[2].buf.len = sizeof(*pages); + fds[2] = -1; + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + + ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); + if (err) + goto bail; +bail: + kfree(proc_name); + if (err) { + me->staticpd_flags = 0; + if (rh_hyp_done) { + int hyp_err = 0; + + /* Assign memory back to HLOS in case of errors */ + hyp_err = hyp_assign_phys(phys, (uint64_t)size, + rhvm->vmid, rhvm->vmcount, + hlosvm, hlosvmperm, 1); + if (hyp_err) + ADSPRPC_WARN( + "rh hyp unassign failed with %d for phys 0x%llx of size %zu\n", + hyp_err, phys, size); + } + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(mem, 0); + mutex_unlock(&fl->map_mutex); + } + return err; +} + +int fastrpc_init_process(struct fastrpc_file *fl, + struct fastrpc_ioctl_init_attrs *uproc) +{ + int err = 0; + struct fastrpc_ioctl_init *init = &uproc->init; + int cid = fl->cid; + struct fastrpc_apps *me = &gfa; + struct fastrpc_channel_ctx *chan = NULL; + + VERIFY(err, init->filelen < INIT_FILELEN_MAX + && init->memlen < INIT_MEMLEN_MAX); + if (err) { + ADSPRPC_ERR( + "file size 0x%x or init memory 0x%x is more than max allowed file size 0x%x or init len 0x%x\n", + init->filelen, init->memlen, + INIT_FILELEN_MAX, INIT_MEMLEN_MAX); + err = -EFBIG; + goto bail; + } + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + chan = &me->channel[cid]; + if (chan->unsigned_support && fl->dev_minor == MINOR_NUM_DEV) { + /* Make sure third party applications */ + /* can 
spawn only unsigned PD when */ + /* channel configured as secure. */ + if (chan->secure && !(fl->is_unsigned_pd)) { + err = -ECONNREFUSED; + goto bail; + } + } + + err = fastrpc_channel_open(fl, init->flags); + if (err) + goto bail; + + fl->proc_flags = init->flags; + switch (init->flags) { + case FASTRPC_INIT_ATTACH: + case FASTRPC_INIT_ATTACH_SENSORS: + err = fastrpc_init_attach_process(fl, init); + break; + case FASTRPC_INIT_CREATE: + err = fastrpc_init_create_dynamic_process(fl, uproc); + break; + case FASTRPC_INIT_CREATE_STATIC: + err = fastrpc_init_create_static_process(fl, init); + break; + default: + err = -ENOTTY; + break; + } + if (err) + goto bail; + fl->dsp_proc_init = 1; + VERIFY(err, 0 == (err = fastrpc_device_create(fl))); + if (err) + goto bail; +bail: + return err; +} + +static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl) +{ + int err = 0; + uint64_t cpuinfo = 0; + struct fastrpc_apps *me = &gfa; + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[2]; + int cid = -1; + + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + ADSPRPC_ERR( + "invalid channel 0x%zx set for session\n", + cid); + goto bail; + } + + cpuinfo = me->channel[cid].cpuinfo_todsp; + /* return success if already updated to remote processor */ + if (me->channel[cid].cpuinfo_status) + return 0; + + ra[0].buf.pv = (void *)&cpuinfo; + ra[0].buf.len = sizeof(cpuinfo); + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_DSP_UTILITIES; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + + err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); + if (!err) + me->channel[cid].cpuinfo_status = true; +bail: + return err; +} + +int fastrpc_get_info_from_dsp(struct fastrpc_file *fl, + uint32_t *dsp_attr_buf, + 
uint32_t dsp_attr_buf_len, + uint32_t domain) +{ + int err = 0; + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[2]; + + dsp_attr_buf[0] = 0; // Capability filled in userspace + + // Fastrpc to modem not supported + if (domain == MDSP_DOMAIN_ID) + goto bail; + + err = fastrpc_channel_open(fl, FASTRPC_INIT_NO_CREATE); + if (err) + goto bail; + + ra[0].buf.pv = (void *)&dsp_attr_buf_len; + ra[0].buf.len = sizeof(dsp_attr_buf_len); + ra[1].buf.pv = (void *)(&dsp_attr_buf[1]); + ra[1].buf.len = dsp_attr_buf_len * sizeof(uint32_t); + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_DSP_UTILITIES; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 1); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + + err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); +bail: + + if (err) + ADSPRPC_ERR("could not obtain dsp information, err val %d\n", + err); + return err; +} + +int fastrpc_get_info_from_kernel( + struct fastrpc_ioctl_capability *cap, + struct fastrpc_file *fl) +{ + int err = 0; + uint32_t domain = cap->domain, attribute_ID = cap->attribute_ID; + uint32_t async_capability = 0; + struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL; + + VERIFY(err, domain < NUM_CHANNELS); + if (err) { + err = -ECHRNG; + goto bail; + } + + /* + * Check if number of attribute IDs obtained from userspace + * is less than the number of attribute IDs supported by + * kernel + */ + if (attribute_ID >= FASTRPC_MAX_ATTRIBUTES) { + err = -EOVERFLOW; + goto bail; + } + + dsp_cap_ptr = &gcinfo[domain].dsp_cap_kernel; + + if (attribute_ID >= FASTRPC_MAX_DSP_ATTRIBUTES) { + // Driver capability, pass it to user + memcpy(&cap->capability, + &kernel_capabilities[attribute_ID - + FASTRPC_MAX_DSP_ATTRIBUTES], + sizeof(cap->capability)); + } else if (!dsp_cap_ptr->is_cached) { + /* + * Information not on kernel, query device for information + * and cache on kernel 
+ */ + err = fastrpc_get_info_from_dsp(fl, + dsp_cap_ptr->dsp_attributes, + FASTRPC_MAX_DSP_ATTRIBUTES - 1, + domain); + if (err) + goto bail; + + /* Async capability support depends on both kernel and DSP */ + async_capability = IS_ASYNC_FASTRPC_AVAILABLE && + dsp_cap_ptr->dsp_attributes[ASYNC_FASTRPC_CAP]; + dsp_cap_ptr->dsp_attributes[ASYNC_FASTRPC_CAP] + = async_capability; + memcpy(&cap->capability, + &dsp_cap_ptr->dsp_attributes[attribute_ID], + sizeof(cap->capability)); + + dsp_cap_ptr->is_cached = 1; + } else { + // Information on Kernel, pass it to user + memcpy(&cap->capability, + &dsp_cap_ptr->dsp_attributes[attribute_ID], + sizeof(cap->capability)); + } +bail: + return err; +} + +static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) +{ + int err = 0; + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[1]; + int tgid = 0; + int cid = -1; + unsigned long irq_flags = 0; + + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + VERIFY(err, fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + err = verify_transport_device(cid, fl->trusted_vm); + if (err) + goto bail; + + VERIFY(err, fl->apps->channel[cid].issubsystemup == 1); + if (err) { + err = -ECONNRESET; + goto bail; + } + tgid = fl->tgid; + ra[0].buf.pv = (void *)&tgid; + ra[0].buf.len = sizeof(tgid); + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + spin_lock_irqsave(&fl->apps->hlock, irq_flags); + fl->file_close = FASTRPC_PROCESS_DSP_EXIT_INIT; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + /* + * Pass 2 for "kernel" arg to send kernel msg to DSP + * with non-zero msg PID for the DSP to directly use + * that info to kill the remote process. 
+ */ + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_NONZERO_PID, &ioctl))); + spin_lock_irqsave(&fl->apps->hlock, irq_flags); + fl->file_close = FASTRPC_PROCESS_DSP_EXIT_COMPLETE; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + if (err && fl->dsp_proc_init) + ADSPRPC_ERR( + "releasing DSP process failed with %d (0x%x) for %s\n", + err, err, current->comm); +bail: + if (err && fl && fl->apps) { + spin_lock_irqsave(&fl->apps->hlock, irq_flags); + fl->file_close = FASTRPC_PROCESS_DSP_EXIT_ERROR; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + } + return err; +} + +static int fastrpc_mem_map_to_dsp(struct fastrpc_file *fl, int fd, int offset, + uint32_t flags, uintptr_t va, uint64_t phys, + size_t size, uintptr_t *raddr) +{ + struct fastrpc_ioctl_invoke_async ioctl; + struct smq_phy_page page; + remote_arg_t ra[4]; + int err = 0; + struct { + int pid; + int fd; + int offset; + uint32_t flags; + uint64_t vaddrin; + int num; + int data_len; + } inargs; + struct { + uint64_t vaddrout; + } routargs; + + inargs.pid = fl->tgid; + inargs.fd = fd; + inargs.offset = offset; + inargs.vaddrin = (uintptr_t)va; + inargs.flags = flags; + inargs.num = sizeof(page); + inargs.data_len = 0; + ra[0].buf.pv = (void *)&inargs; + ra[0].buf.len = sizeof(inargs); + page.addr = phys; + page.size = size; + ra[1].buf.pv = (void *)&page; + ra[1].buf.len = sizeof(page); + ra[2].buf.pv = (void *)&page; + ra[2].buf.len = 0; + ra[3].buf.pv = (void *)&routargs; + ra[3].buf.len = sizeof(routargs); + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(10, 3, 1); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + if (err) + goto bail; + if (raddr) + *raddr = 
(uintptr_t)routargs.vaddrout; +bail: + return err; +} + +static int fastrpc_mem_unmap_to_dsp(struct fastrpc_file *fl, int fd, + uint32_t flags, uintptr_t va, + uint64_t phys, size_t size) +{ + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[1]; + int err = 0; + struct { + int pid; + int fd; + uint64_t vaddrin; + uint64_t len; + } inargs; + + inargs.pid = fl->tgid; + inargs.fd = fd; + inargs.vaddrin = (uint64_t)va; + inargs.len = (uint64_t)size; + ra[0].buf.pv = (void *)&inargs; + ra[0].buf.len = sizeof(inargs); + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(11, 1, 0); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + if (err) + goto bail; +bail: + return err; +} + +static int fastrpc_unmap_on_dsp(struct fastrpc_file *fl, + uintptr_t raddr, uint64_t phys, size_t size, uint32_t flags) +{ + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[1] = {}; + int err = 0; + struct { + int pid; + uintptr_t vaddrout; + size_t size; + } inargs; + + inargs.pid = fl->tgid; + inargs.size = size; + inargs.vaddrout = raddr; + ra[0].buf.pv = (void *)&inargs; + ra[0].buf.len = sizeof(inargs); + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + if (fl->apps->compat) + ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0); + else + ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + if (err) + goto bail; +bail: + return err; +} + +static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, + uintptr_t va, uint64_t phys, + 
size_t size, int refs, uintptr_t *raddr) +{ + struct fastrpc_ioctl_invoke_async ioctl; + struct fastrpc_apps *me = &gfa; + struct smq_phy_page page; + int num = 1; + remote_arg_t ra[3]; + int err = 0; + struct { + int pid; + uint32_t flags; + uintptr_t vaddrin; + int num; + } inargs; + struct { + uintptr_t vaddrout; + } routargs; + int cid = -1; + + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + inargs.pid = fl->tgid; + inargs.vaddrin = (uintptr_t)va; + inargs.flags = flags; + inargs.num = fl->apps->compat ? num * sizeof(page) : num; + ra[0].buf.pv = (void *)&inargs; + ra[0].buf.len = sizeof(inargs); + page.addr = phys; + page.size = size; + ra[1].buf.pv = (void *)&page; + ra[1].buf.len = num * sizeof(page); + + ra[2].buf.pv = (void *)&routargs; + ra[2].buf.len = sizeof(routargs); + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + if (fl->apps->compat) + ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1); + else + ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + *raddr = (uintptr_t)routargs.vaddrout; + if (err) + goto bail; + if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + ADSPRPC_ERR( + "invalid channel 0x%zx set for session\n", + cid); + goto bail; + } + } + if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR + && me->channel[cid].rhvm.vmid && refs == 1) { + err = hyp_assign_phys(phys, (uint64_t)size, + hlosvm, 1, me->channel[cid].rhvm.vmid, + me->channel[cid].rhvm.vmperm, + me->channel[cid].rhvm.vmcount); + if (err) { + ADSPRPC_ERR( + "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + err = fastrpc_unmap_on_dsp(fl, + *raddr, phys, size, flags); + if (err) { + 
ADSPRPC_ERR( + "failed to unmap %d for phys 0x%llx, size %zd\n", + err, phys, size); + } + goto bail; + } + } +bail: + return err; +} + +static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, + size_t size, uint32_t flags, int locked) +{ + int err = 0; + struct fastrpc_apps *me = &gfa; + int tgid = 0; + int destVM[1] = {VMID_HLOS}; + int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + int cid = -1; + + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + ADSPRPC_ERR( + "invalid channel 0x%zx set for session\n", + cid); + goto bail; + } + if (flags == ADSP_MMAP_HEAP_ADDR) { + struct fastrpc_ioctl_invoke_async ioctl; + remote_arg_t ra[2]; + int err = 0; + struct { + uint8_t skey; + } routargs; + + tgid = fl->tgid; + ra[0].buf.pv = (void *)&tgid; + ra[0].buf.len = sizeof(tgid); + + ra[1].buf.pv = (void *)&routargs; + ra[1].buf.len = sizeof(routargs); + + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + + if (locked) { + mutex_unlock(&fl->map_mutex); + mutex_unlock(&me->channel[cid].smd_mutex); + } + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + if (locked) { + mutex_lock(&me->channel[cid].smd_mutex); + mutex_lock(&fl->map_mutex); + } + if (err) + goto bail; + } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + if (me->channel[cid].rhvm.vmid) { + err = hyp_assign_phys(phys, + (uint64_t)size, + me->channel[cid].rhvm.vmid, + me->channel[cid].rhvm.vmcount, + destVM, destVMperm, 1); + if (err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + goto bail; + } + } + } + +bail: + return err; +} + +static int 
fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr, + uint64_t phys, size_t size, uint32_t flags) +{ + int err = 0; + + VERIFY(err, 0 == (err = fastrpc_unmap_on_dsp(fl, raddr, phys, + size, flags))); + if (err) + goto bail; + if (flags == ADSP_MMAP_HEAP_ADDR || + flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + VERIFY(err, !(err = fastrpc_munmap_on_dsp_rh(fl, phys, + size, flags, 0))); + if (err) + goto bail; + } +bail: + return err; +} + +static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) +{ + struct fastrpc_mmap *match = NULL, *map = NULL; + struct hlist_node *n = NULL; + int err = 0, ret = 0; + struct fastrpc_apps *me = &gfa; + struct qcom_dump_segment ramdump_segments_rh; + struct list_head head; + unsigned long irq_flags = 0; + + INIT_LIST_HEAD(&head); + VERIFY(err, fl->cid == RH_CID); + if (err) { + err = -EBADR; + goto bail; + } + do { + match = NULL; + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + match = map; + hlist_del_init(&map->hn); + break; + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + + if (match) { + err = fastrpc_munmap_on_dsp_rh(fl, match->phys, + match->size, match->flags, locked); + if (err) + goto bail; + memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); + ramdump_segments_rh.da = match->phys; + ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); + ramdump_segments_rh.size = match->size; + INIT_LIST_HEAD(&head); + list_add(&ramdump_segments_rh.node, &head); + if (me->dev && dump_enabled()) { + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); + if (ret < 0) + pr_err("adsprpc: %s: unable to dump heap (err %d)\n", + __func__, ret); + } + if (!locked) + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(match, 0); + if (!locked) + mutex_unlock(&fl->map_mutex); + } + } while (match); +bail: + if (err && match) { + if (!locked) + mutex_lock(&fl->map_mutex); + fastrpc_mmap_add(match); + if (!locked) + mutex_unlock(&fl->map_mutex); + } + 
return err; +} + +static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl) +{ + struct fastrpc_apps *me = &gfa; + int session = 0, err = 0, cid = -1; + + if (!fl) { + err = -EBADF; + goto bail; + } + err = fastrpc_get_spd_session(fl->servloc_name, + &session, &cid); + if (err) + goto bail; + VERIFY(err, cid == fl->cid); + if (err) { + err = -EBADR; + goto bail; + } + if (atomic_read(&me->channel[cid].spd[session].ispdup) == 0) { + err = -ENOTCONN; + goto bail; + } + if (me->channel[cid].spd[session].pdrcount != + me->channel[cid].spd[session].prevpdrcount) { + err = fastrpc_mmap_remove_ssr(fl, 0); + if (err) + ADSPRPC_WARN("failed to unmap remote heap (err %d)\n", + err); + me->channel[cid].spd[session].prevpdrcount = + me->channel[cid].spd[session].pdrcount; + } +bail: + return err; +} + +static inline void get_fastrpc_ioctl_mmap_64( + struct fastrpc_ioctl_mmap_64 *mmap64, + struct fastrpc_ioctl_mmap *immap) +{ + immap->fd = mmap64->fd; + immap->flags = mmap64->flags; + immap->vaddrin = (uintptr_t)mmap64->vaddrin; + immap->size = mmap64->size; +} + +static inline void put_fastrpc_ioctl_mmap_64( + struct fastrpc_ioctl_mmap_64 *mmap64, + struct fastrpc_ioctl_mmap *immap) +{ + mmap64->vaddrout = (uint64_t)immap->vaddrout; +} + +static inline void get_fastrpc_ioctl_munmap_64( + struct fastrpc_ioctl_munmap_64 *munmap64, + struct fastrpc_ioctl_munmap *imunmap) +{ + imunmap->vaddrout = (uintptr_t)munmap64->vaddrout; + imunmap->size = munmap64->size; +} + +int fastrpc_internal_munmap(struct fastrpc_file *fl, + struct fastrpc_ioctl_munmap *ud) +{ + int err = 0; + struct fastrpc_mmap *map = NULL; + struct fastrpc_buf *rbuf = NULL, *free = NULL; + struct hlist_node *n; + + VERIFY(err, fl->dsp_proc_init == 1); + if (err) { + ADSPRPC_ERR( + "user application %s trying to unmap without initialization\n", + current->comm); + err = -EHOSTDOWN; + return err; + } + mutex_lock(&fl->internal_map_mutex); + + spin_lock(&fl->hlock); + hlist_for_each_entry_safe(rbuf, n, 
&fl->remote_bufs, hn_rem) { + if (rbuf->raddr && ((rbuf->flags == ADSP_MMAP_ADD_PAGES) || + (rbuf->flags == ADSP_MMAP_ADD_PAGES_LLC))) { + if ((rbuf->raddr == ud->vaddrout) && + (rbuf->size == ud->size)) { + free = rbuf; + break; + } + } + } + spin_unlock(&fl->hlock); + + if (free) { + VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, free->raddr, + free->phys, free->size, free->flags))); + if (err) + goto bail; + fastrpc_buf_free(rbuf, 0); + mutex_unlock(&fl->internal_map_mutex); + return err; + } + + mutex_lock(&fl->map_mutex); + VERIFY(err, !(err = fastrpc_mmap_remove(fl, -1, ud->vaddrout, + ud->size, &map))); + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + VERIFY(err, map != NULL); + if (err) { + err = -EINVAL; + goto bail; + } + VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr, + map->phys, map->size, map->flags))); + if (err) + goto bail; + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(map, 0); + mutex_unlock(&fl->map_mutex); +bail: + if (err && map) { + mutex_lock(&fl->map_mutex); + fastrpc_mmap_add(map); + mutex_unlock(&fl->map_mutex); + } + mutex_unlock(&fl->internal_map_mutex); + return err; +} + +/* + * fastrpc_internal_munmap_fd can only be used for buffers + * mapped with persist attributes. 
This can only be called
 * once for any persist buffer
 */
static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
			struct fastrpc_ioctl_munmap_fd *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, (fl && ud));
	if (err) {
		err = -EINVAL;
		return err;
	}
	VERIFY(err, fl->dsp_proc_init == 1);
	if (err) {
		ADSPRPC_ERR(
			"user application %s trying to unmap without initialization\n",
			current->comm);
		err = -EHOSTDOWN;
		return err;
	}
	mutex_lock(&fl->internal_map_mutex);
	mutex_lock(&fl->map_mutex);
	/* Look up the SMMU mapping for this fd/va/len tuple */
	err = fastrpc_mmap_find(fl, ud->fd, NULL, ud->va, ud->len, 0, 0, &map);
	if (err) {
		ADSPRPC_ERR(
			"mapping not found to unmap fd 0x%x, va 0x%llx, len 0x%x, err %d\n",
			ud->fd, (unsigned long long)ud->va,
			(unsigned int)ud->len, err);
		mutex_unlock(&fl->map_mutex);
		goto bail;
	}
	/* Only maps held alive by KEEP_MAP are eligible; clear the flag and free */
	if (map && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
		map->attr = map->attr & (~FASTRPC_ATTR_KEEP_MAP);
		fastrpc_mmap_free(map, 0);
	}
	mutex_unlock(&fl->map_mutex);
bail:
	mutex_unlock(&fl->internal_map_mutex);
	return err;
}

/* Create an SMMU mapping for a buffer and register it with the DSP */
int fastrpc_internal_mem_map(struct fastrpc_file *fl,
				struct fastrpc_ioctl_mem_map *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, fl->dsp_proc_init == 1);
	if (err) {
		pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
			__func__, current->comm);
		/*
		 * NOTE(review): positive EBADR — sibling error paths in this
		 * driver return negative errno (e.g. -EHOSTDOWN above);
		 * likely should be -EBADR. Confirm before changing.
		 */
		err = EBADR;
		goto bail;
	}

	/* create SMMU mapping */
	mutex_lock(&fl->map_mutex);
	VERIFY(err, !(err = fastrpc_mmap_create(fl, ud->m.fd, NULL, ud->m.attrs,
			ud->m.vaddrin, ud->m.length,
			 ud->m.flags, &map)));
	mutex_unlock(&fl->map_mutex);
	if (err)
		goto bail;

	/* A non-zero remote address means this buffer is already mapped on DSP */
	if (map->raddr) {
		err = -EEXIST;
		goto bail;
	}

	/* create DSP mapping */
	VERIFY(err, !(err = fastrpc_mem_map_to_dsp(fl, ud->m.fd, ud->m.offset,
			ud->m.flags, map->va, map->phys, map->size, &map->raddr)));
	if (err)
		goto bail;
	ud->m.vaddrout = map->raddr;
bail:
	if (err) {
		ADSPRPC_ERR("failed to map fd %d, len 0x%x, flags %d, map %pK, err %d\n",
			ud->m.fd, ud->m.length, ud->m.flags, map, err);
		/* Roll back the SMMU mapping if the DSP registration failed */
		if (map) {
			mutex_lock(&fl->map_mutex);
			fastrpc_mmap_free(map, 0);
			mutex_unlock(&fl->map_mutex);
		}
	}
	return err;
}

/* Remove a DSP-registered mapping, then tear down the backing SMMU mapping */
int fastrpc_internal_mem_unmap(struct fastrpc_file *fl,
				struct fastrpc_ioctl_mem_unmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;
	size_t map_size = 0;

	VERIFY(err, fl->dsp_proc_init == 1);
	if (err) {
		pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
			__func__, current->comm);
		/*
		 * NOTE(review): positive EBADR here as well — see
		 * fastrpc_internal_mem_map; likely should be -EBADR.
		 */
		err = EBADR;
		goto bail;
	}

	mutex_lock(&fl->map_mutex);
	VERIFY(err, !(err = fastrpc_mmap_remove(fl, ud->um.fd,
				(uintptr_t)ud->um.vaddr, ud->um.length, &map)));
	mutex_unlock(&fl->map_mutex);
	if (err)
		goto bail;

	/* Only maps created through the mem_map path may be unmapped here */
	VERIFY(err, map->flags == FASTRPC_MAP_FD ||
		map->flags == FASTRPC_MAP_FD_DELAYED ||
		map->flags == FASTRPC_MAP_STATIC);
	if (err) {
		err = -EBADMSG;
		goto bail;
	}
	/* Cache size before the map is freed, for the error log below */
	map_size = map->size;
	/* remove mapping on DSP */
	VERIFY(err, !(err = fastrpc_mem_unmap_to_dsp(fl, map->fd, map->flags,
				map->raddr, map->phys, map->size)));
	if (err)
		goto bail;

	/* remove SMMU mapping */
	mutex_lock(&fl->map_mutex);
	fastrpc_mmap_free(map, 0);
	mutex_unlock(&fl->map_mutex);
	map = NULL;
bail:
	if (err) {
		ADSPRPC_ERR(
			"failed to unmap fd %d addr 0x%llx length %zu map size %zu err 0x%x\n",
			ud->um.fd, ud->um.vaddr, ud->um.length, map_size, err);
		/* Add back to map list in case of error to unmap on DSP */
		if (map) {
			mutex_lock(&fl->map_mutex);
			fastrpc_mmap_add(map);
			mutex_unlock(&fl->map_mutex);
		}
	}
	return err;
}

/*
 * Legacy mmap ioctl: either allocate kernel pages for a user heap
 * (ADD_PAGES flows) or map a user buffer/fd onto the DSP.
 */
int fastrpc_internal_mmap(struct fastrpc_file *fl,
				struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_buf *rbuf = NULL;
	unsigned long dma_attr = 0;
	uintptr_t raddr = 0;
	int err = 0;

	VERIFY(err, fl->dsp_proc_init == 1);
	if (err) {
		ADSPRPC_ERR(
			"user application %s trying to map without initialization\n",
			current->comm);
		err = -EHOSTDOWN;
		return err;
	}
	mutex_lock(&fl->internal_map_mutex);
	/* Pages for unsigned PD's user-heap should be allocated in userspace */
	if (((ud->flags == ADSP_MMAP_ADD_PAGES) ||
		(ud->flags == ADSP_MMAP_ADD_PAGES_LLC)) && !fl->is_unsigned_pd) {
		if (ud->vaddrin) {
			err = -EINVAL;
			ADSPRPC_ERR(
				"adding user allocated pages is not supported\n");
			goto bail;
		}
		dma_attr = DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_NO_KERNEL_MAPPING;
		if (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)
			dma_attr |= DMA_ATTR_SYS_CACHE_ONLY;
		err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
								USERHEAP_BUF, &rbuf);
		if (err)
			goto bail;
		err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
				rbuf->phys, rbuf->size, 0, &raddr);
		if (err)
			goto bail;
		rbuf->raddr = raddr;
	} else {
		uintptr_t va_to_dsp;

		if (fl->is_unsigned_pd && ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
			err = -EINVAL;
			ADSPRPC_ERR(
				"Secure memory allocation is not supported in unsigned PD");
			goto bail;
		}

		mutex_lock(&fl->map_mutex);
		VERIFY(err, !(err = fastrpc_mmap_create(fl, ud->fd, NULL, 0,
				(uintptr_t)ud->vaddrin, ud->size,
				 ud->flags, &map)));
		mutex_unlock(&fl->map_mutex);
		if (err)
			goto bail;

		/* Heap mappings carry no meaningful user VA; send 0 to the DSP */
		if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
				ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			va_to_dsp = 0;
		else
			va_to_dsp = (uintptr_t)map->va;
		VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl, ud->flags,
			va_to_dsp, map->phys, map->size, map->refs, &raddr)));
		if (err)
			goto bail;
		map->raddr = raddr;
	}
	ud->vaddrout = raddr;
 bail:
	if (err) {
		if (map) {
			mutex_lock(&fl->map_mutex);
			fastrpc_mmap_free(map, 0);
			mutex_unlock(&fl->map_mutex);
		}
		if (!IS_ERR_OR_NULL(rbuf))
			fastrpc_buf_free(rbuf, 0);
	}
	mutex_unlock(&fl->internal_map_mutex);
	return err;
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct
fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	uint64_t idx = 0;
	int err = 0;

	if (chan->sesscount) {
		/* Linear scan for a free session with a matching security domain */
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		if (idx >= chan->sesscount) {
			/* All sessions of this kind are in use */
			err = -EUSERS;
			goto bail;
		}
		chan->session[idx].smmu.faults = 0;
	} else {
		/* Channel without dedicated context banks: fall back to the
		 * platform device for session 0.
		 */
		VERIFY(err, me->dev != NULL);
		if (err) {
			err = -ENODEV;
			goto bail;
		}
		chan->session[0].dev = me->dev;
		chan->session[0].smmu.dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}

/*
 * Handle a dspsignal doorbell message from the DSP. The 64-bit payload
 * packs the target process ID in the upper 32 bits and the signal ID in
 * the lower 32. Completes the matching signal's completion if the
 * signal exists and is pending. Runs with me->hlock held over the
 * driver list walk; per-file signal state is guarded by dspsignals_lock.
 */
static void handle_remote_signal(uint64_t msg, int cid)
{
	struct fastrpc_apps *me = &gfa;
	uint32_t pid = msg >> 32;
	uint32_t signal_id = msg & 0xffffffff;
	struct fastrpc_file *fl = NULL;
	struct hlist_node *n = NULL;
	unsigned long irq_flags = 0;

	DSPSIGNAL_VERBOSE("Received queue signal %llx: PID %u, signal %u\n", msg, pid, signal_id);

	if (signal_id >= DSPSIGNAL_NUM_SIGNALS) {
		ADSPRPC_ERR("Received bad signal %u for PID %u\n", signal_id, pid);
		return;
	}

	spin_lock_irqsave(&me->hlock, irq_flags);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		/* Match both tgid and channel: the same PID may have files
		 * open on several remote subsystems.
		 */
		if ((fl->tgid == pid) && (fl->cid == cid)) {
			unsigned long fflags = 0;

			spin_lock_irqsave(&fl->dspsignals_lock, fflags);
			if (fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE]) {
				struct fastrpc_dspsignal *group =
					fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE];
				struct fastrpc_dspsignal *sig =
					&group[signal_id % DSPSIGNAL_GROUP_SIZE];

				if ((sig->state == DSPSIGNAL_STATE_PENDING) ||
					(sig->state == DSPSIGNAL_STATE_SIGNALED)) {
					DSPSIGNAL_VERBOSE("Signaling signal %u for PID %u\n",
							  signal_id, pid);
					complete(&sig->comp);
					sig->state = DSPSIGNAL_STATE_SIGNALED;
				} else if (sig->state == DSPSIGNAL_STATE_UNUSED) {
					ADSPRPC_ERR("Received unknown signal %u for PID %u\n",
						    signal_id, pid);
				}
			} else {
				ADSPRPC_ERR("Received unknown 
signal %u for PID %u\n",
					    signal_id, pid);
			}
			spin_unlock_irqrestore(&fl->dspsignals_lock, fflags);
			break;
		}
	}
	spin_unlock_irqrestore(&me->hlock, irq_flags);
}


/*
 * Entry point for all inbound transport messages on a channel.
 * Dispatches by payload size/content: an 8-byte payload is a dspsignal
 * doorbell; a reserved context ID is a process-status notification;
 * anything else is an invoke response that is matched to its pending
 * context via the context table and completed.
 * Returns 0, or a negative errno for malformed/unmatched messages.
 */
int fastrpc_handle_rpc_response(void *data, int len, int cid)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)data;
	struct smq_notif_rspv3 *notif = (struct smq_notif_rspv3 *)data;
	struct smq_invoke_rspv2 *rspv2 = NULL;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_apps *me = &gfa;
	uint32_t index, rsp_flags = 0, early_wake_time = 0, ver = 0;
	int err = 0, ignore_rsp_err = 0;
	struct fastrpc_channel_ctx *chan = NULL;
	unsigned long irq_flags = 0;
	int64_t ns = 0;
	uint64_t xo_time_in_us = 0;

	/* Timestamp the arrival for the rx message history buffer */
	xo_time_in_us = CONVERT_CNT_TO_US(__arch_counter_get_cntvct());

	if (len == sizeof(uint64_t)) {
		/*
		 * dspsignal message from the DSP
		 */
		handle_remote_signal(*((uint64_t *)data), cid);
		goto bail;
	}

	chan = &me->channel[cid];
	VERIFY(err, (rsp && len >= sizeof(*rsp)));
	if (err) {
		err = -EINVAL;
		goto bail;
	}

	/* Reserved context ID marks a status notification, not a response */
	if (notif->ctx == FASTRPC_NOTIF_CTX_RESERVED) {
		VERIFY(err, (notif->type == STATUS_RESPONSE &&
					 len >= sizeof(*notif)));
		if (err)
			goto bail;
		fastrpc_notif_find_process(cid, notif);
		goto bail;
	}

	/* v2 responses append early-wake hints and a version field */
	if (len >= sizeof(struct smq_invoke_rspv2))
		rspv2 = (struct smq_invoke_rspv2 *)data;

	if (rspv2) {
		early_wake_time = rspv2->early_wake_time;
		rsp_flags = rspv2->flags;
		ver = rspv2->version;
	}
	trace_fastrpc_transport_response(cid, rsp->ctx,
			rsp->retval, rsp_flags, early_wake_time);
	ns = get_timestamp_in_ns();
	fastrpc_update_rxmsg_buf(chan, rsp->ctx, rsp->retval,
			rsp_flags, early_wake_time, ver, ns, xo_time_in_us);

	/* The response ctx encodes the slot in the channel context table */
	index = (uint32_t)GET_TABLE_IDX_FROM_CTXID(rsp->ctx);
	VERIFY(err, index < FASTRPC_CTX_MAX);
	if (err)
		goto bail;

	/* Validate the slot still holds the context this response targets */
	spin_lock_irqsave(&chan->ctxlock, irq_flags);
	ctx = chan->ctxtable[index];
	VERIFY(err, !IS_ERR_OR_NULL(ctx) &&
		(ctx->ctxid == GET_CTXID_FROM_RSP_CTX(rsp->ctx)) &&
		
ctx->magic == FASTRPC_CTX_MAGIC); + if (err) { + /* + * Received an anticipatory COMPLETE_SIGNAL from DSP for a + * context after CPU successfully polling on memory and + * completed processing of context. Ignore the message. + * Also ignore response for a call which was already + * completed by update of poll memory and the context was + * removed from the table and possibly reused for another call. + */ + ignore_rsp_err = ((rsp_flags == COMPLETE_SIGNAL) || !ctx || + (ctx && (ctx->ctxid != GET_CTXID_FROM_RSP_CTX(rsp->ctx)))) ? 1 : 0; + goto bail_unlock; + } + + if (rspv2) { + VERIFY(err, rspv2->version == FASTRPC_RSP_VERSION2); + if (err) + goto bail_unlock; + } + VERIFY(err, VALID_FASTRPC_CID(ctx->fl->cid)); + if (err) { + err = -ECHRNG; + goto bail_unlock; + } + context_notify_user(ctx, rsp->retval, rsp_flags, early_wake_time); +bail_unlock: + spin_unlock_irqrestore(&chan->ctxlock, irq_flags); +bail: + if (err) { + err = -ENOKEY; + if (!ignore_rsp_err) + ADSPRPC_ERR( + "invalid response data %pK, len %d from remote subsystem err %d\n", + data, len, err); + else { + err = 0; + me->duplicate_rsp_err_cnt++; + } + } + + return err; +} + +static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, + struct fastrpc_session_ctx **session) +{ + int err = 0; + + mutex_lock(&chan->smd_mutex); + if (!*session) + err = fastrpc_session_alloc_locked(chan, secure, session); + mutex_unlock(&chan->smd_mutex); + if (err == -EUSERS) { + ADSPRPC_WARN( + "max concurrent sessions limit (%d) already reached on %s err %d\n", + chan->sesscount, chan->subsys, err); + } + return err; +} + +static void fastrpc_session_free(struct fastrpc_channel_ctx *chan, + struct fastrpc_session_ctx *session) +{ + mutex_lock(&chan->smd_mutex); + session->used = 0; + mutex_unlock(&chan->smd_mutex); +} + +static int fastrpc_file_free(struct fastrpc_file *fl) +{ + struct hlist_node *n = NULL; + struct fastrpc_mmap *map = NULL, *lmap = NULL; + unsigned long flags; + int cid; + struct 
fastrpc_apps *me = &gfa;
	bool is_driver_closed = false;
	int err = 0;
	unsigned long irq_flags = 0;
	bool is_locked = false;
	int i;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Mark the file (and its device node, if unreferenced) as closing
	 * so concurrent paths stop using it.
	 */
	spin_lock_irqsave(&me->hlock, irq_flags);
	if (fl->device) {
		fl->device->dev_close = true;
		if (fl->device->refs == 0) {
			is_driver_closed = true;
			hlist_del_init(&fl->device->hn);
		}
	}
	fl->file_close = FASTRPC_PROCESS_EXIT_START;
	spin_unlock_irqrestore(&me->hlock, irq_flags);

	/* Best-effort: tell the DSP to tear down the remote process */
	(void)fastrpc_release_current_dsp_process(fl);

	/* If a ramdump collection is in flight, wait for it to finish
	 * before freeing any state it may still reference.
	 */
	spin_lock_irqsave(&fl->apps->hlock, irq_flags);
	is_locked = true;
	if (!fl->is_ramdump_pend) {
		goto skip_dump_wait;
	}
	is_locked = false;
	spin_unlock_irqrestore(&fl->apps->hlock, irq_flags);
	wait_for_completion(&fl->work);

skip_dump_wait:
	if (!is_locked) {
		spin_lock_irqsave(&fl->apps->hlock, irq_flags);
		is_locked = true;
	}
	/* Remove from the global driver list; no new lookups can find us */
	hlist_del_init(&fl->hn);
	fl->is_ramdump_pend = false;
	fl->in_process_create = false;
	is_locked = false;
	spin_unlock_irqrestore(&fl->apps->hlock, irq_flags);

	/* No session was ever allocated: nothing else to tear down */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}

	//Dummy wake up to exit Async worker thread
	spin_lock_irqsave(&fl->aqlock, flags);
	atomic_add(1, &fl->async_queue_job_count);
	wake_up_interruptible(&fl->async_wait_queue);
	spin_unlock_irqrestore(&fl->aqlock, flags);

	// Dummy wake up to exit notification worker thread
	spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
	atomic_add(1, &fl->proc_state_notif.notif_queue_count);
	wake_up_interruptible(&fl->proc_state_notif.notif_wait_queue);
	spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags);

	/* Release buffers, pending invoke contexts and cached allocations */
	if (!IS_ERR_OR_NULL(fl->init_mem))
		fastrpc_buf_free(fl->init_mem, 0);
	fastrpc_context_list_dtor(fl);
	fastrpc_cached_buf_list_free(fl);
	if (!IS_ERR_OR_NULL(fl->hdr_bufs))
		kfree(fl->hdr_bufs);
	if (!IS_ERR_OR_NULL(fl->pers_hdr_buf))
		fastrpc_buf_free(fl->pers_hdr_buf, 0);
	/* Drain the map list one entry at a time; fastrpc_mmap_free() is
	 * called outside the list walk after each unlink.
	 */
	mutex_lock(&fl->map_mutex);
	do {
		lmap = NULL;
		hlist_for_each_entry_safe(map, n, 
&fl->maps, hn) { + hlist_del_init(&map->hn); + lmap = map; + break; + } + fastrpc_mmap_free(lmap, 1); + } while (lmap); + mutex_unlock(&fl->map_mutex); + + if (fl->device && is_driver_closed) + device_unregister(&fl->device->dev); + + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (!err && fl->sctx) + fastrpc_session_free(&fl->apps->channel[cid], fl->sctx); + if (!err && fl->secsctx) + fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx); + + for (i = 0; i < (DSPSIGNAL_NUM_SIGNALS / DSPSIGNAL_GROUP_SIZE); i++) + kfree(fl->signal_groups[i]); + mutex_destroy(&fl->signal_create_mutex); + + fastrpc_remote_buf_list_free(fl); + mutex_destroy(&fl->map_mutex); + mutex_destroy(&fl->internal_map_mutex); + kfree(fl->dev_pm_qos_req); + kfree(fl->gidlist.gids); + kfree(fl); + return 0; +} + +static int fastrpc_device_release(struct inode *inode, struct file *file) +{ + struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; + struct fastrpc_apps *me = &gfa; + u32 ii; + + if (!fl) + return 0; + + if (fl->qos_request && fl->dev_pm_qos_req) { + for (ii = 0; ii < me->silvercores.corecount; ii++) { + if (!dev_pm_qos_request_active(&fl->dev_pm_qos_req[ii])) + continue; + dev_pm_qos_remove_request(&fl->dev_pm_qos_req[ii]); + } + } + debugfs_remove(fl->debugfs_file); + fastrpc_file_free(fl); + file->private_data = NULL; + + return 0; +} + +static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, + size_t count, loff_t *position) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_file *fl = filp->private_data; + struct hlist_node *n; + struct fastrpc_buf *buf = NULL; + struct fastrpc_mmap *map = NULL; + struct fastrpc_mmap *gmaps = NULL; + struct smq_invoke_ctx *ictx = NULL; + struct fastrpc_channel_ctx *chan = NULL; + unsigned int len = 0; + int i, j, sess_used = 0, ret = 0; + char *fileinfo = NULL; + char single_line[] = "----------------"; + char title[] = "========================="; + unsigned long irq_flags = 0; + + fileinfo = 
kzalloc(DEBUGFS_SIZE, GFP_KERNEL); + if (!fileinfo) { + ret = -ENOMEM; + goto bail; + } + if (fl == NULL) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n%s %s %s\n", title, " CHANNEL INFO ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-7s|%-10s|%-14s|%-9s|%-13s\n", + "subsys", "sesscount", "issubsystemup", + "ssrcount", "session_used"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "-%s%s%s%s-\n", single_line, single_line, + single_line, single_line); + for (i = 0; i < NUM_CHANNELS; i++) { + sess_used = 0; + chan = &gcinfo[i]; + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "%-7s", chan->subsys); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "|%-10u", + chan->sesscount); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "|%-14d", + chan->issubsystemup); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "|%-9u", + chan->ssrcount); + for (j = 0; j < chan->sesscount; j++) + sess_used += chan->session[j].used; + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "|%-13d\n", sess_used); + } + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n%s%s%s\n", "=============", + " CMA HEAP ", "=============="); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size"); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "--%s%s---\n", + single_line, single_line); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n==========%s %s %s===========\n", + title, " GMAPS ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-20s|%-20s|%-20s\n", + "fd", "phys", "size", "va"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n", + gmaps->fd, 
gmaps->phys, + (uint32_t)gmaps->size, + gmaps->va); + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-20s|%-20s|%-20s\n", + "len", "refs", "raddr", "flags"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "0x%-18X|%-20d|%-20lu|%-20u\n", + (uint32_t)gmaps->len, gmaps->refs, + gmaps->raddr, gmaps->flags); + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + } else { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n%s %13s %d\n", "cid", ":", fl->cid); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %12s %d\n", "tgid", ":", fl->tgid); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %7s %d\n", "sessionid", ":", fl->sessionid); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %8s %u\n", "ssrcount", ":", fl->ssrcount); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %14s %d\n", "pd", ":", fl->pd); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %9s %s\n", "servloc_name", ":", fl->servloc_name); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %6s %d\n", "file_close", ":", fl->file_close); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %9s %d\n", "profile", ":", fl->profile); + if (fl->sctx) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %3s %d\n", "smmu.coherent", ":", + fl->sctx->smmu.coherent); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %4s %d\n", "smmu.enabled", ":", + fl->sctx->smmu.enabled); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %5s %d\n", "smmu.secure", ":", + fl->sctx->smmu.secure); + len 
+= scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %5s %d\n", "smmu.faults", ":", + fl->sctx->smmu.faults); + } + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n=======%s %s %s======\n", title, + " LIST OF MAPS ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-20s|%-20s\n", "va", "phys", "size"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", + single_line, single_line, single_line, + single_line, single_line); + mutex_lock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "0x%-20lX|0x%-20llX|0x%-20zu\n\n", + map->va, map->phys, + map->size); + } + mutex_unlock(&fl->map_mutex); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-20s|%-20s|%-20s\n", + "len", "refs", + "raddr"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", + single_line, single_line, single_line, + single_line, single_line); + mutex_lock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20zu|%-20d|0x%-20lX|%-20d\n\n", + map->len, map->refs, map->raddr); + } + mutex_unlock(&fl->map_mutex); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-20s\n", "secure", "attr"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", + single_line, single_line, single_line, + single_line, single_line); + mutex_lock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20d|0x%-20lX\n\n", + map->secure, map->attr); + } + mutex_unlock(&fl->map_mutex); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n======%s %s %s======\n", title, + " LIST OF BUFS ", title); + spin_lock(&fl->hlock); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-19s|%-19s|%-19s\n", + "virt", "phys", "size"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - 
len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) { + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, + "0x%-17p|0x%-17llX|%-19zu\n", + buf->virt, (uint64_t)buf->phys, buf->size); + } + + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n======%s %s %s======\n", title, + " LIST OF REMOTE BUFS ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-19s|%-19s|%-19s|%-19s\n", + "virt", "phys", "size", "flags"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) { + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, + "0x%-17p|0x%-17llX|%-19zu|0x%-17llX\n", + buf->virt, (uint64_t)buf->phys, buf->size, buf->flags); + } + + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n%s %s %s\n", title, + " LIST OF PENDING SMQCONTEXTS ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-10s|%-10s|%-10s|%-20s\n", + "sc", "pid", "tgid", "used", "ctxid"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n", + ictx->sc, ictx->pid, ictx->tgid, + ictx->used, ictx->ctxid); + } + + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "\n%s %s %s\n", title, + " LIST OF INTERRUPTED SMQCONTEXTS ", title); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20s|%-10s|%-10s|%-10s|%-20s\n", + "sc", "pid", "tgid", "used", "ctxid"); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s%s%s%s%s\n", single_line, single_line, + single_line, single_line, single_line); + hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, 
hn) { + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n", + ictx->sc, ictx->pid, ictx->tgid, + ictx->used, ictx->ctxid); + } + spin_unlock(&fl->hlock); + } + if (len > DEBUGFS_SIZE) + len = DEBUGFS_SIZE; + ret = simple_read_from_buffer(buffer, count, position, fileinfo, len); + kfree(fileinfo); +bail: + return ret; +} + +static const struct file_operations debugfs_fops = { + .open = simple_open, + .read = fastrpc_debugfs_read, +}; + +static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) +{ + struct fastrpc_apps *me = &gfa; + int cid = -1, err = 0; + + VERIFY(err, fl && fl->sctx && fl->cid >= 0 && fl->cid < NUM_CHANNELS); + if (err) { + ADSPRPC_ERR("kernel session not initialized yet for %s\n", + current->comm); + err = -EBADR; + return err; + } + cid = fl->cid; + + err = fastrpc_wait_for_transport_interrupt(cid, flags); + if (err) + goto bail; + + err = verify_transport_device(cid, fl->trusted_vm); + if (err) + goto bail; + + mutex_lock(&me->channel[cid].smd_mutex); + if (me->channel[cid].ssrcount != + me->channel[cid].prevssrcount) { + if (!me->channel[cid].issubsystemup) { + err = -ECONNREFUSED; + mutex_unlock(&me->channel[cid].smd_mutex); + goto bail; + } + } + fl->ssrcount = me->channel[cid].ssrcount; + + if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount != + me->channel[cid].prevssrcount) { + mutex_lock(&fl->map_mutex); + err = fastrpc_mmap_remove_ssr(fl, 1); + mutex_unlock(&fl->map_mutex); + if (err) + ADSPRPC_WARN( + "failed to unmap remote heap for %s (err %d)\n", + me->channel[cid].subsys, err); + me->channel[cid].prevssrcount = + me->channel[cid].ssrcount; + } + mutex_unlock(&me->channel[cid].smd_mutex); + +bail: + return err; +} + +static inline void fastrpc_register_wakeup_source(struct device *dev, + const char *client_name, struct wakeup_source **device_wake_source) +{ + struct wakeup_source *wake_source = NULL; + + wake_source = wakeup_source_register(dev, client_name); + if 
(IS_ERR_OR_NULL(wake_source)) { + ADSPRPC_ERR( + "wakeup_source_register failed for dev %s, client %s with err %ld\n", + dev_name(dev), client_name, PTR_ERR(wake_source)); + return; + } + *device_wake_source = wake_source; +} + +static int fastrpc_device_open(struct inode *inode, struct file *filp) +{ + int err = 0; + struct fastrpc_file *fl = NULL; + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + /* + * Indicates the device node opened + * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV + */ + int dev_minor = MINOR(inode->i_rdev); + + VERIFY(err, ((dev_minor == MINOR_NUM_DEV) || + (dev_minor == MINOR_NUM_SECURE_DEV))); + if (err) { + ADSPRPC_ERR("Invalid dev minor num %d\n", + dev_minor); + return err; + } + + VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL))); + if (err) { + err = -ENOMEM; + return err; + } + + context_list_ctor(&fl->clst); + spin_lock_init(&fl->hlock); + spin_lock_init(&fl->aqlock); + spin_lock_init(&fl->proc_state_notif.nqlock); + INIT_HLIST_HEAD(&fl->maps); + INIT_HLIST_HEAD(&fl->cached_bufs); + fl->num_cached_buf = 0; + INIT_HLIST_HEAD(&fl->remote_bufs); + init_waitqueue_head(&fl->async_wait_queue); + init_waitqueue_head(&fl->proc_state_notif.notif_wait_queue); + INIT_HLIST_NODE(&fl->hn); + fl->sessionid = 0; + fl->tgid_open = current->tgid; + fl->apps = me; + fl->mode = FASTRPC_MODE_SERIAL; + fl->cid = -1; + fl->dev_minor = dev_minor; + fl->init_mem = NULL; + fl->qos_request = 0; + fl->dsp_proc_init = 0; + fl->is_ramdump_pend = false; + fl->in_process_create = false; + fl->is_unsigned_pd = false; + fl->is_compat = false; + init_completion(&fl->work); + fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; + filp->private_data = fl; + mutex_init(&fl->internal_map_mutex); + mutex_init(&fl->map_mutex); + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_add_head(&fl->hn, &me->drivers); + spin_unlock_irqrestore(&me->hlock, irq_flags); + fl->dev_pm_qos_req = kcalloc(me->silvercores.corecount, + sizeof(struct dev_pm_qos_request), + 
GFP_KERNEL); + spin_lock_init(&fl->dspsignals_lock); + mutex_init(&fl->signal_create_mutex); + + return 0; +} + +static int fastrpc_get_process_gids(struct gid_list *gidlist) +{ + struct group_info *group_info = get_current_groups(); + int i = 0, err = 0, num_gids = group_info->ngroups + 1; + unsigned int *gids = NULL; + + gids = kcalloc(num_gids, sizeof(unsigned int), GFP_KERNEL); + if (!gids) { + err = -ENOMEM; + goto bail; + } + + /* Get the real GID */ + gids[0] = __kgid_val(current_gid()); + + /* Get the supplemental GIDs */ + for (i = 1; i < num_gids; i++) + gids[i] = __kgid_val(group_info->gid[i - 1]); + + sort(gids, num_gids, sizeof(*gids), uint_cmp_func, NULL); + gidlist->gids = gids; + gidlist->gidcount = num_gids; +bail: + if (err) + kfree(gids); + return err; +} + +static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) +{ + int err = 0, buf_size = 0; + char strpid[PID_SIZE]; + char cur_comm[TASK_COMM_LEN]; + + memcpy(cur_comm, current->comm, TASK_COMM_LEN); + cur_comm[TASK_COMM_LEN-1] = '\0'; + fl->tgid = current->tgid; + + /* + * Third-party apps don't have permission to open the fastrpc device, so + * it is opened on their behalf by DSP HAL. This is detected by + * comparing current PID with the one stored during device open. 
+ */ + if (current->tgid != fl->tgid_open) + fl->untrusted_process = true; + + snprintf(strpid, PID_SIZE, "%d", current->pid); + if (debugfs_root) { + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + + strlen("_") + strlen(__TOSTR__(NUM_CHANNELS)) + 1; + + spin_lock(&fl->hlock); + if (fl->debug_buf_alloced_attempted) { + spin_unlock(&fl->hlock); + return err; + } + fl->debug_buf_alloced_attempted = 1; + spin_unlock(&fl->hlock); + fl->debug_buf = kzalloc(buf_size, GFP_KERNEL); + + if (!fl->debug_buf) { + err = -ENOMEM; + return err; + } + snprintf(fl->debug_buf, buf_size, "%.10s%s%d%s%d", + cur_comm, "_", current->pid, "_", cid); + fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644, + debugfs_root, fl, &debugfs_fops); + if (IS_ERR_OR_NULL(fl->debugfs_file)) { + pr_warn("Error: %s: %s: failed to create debugfs file %s\n", + cur_comm, __func__, fl->debug_buf); + fl->debugfs_file = NULL; + } + kfree(fl->debug_buf); + fl->debug_buf = NULL; + } +bail: + return err; +} + +int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) +{ + int err = 0; + uint32_t cid = *info; + struct fastrpc_apps *me = &gfa; + + VERIFY(err, fl != NULL); + if (err) { + err = -EBADF; + goto bail; + } + + fastrpc_get_process_gids(&fl->gidlist); + err = fastrpc_set_process_info(fl, cid); + if (err) + goto bail; + + if (fl->cid == -1) { + struct fastrpc_channel_ctx *chan = NULL; + + VERIFY(err, cid < NUM_CHANNELS); + if (err) { + err = -ECHRNG; + goto bail; + } + chan = &me->channel[cid]; + /* Check to see if the device node is non-secure */ + if (fl->dev_minor == MINOR_NUM_DEV) { + /* + * If an app is trying to offload to a secure remote + * channel by opening the non-secure device node, allow + * the access if the subsystem supports unsigned + * offload. Untrusted apps will be restricted from + * offloading to signed PD using DSP HAL. 
+ */ + if (chan->secure == SECURE_CHANNEL + && !chan->unsigned_support) { + ADSPRPC_ERR( + "cannot use domain %d with non-secure device\n", + cid); + err = -EACCES; + goto bail; + } + } + fl->cid = cid; + fl->ssrcount = fl->apps->channel[cid].ssrcount; + mutex_lock(&fl->apps->channel[cid].smd_mutex); + err = fastrpc_session_alloc_locked(&fl->apps->channel[cid], + 0, &fl->sctx); + mutex_unlock(&fl->apps->channel[cid].smd_mutex); + if (err == -EUSERS) { + ADSPRPC_WARN( + "max concurrent sessions limit (%d) already reached on %s err %d\n", + chan->sesscount, chan->subsys, err); + } + if (err) + goto bail; + } + VERIFY(err, fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + *info = (fl->sctx->smmu.enabled ? 1 : 0); +bail: + return err; +} + +static int fastrpc_manage_poll_mode(struct fastrpc_file *fl, uint32_t enable, uint32_t timeout) +{ + int err = 0; + const unsigned int MAX_POLL_TIMEOUT_US = 10000; + + if ((fl->cid != CDSP_DOMAIN_ID) || (fl->proc_flags != FASTRPC_INIT_CREATE)) { + err = -EPERM; + ADSPRPC_ERR("flags %d, cid %d, poll mode allowed only for dynamic CDSP process\n", + fl->proc_flags, fl->cid); + goto bail; + } + if (timeout > MAX_POLL_TIMEOUT_US) { + err = -EBADMSG; + ADSPRPC_ERR("poll timeout %u is greater than max allowed value %u\n", + timeout, MAX_POLL_TIMEOUT_US); + goto bail; + } + spin_lock(&fl->hlock); + if (enable) { + fl->poll_mode = true; + fl->poll_timeout = timeout; + } else { + fl->poll_mode = false; + fl->poll_timeout = 0; + } + spin_unlock(&fl->hlock); + ADSPRPC_INFO("updated poll mode to %d, timeout %u\n", enable, timeout); +bail: + return err; +} + +int fastrpc_internal_control(struct fastrpc_file *fl, + struct fastrpc_ioctl_control *cp) +{ + int err = 0; + unsigned int latency; + struct fastrpc_apps *me = &gfa; + u32 silver_core_count = me->silvercores.corecount, ii = 0, cpu; + + VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps)); + if (err) { + err = -EBADF; + goto bail; + } + VERIFY(err, 
!IS_ERR_OR_NULL(cp)); + if (err) { + err = -EINVAL; + goto bail; + } + + switch (cp->req) { + case FASTRPC_CONTROL_LATENCY: + latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ? + fl->apps->latency : PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + VERIFY(err, latency != 0); + if (err) { + err = -EINVAL; + goto bail; + } + + VERIFY(err, me->silvercores.coreno && fl->dev_pm_qos_req); + if (err) { + err = -EINVAL; + goto bail; + } + + for (ii = 0; ii < silver_core_count; ii++) { + cpu = me->silvercores.coreno[ii]; + if (!fl->qos_request) { + err = dev_pm_qos_add_request( + get_cpu_device(cpu), + &fl->dev_pm_qos_req[ii], + DEV_PM_QOS_RESUME_LATENCY, + latency); + } else { + err = dev_pm_qos_update_request( + &fl->dev_pm_qos_req[ii], + latency); + } + /* PM QoS request APIs return 0 or 1 on success */ + if (err < 0) { + ADSPRPC_WARN("QoS with lat %u failed for CPU %d, err %d, req %d\n", + latency, cpu, err, fl->qos_request); + break; + } + } + if (err >= 0) { + fl->qos_request = 1; + err = 0; + } + + /* Ensure CPU feature map updated to DSP for early WakeUp */ + fastrpc_send_cpuinfo_to_dsp(fl); + break; + case FASTRPC_CONTROL_KALLOC: + cp->kalloc.kalloc_support = 1; + break; + case FASTRPC_CONTROL_WAKELOCK: + if (fl->dev_minor != MINOR_NUM_SECURE_DEV) { + ADSPRPC_ERR( + "PM voting not allowed for non-secure device node %d\n", + fl->dev_minor); + err = -EPERM; + goto bail; + } + fl->wake_enable = cp->wp.enable; + break; + case FASTRPC_CONTROL_PM: + if (!fl->wake_enable) { + /* Kernel PM voting not requested by this application */ + err = -EACCES; + goto bail; + } + if (cp->pm.timeout > MAX_PM_TIMEOUT_MS) + fl->ws_timeout = MAX_PM_TIMEOUT_MS; + else + fl->ws_timeout = cp->pm.timeout; + VERIFY(err, VALID_FASTRPC_CID(fl->cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + fastrpc_pm_awake(fl, gcinfo[fl->cid].secure); + break; + case FASTRPC_CONTROL_DSPPROCESS_CLEAN: + (void)fastrpc_release_current_dsp_process(fl); + break; + case FASTRPC_CONTROL_RPC_POLL: + err = 
fastrpc_manage_poll_mode(fl, cp->lp.enable, cp->lp.latency); + if (err) + goto bail; + break; + default: + err = -EBADRQC; + break; + } +bail: + return err; +} + +static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) +{ + int err = 0, session = -1, cid = -1; + struct fastrpc_apps *me = &gfa; + + if (fl->servloc_name && sloc_name + && !strcmp(fl->servloc_name, sloc_name)) { + err = fastrpc_get_spd_session(sloc_name, &session, &cid); + if (err || cid != fl->cid) + goto bail; + if (!strcmp(fl->servloc_name, + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME) || !strcmp(fl->servloc_name, + SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME) || + !strcmp(fl->servloc_name, + SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME)) { + err = wait_event_interruptible( + me->channel[cid].spd[session].wait_for_pdup, + atomic_read(&me->channel[cid].spd[session].ispdup)); + goto bail; + } + } +bail: + return err; +} + +int fastrpc_setmode(unsigned long ioctl_param, + struct fastrpc_file *fl) +{ + int err = 0; + + switch ((uint32_t)ioctl_param) { + case FASTRPC_MODE_PARALLEL: + case FASTRPC_MODE_SERIAL: + fl->mode = (uint32_t)ioctl_param; + break; + case FASTRPC_MODE_PROFILE: + fl->profile = (uint32_t)ioctl_param; + break; + case FASTRPC_MODE_SESSION: + if (fl->untrusted_process) { + err = -EPERM; + ADSPRPC_ERR( + "multiple sessions not allowed for untrusted apps\n"); + goto bail; + } + fl->sessionid = 1; + fl->tgid |= SESSION_ID_MASK; + break; + default: + err = -ENOTTY; + break; + } +bail: + return err; +} + +int fastrpc_control(struct fastrpc_ioctl_control *cp, + void *param, struct fastrpc_file *fl) +{ + int err = 0; + + K_COPY_FROM_USER(err, 0, cp, param, + sizeof(*cp)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_control(fl, cp))); + if (err) + goto bail; + if (cp->req == FASTRPC_CONTROL_KALLOC) { + K_COPY_TO_USER(err, 0, param, cp, sizeof(*cp)); + if (err) { + err = -EFAULT; + goto bail; + } + } +bail: + return err; +} + 
+static int fastrpc_get_dsp_info( + struct fastrpc_ioctl_capability *cap, + void *param, struct fastrpc_file *fl) +{ + int err = 0; + + K_COPY_FROM_USER(err, 0, cap, param, + sizeof(struct fastrpc_ioctl_capability)); + VERIFY(err, cap->domain < NUM_CHANNELS); + if (err) { + err = -ECHRNG; + goto bail; + } + cap->capability = 0; + + err = fastrpc_get_info_from_kernel(cap, fl); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, &((struct fastrpc_ioctl_capability *) + param)->capability, &cap->capability, sizeof(cap->capability)); +bail: + return err; +} + + +int fastrpc_dspsignal_signal(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_signal *sig) +{ + int err = 0, cid = -1; + struct fastrpc_channel_ctx *channel_ctx = NULL; + uint64_t msg = 0; + + // We don't check if the signal has even been allocated since we don't + // track outgoing signals in the driver. The userspace library does a + // basic sanity check and any security validation needs to be done by + // the recipient. + DSPSIGNAL_VERBOSE("Send signal PID %u, signal %u\n", + (unsigned int)fl->tgid, (unsigned int)sig->signal_id); + VERIFY(err, sig->signal_id < DSPSIGNAL_NUM_SIGNALS); + if (err) { + ADSPRPC_ERR("Sending bad signal %u for PID %u", + sig->signal_id, (unsigned int)fl->tgid); + err = -EBADR; + goto bail; + } + + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid) && fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + + channel_ctx = &fl->apps->channel[cid]; + mutex_lock(&channel_ctx->smd_mutex); + if (fl->ssrcount != channel_ctx->ssrcount) { + err = -ECONNRESET; + mutex_unlock(&channel_ctx->smd_mutex); + goto bail; + } + + msg = (((uint64_t)fl->tgid) << 32) | ((uint64_t)sig->signal_id); + err = fastrpc_transport_send(cid, (void *)&msg, sizeof(msg), fl->trusted_vm); + mutex_unlock(&channel_ctx->smd_mutex); + +bail: + return err; +} + + +int fastrpc_dspsignal_wait(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_wait *wait) +{ + int err = 0, cid = -1; + unsigned long 
	timeout = usecs_to_jiffies(wait->timeout_usec);
	uint32_t signal_id = wait->signal_id;
	struct fastrpc_dspsignal *s = NULL;
	long ret = 0;
	unsigned long irq_flags = 0;

	DSPSIGNAL_VERBOSE("Wait for signal %u\n", signal_id);
	VERIFY(err, signal_id < DSPSIGNAL_NUM_SIGNALS);
	if (err) {
		ADSPRPC_ERR("Waiting on bad signal %u", signal_id);
		err = -EINVAL;
		goto bail;
	}
	cid = fl->cid;
	VERIFY(err, VALID_FASTRPC_CID(cid) && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}

	/* Look up the signal in its lazily-allocated group */
	spin_lock_irqsave(&fl->dspsignals_lock, irq_flags);
	if (fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE] != NULL) {
		struct fastrpc_dspsignal *group =
			fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE];

		s = &group[signal_id % DSPSIGNAL_GROUP_SIZE];
	}
	if ((s == NULL) || (s->state == DSPSIGNAL_STATE_UNUSED)) {
		spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags);
		ADSPRPC_ERR("Unknown signal id %u\n", signal_id);
		err = -ENOENT;
		goto bail;
	}
	/*
	 * Not PENDING means the signal already fired (SIGNALED: return
	 * success immediately) or was canceled/destroyed (-EINTR).
	 */
	if (s->state != DSPSIGNAL_STATE_PENDING) {
		if ((s->state == DSPSIGNAL_STATE_CANCELED) || (s->state == DSPSIGNAL_STATE_UNUSED))
			err = -EINTR;
		spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags);
		DSPSIGNAL_VERBOSE("Signal %u in state %u, complete wait immediately",
				  signal_id, s->state);
		goto bail;
	}
	spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags);

	/*
	 * 0xffffffff is the userspace sentinel for "wait forever".
	 * NOTE(review): s->comp is waited on after dropping the lock;
	 * presumably signal groups live until file release — confirm
	 * they are never freed while a waiter is blocked.
	 */
	if (timeout != 0xffffffff)
		ret = wait_for_completion_interruptible_timeout(&s->comp, timeout);
	else
		ret = wait_for_completion_interruptible(&s->comp);

	if (ret == 0) {
		DSPSIGNAL_VERBOSE("Wait for signal %u timed out\n", signal_id);
		err = -ETIMEDOUT;
		goto bail;
	} else if (ret < 0) {
		/* Interrupted by a signal: propagate -ERESTARTSYS etc. */
		ADSPRPC_ERR("Wait for signal %u failed %d\n", signal_id, (int)ret);
		err = ret;
		goto bail;
	}

	/* Re-arm a SIGNALED signal; report cancellation/destroy as -EINTR */
	spin_lock_irqsave(&fl->dspsignals_lock, irq_flags);
	if (s->state == DSPSIGNAL_STATE_SIGNALED) {
		s->state = DSPSIGNAL_STATE_PENDING;
		DSPSIGNAL_VERBOSE("Signal %u completed\n", signal_id);
	} else if ((s->state == DSPSIGNAL_STATE_CANCELED) || (s->state == DSPSIGNAL_STATE_UNUSED)) {
		DSPSIGNAL_VERBOSE("Signal %u cancelled or destroyed\n", signal_id);
		err = -EINTR;
	}
	spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags);

bail:
	return err;
}


/*
 * Allocate (if needed) and arm a dspsignal for this file. Signals are
 * stored in lazily-allocated groups of DSPSIGNAL_GROUP_SIZE entries.
 */
int fastrpc_dspsignal_create(struct fastrpc_file *fl,
			     struct fastrpc_ioctl_dspsignal_create *create)
{
	int err = 0, cid = -1;
	uint32_t signal_id = create->signal_id;
	struct fastrpc_dspsignal *group, *sig;
	unsigned long irq_flags = 0;

	VERIFY(err, signal_id < DSPSIGNAL_NUM_SIGNALS);
	if (err) {
		err = -EINVAL;
		goto bail;
	}
	cid = fl->cid;
	VERIFY(err, VALID_FASTRPC_CID(cid) && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}

	// Use a separate mutex for creating signals. This avoids holding on
	// to a spinlock if we need to allocate a whole group of signals. The
	// mutex ensures nobody else will allocate the same group.
	mutex_lock(&fl->signal_create_mutex);
	spin_lock_irqsave(&fl->dspsignals_lock, irq_flags);

	group = fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE];
	if (group == NULL) {
		int i;
		// Release the spinlock while we allocate a new group but take
		// it back before taking the group into use. No other code
		// allocates groups so the mutex is sufficient.
+ spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + VERIFY(err, (group = kzalloc(DSPSIGNAL_GROUP_SIZE * sizeof(*group), + GFP_KERNEL)) != NULL); + if (err) { + ADSPRPC_ERR("Unable to allocate signal group\n"); + err = -ENOMEM; + mutex_unlock(&fl->signal_create_mutex); + goto bail; + } + + for (i = 0; i < DSPSIGNAL_GROUP_SIZE; i++) { + sig = &group[i]; + init_completion(&sig->comp); + sig->state = DSPSIGNAL_STATE_UNUSED; + } + spin_lock_irqsave(&fl->dspsignals_lock, irq_flags); + fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE] = group; + } + + sig = &group[signal_id % DSPSIGNAL_GROUP_SIZE]; + if (sig->state != DSPSIGNAL_STATE_UNUSED) { + err = -EBUSY; + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + mutex_unlock(&fl->signal_create_mutex); + ADSPRPC_ERR("Attempting to create signal %u already in use (state %u)\n", + signal_id, sig->state); + goto bail; + } + + sig->state = DSPSIGNAL_STATE_PENDING; + reinit_completion(&sig->comp); + + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + mutex_unlock(&fl->signal_create_mutex); + + DSPSIGNAL_VERBOSE("Signal %u created\n", signal_id); + +bail: + return err; +} + + +int fastrpc_dspsignal_destroy(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_destroy *destroy) +{ + int err = 0, cid = -1; + uint32_t signal_id = destroy->signal_id; + struct fastrpc_dspsignal *s = NULL; + unsigned long irq_flags = 0; + + DSPSIGNAL_VERBOSE("Destroy signal %u\n", signal_id); + + VERIFY(err, signal_id < DSPSIGNAL_NUM_SIGNALS); + if (err) { + err = -EINVAL; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid) && fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + + spin_lock_irqsave(&fl->dspsignals_lock, irq_flags); + + if (fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE] != NULL) { + struct fastrpc_dspsignal *group = + fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE]; + + s = &group[signal_id % DSPSIGNAL_GROUP_SIZE]; + } + if ((s == NULL) || (s->state == 
DSPSIGNAL_STATE_UNUSED)) { + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + ADSPRPC_ERR("Attempting to destroy unused signal %u\n", signal_id); + err = -ENOENT; + goto bail; + } + + s->state = DSPSIGNAL_STATE_UNUSED; + complete_all(&s->comp); + + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + DSPSIGNAL_VERBOSE("Signal %u destroyed\n", signal_id); + +bail: + return err; +} + + +int fastrpc_dspsignal_cancel_wait(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_cancel_wait *cancel) +{ + int err = 0, cid = -1; + uint32_t signal_id = cancel->signal_id; + struct fastrpc_dspsignal *s = NULL; + unsigned long irq_flags = 0; + + DSPSIGNAL_VERBOSE("Cancel wait for signal %u\n", signal_id); + + VERIFY(err, signal_id < DSPSIGNAL_NUM_SIGNALS); + if (err) { + err = -EINVAL; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid) && fl->sctx != NULL); + if (err) { + err = -EBADR; + goto bail; + } + + spin_lock_irqsave(&fl->dspsignals_lock, irq_flags); + + if (fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE] != NULL) { + struct fastrpc_dspsignal *group = + fl->signal_groups[signal_id / DSPSIGNAL_GROUP_SIZE]; + + s = &group[signal_id % DSPSIGNAL_GROUP_SIZE]; + } + if ((s == NULL) || (s->state == DSPSIGNAL_STATE_UNUSED)) { + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + ADSPRPC_ERR("Attempting to cancel unused signal %u\n", signal_id); + err = -ENOENT; + goto bail; + } + + if (s->state != DSPSIGNAL_STATE_CANCELED) { + s->state = DSPSIGNAL_STATE_CANCELED; + complete_all(&s->comp); + } + + spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + + DSPSIGNAL_VERBOSE("Signal %u cancelled\n", signal_id); + +bail: + return err; +} + + +static inline int fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, + unsigned int ioctl_num, union fastrpc_ioctl_param *p, + void *param) +{ + union { + struct fastrpc_ioctl_mmap mmap; + struct fastrpc_ioctl_munmap munmap; + } i; + int err = 0; + + switch (ioctl_num) { + case 
FASTRPC_IOCTL_MEM_MAP: + K_COPY_FROM_USER(err, 0, &p->mem_map, param, + sizeof(p->mem_map)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_mem_map(fl, + &p->mem_map))); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, param, &p->mem_map, sizeof(p->mem_map)); + if (err) { + err = -EFAULT; + goto bail; + } + break; + case FASTRPC_IOCTL_MEM_UNMAP: + K_COPY_FROM_USER(err, 0, &p->mem_unmap, param, + sizeof(p->mem_unmap)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_mem_unmap(fl, + &p->mem_unmap))); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, param, &p->mem_unmap, + sizeof(p->mem_unmap)); + if (err) { + err = -EFAULT; + goto bail; + } + break; + case FASTRPC_IOCTL_MMAP: + K_COPY_FROM_USER(err, 0, &p->mmap, param, + sizeof(p->mmap)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p->mmap))); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, param, &p->mmap, sizeof(p->mmap)); + if (err) { + err = -EFAULT; + goto bail; + } + break; + case FASTRPC_IOCTL_MUNMAP: + K_COPY_FROM_USER(err, 0, &p->munmap, param, + sizeof(p->munmap)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, + &p->munmap))); + if (err) + goto bail; + break; + case FASTRPC_IOCTL_MMAP_64: + K_COPY_FROM_USER(err, 0, &p->mmap64, param, + sizeof(p->mmap64)); + if (err) { + err = -EFAULT; + goto bail; + } + get_fastrpc_ioctl_mmap_64(&p->mmap64, &i.mmap); + VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap))); + if (err) + goto bail; + put_fastrpc_ioctl_mmap_64(&p->mmap64, &i.mmap); + K_COPY_TO_USER(err, 0, param, &p->mmap64, sizeof(p->mmap64)); + if (err) { + err = -EFAULT; + goto bail; + } + break; + case FASTRPC_IOCTL_MUNMAP_64: + K_COPY_FROM_USER(err, 0, &p->munmap64, param, + sizeof(p->munmap64)); + if (err) { + err = -EFAULT; + goto bail; + } + get_fastrpc_ioctl_munmap_64(&p->munmap64, &i.munmap); + 
VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, + &i.munmap))); + if (err) + goto bail; + break; + case FASTRPC_IOCTL_MUNMAP_FD: + K_COPY_FROM_USER(err, 0, &p->munmap_fd, param, + sizeof(p->munmap_fd)); + if (err) { + err = -EFAULT; + goto bail; + } + VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl, + &p->munmap_fd))); + if (err) + goto bail; + break; + default: + err = -ENOTTY; + pr_info("bad ioctl: %d\n", ioctl_num); + break; + } +bail: + return err; +} + +static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + union fastrpc_ioctl_param p; + void *param = (char *)ioctl_param; + struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; + int size = 0, err = 0; + uint32_t info; + + p.inv.fds = NULL; + p.inv.attrs = NULL; + p.inv.crc = NULL; + p.inv.perf_kernel = NULL; + p.inv.perf_dsp = NULL; + p.inv.job = NULL; + + if (fl->servloc_name) { + err = fastrpc_check_pd_status(fl, + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME); + err |= fastrpc_check_pd_status(fl, + SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME); + err |= fastrpc_check_pd_status(fl, + SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME); + if (err) + goto bail; + } + + spin_lock(&fl->hlock); + if (fl->file_close >= FASTRPC_PROCESS_EXIT_START) { + err = -ESHUTDOWN; + pr_warn("adsprpc: fastrpc_device_release is happening, So not sending any new requests to DSP\n"); + spin_unlock(&fl->hlock); + goto bail; + } + spin_unlock(&fl->hlock); + + switch (ioctl_num) { + case FASTRPC_IOCTL_INVOKE: + size = sizeof(struct fastrpc_ioctl_invoke); + fallthrough; + case FASTRPC_IOCTL_INVOKE_FD: + if (!size) + size = sizeof(struct fastrpc_ioctl_invoke_fd); + fallthrough; + case FASTRPC_IOCTL_INVOKE_ATTRS: + if (!size) + size = sizeof(struct fastrpc_ioctl_invoke_attrs); + fallthrough; + case FASTRPC_IOCTL_INVOKE_CRC: + if (!size) + size = sizeof(struct fastrpc_ioctl_invoke_crc); + case FASTRPC_IOCTL_INVOKE_PERF: + if (!size) + size = sizeof(struct 
fastrpc_ioctl_invoke_perf);
		trace_fastrpc_msg("invoke: begin");
		/* @size was set by whichever INVOKE variant fell through */
		K_COPY_FROM_USER(err, 0, &p.inv, param, size);
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
								USER_MSG, &p.inv)));
		trace_fastrpc_msg("invoke: end");
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INVOKE2:
		K_COPY_FROM_USER(err, 0, &p.inv2, param,
				 sizeof(struct fastrpc_ioctl_invoke2));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, &p.inv2)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		err = fastrpc_setmode(ioctl_param, fl);
		break;
	case FASTRPC_IOCTL_CONTROL:
		err = fastrpc_control(&p.cp, param, fl);
		break;
	case FASTRPC_IOCTL_GETINFO:
		K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT has no attrs/siglen; zero them before the copy */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		fallthrough;
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		K_COPY_FROM_USER(err, 0, &p.init, param, size);
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GET_DSP_INFO:
		err = fastrpc_get_dsp_info(&p.cap, param, fl);
		break;
	case FASTRPC_IOCTL_MEM_MAP:
		fallthrough;
	case FASTRPC_IOCTL_MEM_UNMAP:
		fallthrough;
	case FASTRPC_IOCTL_MMAP:
		fallthrough;
	case FASTRPC_IOCTL_MUNMAP:
		fallthrough;
	case FASTRPC_IOCTL_MMAP_64:
		fallthrough;
	case FASTRPC_IOCTL_MUNMAP_64:
		fallthrough;
	case FASTRPC_IOCTL_MUNMAP_FD:
		/* All mapping ioctls share one sub-dispatcher */
		err = fastrpc_mmap_device_ioctl(fl, ioctl_num, &p, param);
		break;

	case FASTRPC_IOCTL_DSPSIGNAL_SIGNAL:
		K_COPY_FROM_USER(err, 0, &p.sig, param,
				 sizeof(struct fastrpc_ioctl_dspsignal_signal));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_dspsignal_signal(fl, &p.sig)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_DSPSIGNAL_WAIT:
		K_COPY_FROM_USER(err, 0, &p.wait, param,
				 sizeof(struct fastrpc_ioctl_dspsignal_wait));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_dspsignal_wait(fl, &p.wait)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_DSPSIGNAL_CREATE:
		K_COPY_FROM_USER(err, 0, &p.cre, param,
				 sizeof(struct fastrpc_ioctl_dspsignal_create));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_dspsignal_create(fl, &p.cre)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_DSPSIGNAL_DESTROY:
		K_COPY_FROM_USER(err, 0, &p.des, param,
				 sizeof(struct fastrpc_ioctl_dspsignal_destroy));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_dspsignal_destroy(fl, &p.des)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT:
		K_COPY_FROM_USER(err, 0, &p.canc, param,
				 sizeof(struct fastrpc_ioctl_dspsignal_cancel_wait));
		if (err) {
			err = -EFAULT;
			goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_dspsignal_cancel_wait(fl, &p.canc)));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}

/*
 * fastrpc_smq_ctx_detail : Store smq_invoke_ctx structure parameter.
 * Input :
 *	structure smq_invoke_ctx
 *	void* mini_dump_buff
 */
static void fastrpc_smq_ctx_detail(struct smq_invoke_ctx *smq_ctx, int cid, void *mini_dump_buff)
{
	int i = 0;
	remote_arg64_t *rpra = NULL;
	struct fastrpc_mmap *map = NULL;

	if (!smq_ctx)
		return;
	if (smq_ctx->buf && smq_ctx->buf->virt)
		rpra = smq_ctx->buf->virt;
	/*
	 * Only the first in/out buffer's map (if any) is dumped: the loop
	 * unconditionally breaks after its first iteration.
	 */
	for (i = 0; rpra &&
		i < (REMOTE_SCALARS_INBUFS(smq_ctx->sc) + REMOTE_SCALARS_OUTBUFS(smq_ctx->sc));
		++i) {
		map = smq_ctx->maps[i];
		if (map) {
			scnprintf(mini_dump_buff + strlen(mini_dump_buff),
				  MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff),
				  smq_invoke_ctx_params, fastrpc_mmap_params,
				  smq_ctx->pid, smq_ctx->tgid, smq_ctx->handle,
				  smq_ctx->sc, smq_ctx->fl, smq_ctx->fds,
				  smq_ctx->magic, map->fd, map->flags, map->buf,
				  map->phys, map->size, map->va,
				  map->raddr, map->len, map->refs,
				  map->secure);
		} else {
			scnprintf(mini_dump_buff + strlen(mini_dump_buff),
				  MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff),
				  smq_invoke_ctx_params, smq_ctx->pid, smq_ctx->tgid,
				  smq_ctx->handle, smq_ctx->sc, smq_ctx->fl, smq_ctx->fds,
				  smq_ctx->magic);
		}
		break;
	}
}

/*
 * fastrpc_print_fastrpcbuf : Print fastrpc_buf structure parameter.
 * Input :
 *	structure fastrpc_buf
 *	void* buffer
 */
static void fastrpc_print_fastrpcbuf(struct fastrpc_buf *buf, void *buffer)
{
	if (!buf || !buffer)
		return;

	/* Appends after the current contents of @buffer (a C string) */
	scnprintf(buffer + strlen(buffer),
		  MINI_DUMP_DBG_SIZE - strlen(buffer),
		  fastrpc_buf_params, buf->fl, buf->phys,
		  buf->virt, buf->size, buf->dma_attr, buf->raddr,
		  buf->flags, buf->type, buf->in_use);
}

/*
 * fastrpc_print_debug_data : Print debug structure variable in CMA memory.
+ * Input cid: Channel id + */ +static void fastrpc_print_debug_data(int cid) +{ + unsigned int i = 0, count = 0, gmsg_log_iter = 3, err = 0, len = 0; + unsigned int tx_index = 0, rx_index = 0; + unsigned long flags = 0; + char *gmsg_log_tx = NULL; + char *gmsg_log_rx = NULL; + void *mini_dump_buff = NULL; + struct fastrpc_apps *me = &gfa; + struct smq_invoke_rspv2 *rsp = NULL; + struct fastrpc_file *fl = NULL; + struct fastrpc_channel_ctx *chan = NULL; + struct hlist_node *n = NULL; + struct smq_invoke_ctx *ictx = NULL; + struct fastrpc_tx_msg *tx_msg = NULL; + struct fastrpc_buf *buf = NULL; + struct fastrpc_mmap *map = NULL; + unsigned long irq_flags = 0; + + VERIFY(err, NULL != (gmsg_log_tx = kzalloc(MD_GMSG_BUFFER, GFP_KERNEL))); + if (err) { + err = -ENOMEM; + return; + } + VERIFY(err, NULL != (gmsg_log_rx = kzalloc(MD_GMSG_BUFFER, GFP_KERNEL))); + if (err) { + err = -ENOMEM; + return; + } + chan = &me->channel[cid]; + if ((!chan) || (!chan->buf)) + return; + + mini_dump_buff = chan->buf->virt; + if (!mini_dump_buff) + return; + + if (chan) { + tx_index = chan->gmsg_log.tx_index; + rx_index = chan->gmsg_log.rx_index; + } + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { + if (fl->cid == cid) { + scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + "\nfastrpc_file : %p\n", fl); + scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + fastrpc_file_params, fl->tgid, + fl->cid, fl->ssrcount, fl->pd, + fl->profile, fl->mode, + fl->tgid_open, fl->num_cached_buf, + fl->num_pers_hdrs, fl->sessionid, + fl->servloc_name, fl->file_close, + fl->dsp_proc_init, fl->apps, + fl->qos_request, fl->dev_minor, + fl->debug_buf, + fl->debug_buf_alloced_attempted, + fl->wake_enable, + fl->ws_timeout, + fl->untrusted_process); + scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + 
"\nSession Maps\n"); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + fastrpc_mmap_params, + map->fd, + map->flags, map->buf, + map->phys, map->size, + map->va, map->raddr, + map->len, map->refs, + map->secure); + } + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\ncached_bufs\n"); + hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) { + fastrpc_print_fastrpcbuf(buf, mini_dump_buff); + } + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\ninit_mem: %p\n", fl->init_mem); + fastrpc_print_fastrpcbuf(fl->init_mem, mini_dump_buff); + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\npers_hdr_buf: %p\n", fl->pers_hdr_buf); + fastrpc_print_fastrpcbuf(fl->pers_hdr_buf, mini_dump_buff); + snprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\nhdr_bufs: %p\n", fl->hdr_bufs); + fastrpc_print_fastrpcbuf(fl->hdr_bufs, mini_dump_buff); + if (fl->debugfs_file) { + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\nfl->debugfs_file.d_iname : %s\n", + fl->debugfs_file->d_iname); + } + if (fl->sctx) { + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\nfl->sctx->smmu.cb : %d\n", + fl->sctx->smmu.cb); + } + if (fl->secsctx) { + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "\nfl->secsctx->smmu.cb : %d\n", + fl->secsctx->smmu.cb); + } + spin_lock(&fl->hlock); + scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + "\nPending Ctx:\n"); + hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) { + fastrpc_smq_ctx_detail(ictx, + cid, mini_dump_buff); + } + 
scnprintf(mini_dump_buff + + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - + strlen(mini_dump_buff), + "\nInterrupted Ctx:\n"); + hlist_for_each_entry_safe(ictx, n, + &fl->clst.interrupted, + hn) { + fastrpc_smq_ctx_detail(ictx, + cid, mini_dump_buff); + } + spin_unlock(&fl->hlock); + } + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + spin_lock_irqsave(&chan->gmsg_log.lock, flags); + if (rx_index) { + for (i = rx_index, count = 0, len = 0 ; i > 0 && + count <= gmsg_log_iter; i--, count++) { + rsp = &chan->gmsg_log.rx_msgs[i].rsp; + len += scnprintf(gmsg_log_rx + len, MD_GMSG_BUFFER - len, + "ctx: 0x%x, retval: %d, flags: %d, early_wake_time: %d, version: %d\n", + rsp->ctx, rsp->retval, rsp->flags, + rsp->early_wake_time, rsp->version); + } + } + if (tx_index) { + for (i = tx_index, count = 0, len = 0; + i > 0 && count <= gmsg_log_iter; + i--, count++) { + tx_msg = &chan->gmsg_log.tx_msgs[i]; + len += scnprintf(gmsg_log_tx + len, MD_GMSG_BUFFER - len, + "pid: %d, tid: %d, ctx: 0x%x, handle: 0x%x, sc: 0x%x, addr: 0x%x, size:%d\n", + tx_msg->msg.pid, + tx_msg->msg.tid, + tx_msg->msg.invoke.header.ctx, + tx_msg->msg.invoke.header.handle, + tx_msg->msg.invoke.header.sc, + tx_msg->msg.invoke.page.addr, + tx_msg->msg.invoke.page.size); + } + } + spin_unlock_irqrestore(&chan->gmsg_log.lock, flags); + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "gmsg_log_tx:\n%s\n", gmsg_log_tx); + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + "gmsg_log_rx:\n %s\n", gmsg_log_rx); + if (chan && chan->buf) + chan->buf->size = strlen(mini_dump_buff); + kfree(gmsg_log_tx); + kfree(gmsg_log_rx); +} + +static int fastrpc_restart_notifier_cb(struct notifier_block *nb, + unsigned long code, + void *data) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_channel_ctx *ctx; + int cid = -1; + + ctx = container_of(nb, struct fastrpc_channel_ctx, nb); + cid = ctx - &me->channel[0]; + 
	switch (code) {
	case QCOM_SSR_BEFORE_SHUTDOWN:
		fastrpc_rproc_trace_events(gcinfo[cid].subsys,
			"QCOM_SSR_BEFORE_SHUTDOWN", "fastrpc_restart_notifier-enter");
		pr_info("adsprpc: %s: %s subsystem is restarting\n",
			__func__, gcinfo[cid].subsys);
		/* Bump ssrcount so in-flight file handles see -ECONNRESET */
		mutex_lock(&me->channel[cid].smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		mutex_unlock(&me->channel[cid].smd_mutex);
		if (cid == RH_CID)
			me->staticpd_flags = 0;
		break;
	case QCOM_SSR_AFTER_SHUTDOWN:
		fastrpc_rproc_trace_events(gcinfo[cid].subsys,
			"QCOM_SSR_AFTER_SHUTDOWN", "fastrpc_restart_notifier-enter");
		pr_info("adsprpc: %s: received RAMDUMP notification for %s\n",
			__func__, gcinfo[cid].subsys);
		break;
	case QCOM_SSR_BEFORE_POWERUP:
		fastrpc_rproc_trace_events(gcinfo[cid].subsys,
			"QCOM_SSR_BEFORE_POWERUP", "fastrpc_restart_notifier-enter");
		/* Skip ram dump collection in first boot */
		if (cid == CDSP_DOMAIN_ID && dump_enabled() &&
				ctx->ssrcount) {
			mutex_lock(&me->channel[cid].smd_mutex);
			fastrpc_print_debug_data(cid);
			mutex_unlock(&me->channel[cid].smd_mutex);
			fastrpc_ramdump_collection(cid);
		}
		/* Wake clients blocked on this channel so they can error out */
		fastrpc_notify_drivers(me, cid);
		break;
	case QCOM_SSR_AFTER_POWERUP:
		fastrpc_rproc_trace_events(gcinfo[cid].subsys,
			"QCOM_SSR_AFTER_POWERUP", "fastrpc_restart_notifier-enter");
		pr_info("adsprpc: %s: %s subsystem is up\n",
			__func__, gcinfo[cid].subsys);
		ctx->issubsystemup = 1;
		break;
	default:
		break;
	}

	fastrpc_rproc_trace_events(dev_name(me->dev), "fastrpc_restart_notifier", "exit");
	return NOTIFY_DONE;
}


/*
 * Protection-domain-restart (PDR) callback registered via
 * pdr_handle_alloc(); @priv is the fastrpc_static_pd this lookup was
 * registered for. DOWN wakes/flushes clients of that PD, UP releases
 * waiters in fastrpc_check_pd_status().
 */
static void fastrpc_pdr_cb(int state, char *service_path, void *priv)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_static_pd *spd;
	int err = 0;

	spd = priv;
	VERIFY(err, spd);
	if (err)
		goto bail;

	switch (state) {
	case SERVREG_SERVICE_STATE_DOWN:
		pr_info("adsprpc: %s: %s (%s) is down for PDR on %s\n",
			__func__, spd->spdname,
			spd->servloc_name,
			gcinfo[spd->cid].subsys);
		mutex_lock(&me->channel[spd->cid].smd_mutex);
		spd->pdrcount++;
		atomic_set(&spd->ispdup, 0);
		mutex_unlock(&me->channel[spd->cid].smd_mutex);
		if (!strcmp(spd->servloc_name,
				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
			me->staticpd_flags = 0;

		fastrpc_notify_pdr_drivers(me, spd->servloc_name);
		break;
	case SERVREG_SERVICE_STATE_UP:
		pr_info("adsprpc: %s: %s (%s) is up for PDR on %s\n",
			__func__, spd->spdname,
			spd->servloc_name,
			gcinfo[spd->cid].subsys);
		atomic_set(&spd->ispdup, 1);
		wake_up_interruptible(&spd->wait_for_pdup);
		break;
	default:
		break;
	}
bail:
	if (err) {
		pr_err("adsprpc: %s: failed for path %s, state %d, spd %pK\n",
			__func__, service_path, state, spd);
	}
}

static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
/* Only DSP service 64-bit app will interface with fastrpc TVM driver.
 * There is no need to support 32-bit fastrpc driver on TVM.
+ */ +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + .compat_ioctl = NULL, +#else + .compat_ioctl = compat_fastrpc_device_ioctl, +#endif +}; + +static const struct of_device_id fastrpc_match_table[] = { + { .compatible = "qcom,msm-fastrpc-adsp", }, + { .compatible = "qcom,msm-fastrpc-compute", }, + { .compatible = "qcom,msm-fastrpc-compute-cb", }, + { .compatible = "qcom,msm-adsprpc-mem-region", }, + {} +}; + +static int fastrpc_cb_probe(struct device *dev) +{ + struct fastrpc_channel_ctx *chan; + struct fastrpc_session_ctx *sess; + struct of_phandle_args iommuspec; + struct fastrpc_apps *me = &gfa; + const char *name; + int err = 0, cid = -1, i = 0; + u32 sharedcb_count = 0, j = 0; + uint32_t dma_addr_pool[2] = {0, 0}; + + VERIFY(err, NULL != (name = of_get_property(dev->of_node, + "label", NULL))); + if (err) { + err = -EINVAL; + goto bail; + } + + for (i = 0; i < NUM_CHANNELS; i++) { + if (!gcinfo[i].name) + continue; + if (!strcmp(name, gcinfo[i].name)) + break; + } + VERIFY(err, i < NUM_CHANNELS); + if (err) { + err = -ECHRNG; + goto bail; + } + cid = i; + chan = &gcinfo[i]; + VERIFY(err, chan->sesscount < NUM_SESSIONS); + if (err) { + err = -EINVAL; + goto bail; + } + err = of_parse_phandle_with_args(dev->of_node, "iommus", + "#iommu-cells", 0, &iommuspec); + if (err) { + pr_err("Error: adsprpc: %s: parsing iommu arguments failed for %s with err %d\n", + __func__, dev_name(dev), err); + goto bail; + } + sess = &chan->session[chan->sesscount]; + sess->used = 0; + sess->smmu.coherent = of_property_read_bool(dev->of_node, + "dma-coherent"); + sess->smmu.secure = of_property_read_bool(dev->of_node, + "qcom,secure-context-bank"); + sess->smmu.cb = iommuspec.args[0] & 0xf; + sess->smmu.dev = dev; + sess->smmu.dev_name = dev_name(dev); + sess->smmu.enabled = 1; + + if (!sess->smmu.dev->dma_parms) + sess->smmu.dev->dma_parms = devm_kzalloc(sess->smmu.dev, + sizeof(*sess->smmu.dev->dma_parms), GFP_KERNEL); + + dma_set_max_seg_size(sess->smmu.dev, DMA_BIT_MASK(32)); + 
dma_set_seg_boundary(sess->smmu.dev, (unsigned long)DMA_BIT_MASK(64)); + + of_property_read_u32_array(dev->of_node, "qcom,iommu-dma-addr-pool", + dma_addr_pool, 2); + me->max_size_limit = (dma_addr_pool[1] == 0 ? 0x78000000 : + dma_addr_pool[1]); + + if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { + err = of_property_read_u32(dev->of_node, "shared-cb", + &sharedcb_count); + if (err) + goto bail; + if (sharedcb_count > 0) { + struct fastrpc_session_ctx *dup_sess; + + for (j = 1; j < sharedcb_count && + chan->sesscount < NUM_SESSIONS; j++) { + chan->sesscount++; + dup_sess = &chan->session[chan->sesscount]; + memcpy(dup_sess, sess, + sizeof(struct fastrpc_session_ctx)); + } + } + } + + chan->sesscount++; + if (debugfs_root && !debugfs_global_file) { + debugfs_global_file = debugfs_create_file("global", 0644, + debugfs_root, NULL, &debugfs_fops); + if (IS_ERR_OR_NULL(debugfs_global_file)) { + pr_warn("Error: %s: %s: failed to create debugfs global file\n", + current->comm, __func__); + debugfs_global_file = NULL; + } + } +bail: + return err; +} + +static void init_secure_vmid_list(struct device *dev, char *prop_name, + struct secure_vm *destvm) +{ + int err = 0; + u32 len = 0, i = 0; + u32 *rhvmlist = NULL; + u32 *rhvmpermlist = NULL; + + if (!of_find_property(dev->of_node, prop_name, &len)) + goto bail; + if (len == 0) + goto bail; + len /= sizeof(u32); + VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL))); + if (err) + goto bail; + VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32), + GFP_KERNEL))); + if (err) + goto bail; + for (i = 0; i < len; i++) { + err = of_property_read_u32_index(dev->of_node, prop_name, i, + &rhvmlist[i]); + if (err) { + pr_err("Error: adsprpc: %s: failed to read VMID\n", + __func__); + goto bail; + } + ADSPRPC_INFO("secure VMID = %d\n", + rhvmlist[i]); + rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC; + } + destvm->vmid = rhvmlist; + destvm->vmperm = rhvmpermlist; + destvm->vmcount = len; 
+bail: + if (err) { + kfree(rhvmlist); + kfree(rhvmpermlist); + } +} + +static void init_qos_cores_list(struct device *dev, char *prop_name, + struct qos_cores *silvercores) +{ + int err = 0; + u32 len = 0, i = 0; + u32 *coreslist = NULL; + + if (!of_find_property(dev->of_node, prop_name, &len)) + goto bail; + if (len == 0) + goto bail; + len /= sizeof(u32); + VERIFY(err, NULL != (coreslist = kcalloc(len, sizeof(u32), + GFP_KERNEL))); + if (err) + goto bail; + for (i = 0; i < len; i++) { + err = of_property_read_u32_index(dev->of_node, prop_name, i, + &coreslist[i]); + if (err) { + pr_err("adsprpc: %s: failed to read QOS cores list\n", + __func__); + goto bail; + } + } + silvercores->coreno = coreslist; + silvercores->corecount = len; +bail: + if (err) + kfree(coreslist); + +} + +static void fastrpc_init_privileged_gids(struct device *dev, char *prop_name, + struct gid_list *gidlist) +{ + int err = 0; + u32 len = 0, i = 0; + u32 *gids = NULL; + + if (!of_find_property(dev->of_node, prop_name, &len)) + goto bail; + if (len == 0) + goto bail; + len /= sizeof(u32); + gids = kcalloc(len, sizeof(u32), GFP_KERNEL); + if (!gids) { + err = ENOMEM; + goto bail; + } + for (i = 0; i < len; i++) { + err = of_property_read_u32_index(dev->of_node, prop_name, + i, &gids[i]); + if (err) { + pr_err("Error: adsprpc: %s: failed to read GID %u\n", + __func__, i); + goto bail; + } + pr_info("adsprpc: %s: privileged GID: %u\n", __func__, gids[i]); + } + sort(gids, len, sizeof(*gids), uint_cmp_func, NULL); + gidlist->gids = gids; + gidlist->gidcount = len; +bail: + if (err) + kfree(gids); +} + +static void configure_secure_channels(uint32_t secure_domains) +{ + struct fastrpc_apps *me = &gfa; + int ii = 0; + /* + * secure_domains contains the bitmask of the secure channels + * Bit 0 - ADSP + * Bit 1 - MDSP + * Bit 2 - SLPI + * Bit 3 - CDSP + */ + for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) { + int secure = (secure_domains >> ii) & 0x01; + + me->channel[ii].secure = secure; + 
ADSPRPC_INFO("domain %d configured as secure %d\n", ii, secure);
+	}
+}
+
+/*
+ * This function is used to create the service locator required for
+ * registering for remote process restart (PDR) notifications if that
+ * PDR property has been enabled in the fastrpc node on the DTSI.
+ */
+static int fastrpc_setup_service_locator(struct device *dev,
+					 const char *propname,
+					 char *client_name, char *service_name,
+					 char *service_path)
+{
+	int err = 0, session = -1, cid = -1;
+	struct fastrpc_apps *me = &gfa;
+	struct pdr_handle *handle = NULL;
+	struct pdr_service *service = NULL;
+
+	if (of_property_read_bool(dev->of_node, propname)) {
+		err = fastrpc_get_spd_session(client_name, &session, &cid);
+		if (err)
+			goto bail;
+		/* Register the service locator's callback function */
+		handle = pdr_handle_alloc(fastrpc_pdr_cb, &me->channel[cid].spd[session]);
+		if (IS_ERR_OR_NULL(handle)) {
+			/*
+			 * NOTE(review): if pdr_handle_alloc() could return
+			 * NULL, PTR_ERR(NULL) is 0 and we would bail with
+			 * "success" — presumed it only returns ERR_PTR on
+			 * failure; confirm against the PDR API.
+			 */
+			err = PTR_ERR(handle);
+			goto bail;
+		}
+		me->channel[cid].spd[session].pdrhandle = handle;
+		service = pdr_add_lookup(handle, service_name, service_path);
+		if (IS_ERR_OR_NULL(service)) {
+			err = PTR_ERR(service);
+			goto bail;
+		}
+		pr_info("adsprpc: %s: pdr_add_lookup enabled for %s (%s, %s), DTSI (%s)\n",
+			__func__, service_name, client_name, service_path, propname);
+	}
+
+bail:
+	if (err) {
+		pr_warn("adsprpc: %s: failed for %s (%s, %s), DTSI (%s) with err %d\n",
+			__func__, service_name, client_name, service_path, propname, err);
+	}
+	return err;
+}
+
+/*
+ * remote_cdsp_status_show - Updates the buffer with remote cdsp status
+ * by reading the fastrpc node.
+ * @dev : pointer to device node.
+ * @attr: pointer to device attribute.
+ * @buf : Output parameter to be updated with remote cdsp status.
+ * Return : bytes written to buffer.
+ */
+
+static ssize_t remote_cdsp_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fastrpc_apps *me = &gfa;
+
+	/*
+	 * Default remote DSP status: 0
+	 * driver possibly not probed yet or not the main device.
+	 */
+
+	if (!dev || !dev->driver ||
+		!of_device_is_compatible(dev->of_node, "qcom,msm-fastrpc-compute")) {
+		ADSPRPC_ERR("Driver not probed yet or not the main device\n");
+		return 0;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d",
+			me->remote_cdsp_status);
+}
+
+/* Remote cdsp status attribute declaration as read only */
+static DEVICE_ATTR_RO(remote_cdsp_status);
+
+/* Declaring attribute for remote dsp */
+static struct attribute *msm_remote_dsp_attrs[] = {
+	&dev_attr_remote_cdsp_status.attr,
+	NULL
+};
+
+/* Defining remote dsp attributes in attributes group */
+static struct attribute_group msm_remote_dsp_attr_group = {
+	.attrs = msm_remote_dsp_attrs,
+};
+
+/*
+ * Platform probe. The fastrpc DT hierarchy has several compatibles:
+ * the main "qcom,msm-fastrpc-compute" node (sysfs group, DT config,
+ * secure-channel setup), per-context-bank "...-compute-cb" nodes
+ * (delegated to fastrpc_cb_probe), and the "...-mem-region" node
+ * (reserved-memory init). The main node finally registers the PDR
+ * service locators and populates child nodes.
+ */
+static int fastrpc_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+	uint32_t secure_domains = 0;
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-fastrpc-compute")) {
+		err = sysfs_create_group(&pdev->dev.kobj, &msm_remote_dsp_attr_group);
+		if (err) {
+			ADSPRPC_ERR(
+				"Initialization of sysfs create group failed with %d\n",
+				err);
+			goto bail;
+		}
+		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
+					&gcinfo[0].rhvm);
+		fastrpc_init_privileged_gids(dev, "qcom,fastrpc-gids",
+					&me->gidlist);
+		init_qos_cores_list(dev, "qcom,qos-cores",
+					&me->silvercores);
+
+		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
+			&me->latency);
+		if (of_get_property(dev->of_node,
+			"qcom,secure-domains", NULL) != NULL) {
+			VERIFY(err, !of_property_read_u32(dev->of_node,
+					"qcom,secure-domains",
+					&secure_domains));
+			if (!err)
+				configure_secure_channels(secure_domains);
+			else
+				pr_info("adsprpc: unable to read the domain configuration from dts\n");
+		}
+	}
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-fastrpc-compute-cb"))
+		return fastrpc_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-adsprpc-mem-region")) {
+		me->dev = dev;
+		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
+		if (ret) {
+			pr_warn("adsprpc: Error: %s: initialization of memory region adsp_mem failed with %d\n",
+				__func__, ret);
+		}
+		goto bail;
+	}
+	me->legacy_remote_heap = of_property_read_bool(dev->of_node,
+					"qcom,fastrpc-legacy-remote-heap");
+
+	err = fastrpc_setup_service_locator(dev, AUDIO_PDR_ADSP_DTSI_PROPERTY_NAME,
+		AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+		AUDIO_PDR_ADSP_SERVICE_NAME, ADSP_AUDIOPD_NAME);
+	if (err)
+		goto bail;
+
+	err = fastrpc_setup_service_locator(dev, SENSORS_PDR_ADSP_DTSI_PROPERTY_NAME,
+		SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME,
+		SENSORS_PDR_ADSP_SERVICE_NAME, ADSP_SENSORPD_NAME);
+	if (err)
+		goto bail;
+
+	err = fastrpc_setup_service_locator(dev, SENSORS_PDR_SLPI_DTSI_PROPERTY_NAME,
+		SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME,
+		SENSORS_PDR_SLPI_SERVICE_NAME, SLPI_SENSORPD_NAME);
+	if (err)
+		goto bail;
+
+	err = of_platform_populate(pdev->dev.of_node,
+					fastrpc_match_table,
+					NULL, &pdev->dev);
+	if (err)
+		goto bail;
+bail:
+	return err;
+}
+
+/*
+ * Tear down per-channel state (session SMMU dev pointers, remote-heap
+ * VMID lists, transport sessions, mutexes) and, if it was brought up,
+ * the transport itself. Safe to call from init error paths.
+ */
+static void fastrpc_deinit(void)
+{
+	struct fastrpc_channel_ctx *chan = gcinfo;
+	struct fastrpc_apps *me = &gfa;
+	int i, j;
+
+	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
+		for (j = 0; j < NUM_SESSIONS; j++) {
+			struct fastrpc_session_ctx *sess = &chan->session[j];
+
+			if (sess->smmu.dev)
+				sess->smmu.dev = NULL;
+		}
+		kfree(chan->rhvm.vmid);
+		kfree(chan->rhvm.vmperm);
+		fastrpc_transport_session_deinit(i);
+		mutex_destroy(&chan->smd_mutex);
+	}
+	if (me->transport_initialized)
+		fastrpc_transport_deinit();
+	me->transport_initialized = 0;
+	mutex_destroy(&me->mut_uid);
+}
+
+static struct platform_driver fastrpc_driver = {
+	.probe = fastrpc_probe,
+	.driver = {
+		.name = "fastrpc",
+		.of_match_table = 
fastrpc_match_table,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* View of the opaque invoke_param for each FASTRPC_DEV_* op */
+union fastrpc_dev_param {
+	struct fastrpc_dev_map_dma *map;
+	struct fastrpc_dev_unmap_dma *unmap;
+};
+
+/*
+ * Entry point for kernel clients of a fastrpc_device: map or unmap a
+ * DMA buffer on the SMMU and on the DSP. The device/file close flags
+ * are checked under me->hlock (and fl->hlock) before any work so a
+ * racing process teardown returns -ESRCH instead of touching freed
+ * state; the actual map/unmap runs under fl->internal_map_mutex with
+ * fl->map_mutex taken only around the local map table operations.
+ */
+long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num,
+				unsigned long invoke_param)
+{
+	int err = 0;
+	union fastrpc_dev_param p;
+	struct fastrpc_file *fl = NULL;
+	struct fastrpc_mmap *map = NULL;
+	struct fastrpc_apps *me = &gfa;
+	uintptr_t raddr = 0;
+	unsigned long irq_flags = 0;
+
+	switch (invoke_num) {
+	case FASTRPC_DEV_MAP_DMA:
+		p.map = (struct fastrpc_dev_map_dma *)invoke_param;
+		spin_lock_irqsave(&me->hlock, irq_flags);
+		/* Verify if fastrpc device is closed*/
+		VERIFY(err, dev && !dev->dev_close);
+		if (err) {
+			err = -ESRCH;
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			break;
+		}
+		fl = dev->fl;
+		spin_lock(&fl->hlock);
+		/* Verify if fastrpc file is being closed, holding device lock*/
+		if (fl->file_close) {
+			err = -ESRCH;
+			spin_unlock(&fl->hlock);
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			break;
+		}
+		spin_unlock(&fl->hlock);
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		mutex_lock(&fl->internal_map_mutex);
+		mutex_lock(&fl->map_mutex);
+		/* Map DMA buffer on SMMU device*/
+		err = fastrpc_mmap_create(fl, -1, p.map->buf,
+					p.map->attrs, 0, p.map->size,
+					ADSP_MMAP_DMA_BUFFER, &map);
+		mutex_unlock(&fl->map_mutex);
+		if (err) {
+			mutex_unlock(&fl->internal_map_mutex);
+			break;
+		}
+		/* Map DMA buffer on DSP*/
+		VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl,
+			map->flags, 0, map->phys, map->size, map->refs, &raddr)));
+		if (err) {
+			mutex_unlock(&fl->internal_map_mutex);
+			break;
+		}
+		map->raddr = raddr;
+		mutex_unlock(&fl->internal_map_mutex);
+		p.map->v_dsp_addr = raddr;
+		break;
+	case FASTRPC_DEV_UNMAP_DMA:
+		p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param;
+		spin_lock_irqsave(&me->hlock, irq_flags);
+		/* Verify if fastrpc device is closed*/
+		VERIFY(err, dev && !dev->dev_close);
+		if (err) {
+			err = -ESRCH;
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			break;
+		}
+		fl = dev->fl;
+		spin_lock(&fl->hlock);
+		/* Verify if fastrpc file is being closed, holding device lock*/
+		if (fl->file_close) {
+			err = -ESRCH;
+			spin_unlock(&fl->hlock);
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			break;
+		}
+		spin_unlock(&fl->hlock);
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		mutex_lock(&fl->internal_map_mutex);
+		mutex_lock(&fl->map_mutex);
+		if (!fastrpc_mmap_find(fl, -1, p.unmap->buf, 0, 0, ADSP_MMAP_DMA_BUFFER, 0, &map)) {
+			/* Un-map DMA buffer on DSP*/
+			mutex_unlock(&fl->map_mutex);
+			VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr,
+				map->phys, map->size, map->flags)));
+			if (err) {
+				mutex_unlock(&fl->internal_map_mutex);
+				break;
+			}
+			/*
+			 * Fix: re-acquire map_mutex before freeing the map so
+			 * the unconditional unlock below stays balanced (the
+			 * original double-unlocked map_mutex on this path).
+			 */
+			mutex_lock(&fl->map_mutex);
+			fastrpc_mmap_free(map, 0);
+		}
+		mutex_unlock(&fl->map_mutex);
+		mutex_unlock(&fl->internal_map_mutex);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+	return err;
+}
+EXPORT_SYMBOL(fastrpc_driver_invoke);
+
+static struct device fastrpc_bus = {
+	.init_name = "fastrpc"
+};
+
+/* Bind a fastrpc_driver to a fastrpc_device when their handles match */
+static int fastrpc_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct fastrpc_driver *frpc_driver = to_fastrpc_driver(driver);
+	struct fastrpc_device *frpc_device = to_fastrpc_device(dev);
+
+	if (frpc_device->handle == frpc_driver->handle)
+		return 1;
+	return 0;
+}
+
+/*
+ * Bus probe: take a reference on the device (under me->hlock so a
+ * concurrent close is observed) and forward to the client's probe.
+ */
+static int fastrpc_bus_probe(struct device *dev)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_device *frpc_dev = to_fastrpc_device(dev);
+	struct fastrpc_driver *frpc_drv = to_fastrpc_driver(dev->driver);
+	unsigned long irq_flags = 0;
+
+	if (frpc_drv && frpc_drv->probe) {
+		spin_lock_irqsave(&me->hlock, irq_flags);
+		if (frpc_dev->dev_close) {
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			return 0;
+		}
+		frpc_dev->refs++;
+		frpc_drv->device = dev;
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		return frpc_drv->probe(frpc_dev);
+	}
+
+	return 0;
+}
+
+static void fastrpc_bus_remove(struct device *dev)
+{
+	struct fastrpc_driver 
*frpc_drv = to_fastrpc_driver(dev->driver);
+
+	/* Tell the client its remote process went down */
+	if (frpc_drv && frpc_drv->callback)
+		frpc_drv->callback(to_fastrpc_device(dev), FASTRPC_PROC_DOWN);
+}
+
+static struct bus_type fastrpc_bus_type = {
+	.name = "fastrpc",
+	.match = fastrpc_bus_match,
+	.probe = fastrpc_bus_probe,
+	.remove = fastrpc_bus_remove,
+};
+
+static void fastrpc_dev_release(struct device *dev)
+{
+	kfree(to_fastrpc_device(dev));
+}
+
+/*
+ * Create and register the per-process fastrpc_device on the fastrpc
+ * bus (named "fastrpc-<tgid>-<cid>", handle = tgid) and link it into
+ * both fl->device and the global me->frpc_devices list.
+ */
+static int fastrpc_device_create(struct fastrpc_file *fl)
+{
+	int err = 0;
+	struct fastrpc_device *frpc_dev;
+	struct fastrpc_apps *me = &gfa;
+	unsigned long irq_flags = 0;
+
+	frpc_dev = kzalloc(sizeof(*frpc_dev), GFP_KERNEL);
+	if (!frpc_dev) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	frpc_dev->dev.parent = &fastrpc_bus;
+	frpc_dev->dev.bus = &fastrpc_bus_type;
+
+	dev_set_name(&frpc_dev->dev, "%s-%d-%d",
+			dev_name(frpc_dev->dev.parent), fl->tgid, fl->cid);
+	frpc_dev->dev.release = fastrpc_dev_release;
+	frpc_dev->fl = fl;
+	frpc_dev->handle = fl->tgid;
+
+	err = device_register(&frpc_dev->dev);
+	if (err) {
+		/* device_register failure still needs put_device to free */
+		put_device(&frpc_dev->dev);
+		ADSPRPC_ERR("fastrpc device register failed for process %d with error %d\n",
+			fl->tgid, err);
+		goto bail;
+	}
+	fl->device = frpc_dev;
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	hlist_add_head(&frpc_dev->hn, &me->frpc_devices);
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+bail:
+	return err;
+}
+
+/*
+ * Drop the driver's reference on its device; if the device was already
+ * marked closed, finish unregistering it here. Then unregister the
+ * client driver itself from the bus.
+ */
+void fastrpc_driver_unregister(struct fastrpc_driver *frpc_driver)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct device *dev = NULL;
+	struct fastrpc_device *frpc_dev = NULL;
+	bool is_device_closed = false;
+	unsigned long irq_flags = 0;
+
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	dev = frpc_driver->device;
+	if (dev) {
+		frpc_dev = to_fastrpc_device(dev);
+		if (frpc_dev->refs > 0)
+			frpc_dev->refs--;
+		else
+			ADSPRPC_ERR("Fastrpc device for driver %s is already freed\n",
+				frpc_driver->driver.name);
+		if (frpc_dev->dev_close) {
+			hlist_del_init(&frpc_dev->hn);
+			is_device_closed = true;
+		}
+	}
+	hlist_del_init(&frpc_driver->hn);
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+	if (is_device_closed) {
+		ADSPRPC_INFO("un-registering fastrpc device with handle %d\n",
+			frpc_dev->handle);
+		device_unregister(dev);
+	}
+	driver_unregister(&frpc_driver->driver);
+	ADSPRPC_INFO("Un-registering fastrpc driver %s with handle %d\n",
+		frpc_driver->driver.name, frpc_driver->handle);
+}
+EXPORT_SYMBOL(fastrpc_driver_unregister);
+
+/* Register a kernel client driver on the fastrpc bus and track it */
+int fastrpc_driver_register(struct fastrpc_driver *frpc_driver)
+{
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+	unsigned long irq_flags = 0;
+
+	frpc_driver->driver.bus = &fastrpc_bus_type;
+	frpc_driver->driver.owner = THIS_MODULE;
+	err = driver_register(&frpc_driver->driver);
+	if (err) {
+		ADSPRPC_ERR("fastrpc driver %s failed to register with error %d\n",
+			frpc_driver->driver.name, err);
+		goto bail;
+	}
+	ADSPRPC_INFO("fastrpc driver %s registered with handle %d\n",
+		frpc_driver->driver.name, frpc_driver->handle);
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	hlist_add_head(&frpc_driver->hn, &me->frpc_drivers);
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
+
+bail:
+	return err;
+}
+EXPORT_SYMBOL(fastrpc_driver_register);
+
+/*
+ * Module init: debugfs, global state, fastrpc bus + root device,
+ * platform driver, char devices (secure and non-secure), per-channel
+ * SSR notifiers and the CDSP mini-dump buffer, then the transport and
+ * wakeup sources. Unwinds in strict reverse order via the labels at
+ * the end of the function.
+ */
+static int __init fastrpc_device_init(void)
+{
+	struct fastrpc_apps *me = &gfa;
+	int err = 0, i;
+	uintptr_t attr = 0;
+	dma_addr_t region_phys = 0;
+	void *region_vaddr = NULL;
+	struct fastrpc_buf *buf = NULL;
+
+	debugfs_root = debugfs_create_dir("adsprpc", NULL);
+	if (IS_ERR_OR_NULL(debugfs_root)) {
+		pr_warn("Error: %s: %s: failed to create debugfs root dir\n",
+			current->comm, __func__);
+		debugfs_remove_recursive(debugfs_root);
+		debugfs_root = NULL;
+	}
+	memset(me, 0, sizeof(*me));
+	fastrpc_init(me);
+	fastrpc_get_dsp_status(me);
+	me->dev = NULL;
+	me->legacy_remote_heap = false;
+	err = bus_register(&fastrpc_bus_type);
+	if (err) {
+		ADSPRPC_ERR("fastrpc bus register failed with err %d\n",
+			err);
+		goto bus_register_bail;
+	}
+	err = device_register(&fastrpc_bus);
+	if (err) {
+		ADSPRPC_ERR("fastrpc bus device register failed with err %d\n",
+			err);
+		goto bus_device_register_bail;
+	}
+	me->fastrpc_bus_register = true;
+	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
+	if (err)
+		goto register_bail;
+	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
+					DEVICE_NAME));
+	if (err)
+		goto alloc_chrdev_bail;
+	cdev_init(&me->cdev, &fops);
+	me->cdev.owner = THIS_MODULE;
+	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
+				NUM_DEVICES));
+	if (err)
+		goto cdev_init_bail;
+	me->class = class_create(THIS_MODULE, "fastrpc");
+	VERIFY(err, !IS_ERR(me->class));
+	if (err)
+		goto class_create_bail;
+	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
+
+	/*
+	 * Create devices and register with sysfs
+	 * Create first device with minor number 0
+	 */
+	me->non_secure_dev = device_create(me->class, NULL,
+				MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
+				NULL, DEVICE_NAME);
+	VERIFY(err, !IS_ERR_OR_NULL(me->non_secure_dev));
+	if (err) {
+		err = -ENODEV;
+		goto device_create_bail;
+	}
+
+	/* Create secure device with minor number for secure device */
+	me->secure_dev = device_create(me->class, NULL,
+				MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
+				NULL, DEVICE_NAME_SECURE);
+	VERIFY(err, !IS_ERR_OR_NULL(me->secure_dev));
+	if (err)
+		goto device_create_bail;
+
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		me->jobid[i] = 1;
+		me->channel[i].dev = me->secure_dev;
+		me->channel[i].ssrcount = 0;
+		me->channel[i].prevssrcount = 0;
+		me->channel[i].issubsystemup = 1;
+		me->channel[i].rh_dump_dev = NULL;
+		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
+		me->channel[i].handle = qcom_register_ssr_notifier(
+						gcinfo[i].subsys,
+						&me->channel[i].nb);
+		/* CDSP additionally gets a CMA region for mini-dump */
+		if (i == CDSP_DOMAIN_ID) {
+			me->channel[i].dev = me->non_secure_dev;
+			err = fastrpc_alloc_cma_memory(&region_phys,
+						&region_vaddr,
+						MINI_DUMP_DBG_SIZE,
+						(unsigned long)attr);
+			if (err)
+				ADSPRPC_WARN("%s: CMA alloc failed  err 0x%x\n",
+					__func__, err);
+			
VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL))); + if (err) { + err = -ENOMEM; + ADSPRPC_WARN("%s: CMA alloc failed err 0x%x\n", + __func__, err); + } + INIT_HLIST_NODE(&buf->hn); + buf->virt = region_vaddr; + buf->phys = (uintptr_t)region_phys; + buf->size = MINI_DUMP_DBG_SIZE; + buf->dma_attr = attr; + buf->raddr = 0; + ktime_get_real_ts64(&buf->buf_start_time); + me->channel[i].buf = buf; + } + if (IS_ERR_OR_NULL(me->channel[i].handle)) + pr_warn("adsprpc: %s: SSR notifier register failed for %s with err %d\n", + __func__, gcinfo[i].subsys, + PTR_ERR(me->channel[i].handle)); + else + pr_info("adsprpc: %s: SSR notifier registered for %s\n", + __func__, gcinfo[i].subsys); + } + + err = fastrpc_transport_init(); + if (err) + goto device_create_bail; + me->transport_initialized = 1; + + fastrpc_register_wakeup_source(me->non_secure_dev, + FASTRPC_NON_SECURE_WAKE_SOURCE_CLIENT_NAME, + &me->wake_source); + fastrpc_register_wakeup_source(me->secure_dev, + FASTRPC_SECURE_WAKE_SOURCE_CLIENT_NAME, + &me->wake_source_secure); + + return 0; +device_create_bail: + for (i = 0; i < NUM_CHANNELS; i++) { + if (me->channel[i].handle) + qcom_unregister_ssr_notifier(me->channel[i].handle, + &me->channel[i].nb); + } + if (!IS_ERR_OR_NULL(me->non_secure_dev)) + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), + MINOR_NUM_DEV)); + if (!IS_ERR_OR_NULL(me->secure_dev)) + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), + MINOR_NUM_SECURE_DEV)); + class_destroy(me->class); +class_create_bail: + cdev_del(&me->cdev); +cdev_init_bail: + unregister_chrdev_region(me->dev_no, NUM_CHANNELS); +alloc_chrdev_bail: + platform_driver_unregister(&fastrpc_driver); +register_bail: + device_unregister(&fastrpc_bus); +bus_device_register_bail: + bus_unregister(&fastrpc_bus_type); +bus_register_bail: + fastrpc_deinit(); + return err; +} + +static void __exit fastrpc_device_exit(void) +{ + struct fastrpc_apps *me = &gfa; + int i; + + fastrpc_file_list_dtor(me); + fastrpc_deinit(); + 
wakeup_source_unregister(me->wake_source);
+	wakeup_source_unregister(me->wake_source_secure);
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		/* Mini-dump buf exists only on the CDSP channel */
+		if (i == CDSP_DOMAIN_ID)
+			kfree(me->channel[i].buf);
+		if (!gcinfo[i].name)
+			continue;
+		qcom_unregister_ssr_notifier(me->channel[i].handle,
+						&me->channel[i].nb);
+	}
+
+	/* Destroy the secure and non secure devices */
+	device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
+	device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
+					MINOR_NUM_SECURE_DEV));
+
+	of_reserved_mem_device_release(me->dev);
+	class_destroy(me->class);
+	cdev_del(&me->cdev);
+	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
+	if (me->transport_initialized)
+		fastrpc_transport_deinit();
+	me->transport_initialized = 0;
+	if (me->fastrpc_bus_register) {
+		bus_unregister(&fastrpc_bus_type);
+		device_unregister(&fastrpc_bus);
+	}
+	kfree(me->gidlist.gids);
+	debugfs_remove_recursive(debugfs_root);
+}
+
+module_init(fastrpc_device_init);
+module_exit(fastrpc_device_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/dsp/adsprpc_compat.c b/dsp/adsprpc_compat.c
new file mode 100644
index 0000000000..2f8cb0b18c
--- /dev/null
+++ b/dsp/adsprpc_compat.c
@@ -0,0 +1,1081 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "adsprpc_compat.h"
+#include "adsprpc_shared.h"
+
+/*
+ * 32-bit user-space views of the native fastrpc ioctls. Each
+ * COMPAT_* number must keep the same 'R'/nr pair as its native
+ * counterpart so compat_ioctl can dispatch by command number.
+ */
+#define COMPAT_FASTRPC_IOCTL_INVOKE \
+	_IOWR('R', 1, struct compat_fastrpc_ioctl_invoke)
+#define COMPAT_FASTRPC_IOCTL_MMAP \
+	_IOWR('R', 2, struct compat_fastrpc_ioctl_mmap)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP \
+	_IOWR('R', 3, struct compat_fastrpc_ioctl_munmap)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_FD \
+	_IOWR('R', 4, struct compat_fastrpc_ioctl_invoke_fd)
+#define COMPAT_FASTRPC_IOCTL_INIT \
+	_IOWR('R', 6, struct compat_fastrpc_ioctl_init)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS \
+	_IOWR('R', 7, struct compat_fastrpc_ioctl_invoke_attrs)
+#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
+	_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_CRC \
+	_IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
+#define COMPAT_FASTRPC_IOCTL_CONTROL \
+	_IOWR('R', 12, struct compat_fastrpc_ioctl_control)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+	_IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+	_IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
+#define COMPAT_FASTRPC_IOCTL_GET_DSP_INFO \
+	_IOWR('R', 17, \
+		struct compat_fastrpc_ioctl_capability)
+#define COMPAT_FASTRPC_IOCTL_INVOKE2 \
+	_IOWR('R', 18, struct compat_fastrpc_ioctl_invoke2)
+#define COMPAT_FASTRPC_IOCTL_MEM_MAP \
+	_IOWR('R', 19, struct compat_fastrpc_ioctl_mem_map)
+#define COMPAT_FASTRPC_IOCTL_MEM_UNMAP \
+	_IOWR('R', 20, struct compat_fastrpc_ioctl_mem_unmap)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_PERF \
+	_IOWR('R', 21, struct compat_fastrpc_ioctl_invoke_perf)
+#define COMPAT_FASTRPC_IOCTL_DSPSIGNAL_CREATE \
+	_IOWR('R', 23, struct fastrpc_ioctl_dspsignal_create)
+#define COMPAT_FASTRPC_IOCTL_DSPSIGNAL_DESTROY \
+	_IOWR('R', 24, struct fastrpc_ioctl_dspsignal_destroy)
+#define COMPAT_FASTRPC_IOCTL_DSPSIGNAL_SIGNAL \
+	_IOWR('R', 25, struct fastrpc_ioctl_dspsignal_signal)
+#define COMPAT_FASTRPC_IOCTL_DSPSIGNAL_WAIT \
+	_IOWR('R', 26, struct fastrpc_ioctl_dspsignal_wait)
+#define COMPAT_FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT \
+	_IOWR('R', 27, struct fastrpc_ioctl_dspsignal_cancel_wait)
+
+struct compat_remote_buf {
+	compat_uptr_t pv;	/* buffer pointer */
+	compat_size_t len;	/* length of buffer */
+};
+
+union compat_remote_arg {
+	struct compat_remote_buf buf;
+	compat_uint_t h;
+};
+
+struct compat_fastrpc_ioctl_invoke {
+	compat_uint_t handle;	/* remote handle */
+	compat_uint_t sc;	/* scalars describing the data */
+	compat_uptr_t pra;	/* remote arguments list */
+};
+
+struct compat_fastrpc_ioctl_invoke_fd {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+};
+
+struct compat_fastrpc_ioctl_invoke_attrs {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+	compat_uptr_t attrs;	/* attribute list */
+};
+
+struct compat_fastrpc_ioctl_invoke_crc {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+	compat_uptr_t attrs;	/* attribute list */
+	compat_uptr_t crc;	/* crc list */
+};
+
+struct compat_fastrpc_ioctl_invoke_perf {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;
+	compat_uptr_t attrs;
+	compat_uptr_t crc;
+	compat_uptr_t perf_kernel;
+	compat_uptr_t perf_dsp;
+};
+
+struct compat_fastrpc_ioctl_invoke_async {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;		/* fd list */
+	compat_uptr_t attrs;		/* attribute list */
+	compat_uptr_t crc;		/* crc list */
+	compat_uptr_t perf_kernel;	/* Kernel perf data pointer */
+	compat_uptr_t perf_dsp;		/* DSP perf data pointer */
+	compat_uptr_t job;		/* Async job */
+};
+/* Older async layout without the perf pointers; detected by size */
+struct compat_fastrpc_ioctl_invoke_async_no_perf {
+	struct compat_fastrpc_ioctl_invoke inv;
+	compat_uptr_t fds;	/* fd list */
+	compat_uptr_t attrs;	/* attribute list */
+	compat_uptr_t crc;	/* crc list */
+	compat_uptr_t job;	/* Async job */
+};
+
+struct compat_fastrpc_ioctl_invoke2 {
+	compat_uint_t req;	/* type of invocation request */
+	compat_uptr_t invparam;	/* invocation request param */
+	compat_uint_t size;	/* size of invocation param */
+	compat_int_t err;	/* reserved */
+};
+struct compat_fastrpc_ioctl_async_response {
+	compat_u64 jobid;		/* job id generated by user */
+	compat_int_t result;		/* result from DSP */
+	compat_uptr_t perf_kernel;	/* Kernel perf data pointer */
+	compat_uptr_t perf_dsp;		/* DSP perf data pointer */
+	compat_uint_t handle;
+	compat_uint_t sc;
+};
+
+struct compat_fastrpc_ioctl_notif_rsp {
+	compat_int_t domain;	/* Domain of User PD */
+	compat_int_t session;	/* Session ID of User PD */
+	compat_uint_t status;	/* Status of the process */
+};
+
+struct compat_fastrpc_mem_map {
+	compat_int_t fd;	/* ion fd */
+	compat_int_t offset;	/* buffer offset */
+	compat_uint_t flags;	/* flags to control memory map */
+	compat_uint_t attrs;	/* buffer attributes used for SMMU mapping */
+	compat_uptr_t vaddrin;	/* virtual address */
+	compat_size_t length;	/* buffer length */
+	compat_u64 vaddrout;	/* dsp virtual address */
+};
+
+struct compat_fastrpc_ioctl_mem_map {
+	compat_int_t version;
+	union {
+		struct compat_fastrpc_mem_map m;
+		compat_int_t reserved[MAP_RESERVED_NUM];
+	};
+};
+
+struct compat_fastrpc_mem_unmap {
+	compat_int_t fd;	/* ion fd */
+	compat_u64 vaddr;	/* dsp virtual address */
+	compat_size_t length;	/* buffer length */
+};
+
+struct compat_fastrpc_ioctl_mem_unmap {
+	compat_int_t version;
+	union {
+		struct compat_fastrpc_mem_unmap um;
+		compat_int_t reserved[UNMAP_RESERVED_NUM];
+	};
+};
+
+struct compat_fastrpc_ioctl_mmap {
+	compat_int_t fd;	/* ion fd */
+	compat_uint_t flags;	/* flags for dsp to map with */
+	compat_uptr_t vaddrin;	/* optional virtual address */
+	compat_size_t size;	/* size */
+	compat_uptr_t vaddrout;	/* dsps virtual address */
+};
+
+struct compat_fastrpc_ioctl_mmap_64 {
+	compat_int_t fd;	/* ion fd */
+	compat_uint_t flags;	/* flags for dsp to map with */
+	compat_u64 vaddrin;	/* optional virtual address */
+	compat_size_t size;	/* size */
+	compat_u64 vaddrout;	/* dsps virtual address */
+};
+
+struct compat_fastrpc_ioctl_munmap {
+	compat_uptr_t vaddrout;	/* address to unmap */
+	compat_size_t size;	/* size */
+};
+
+struct compat_fastrpc_ioctl_munmap_64 {
+	compat_u64 vaddrout;	/* address to unmap */
+	compat_size_t size;	/* size */
+};
+
+struct compat_fastrpc_ioctl_init {
+	compat_uint_t flags;	/* one of FASTRPC_INIT_* macros */
+	compat_uptr_t file;	/* pointer to elf file */
+	compat_int_t filelen;	/* elf file length */
+	compat_int_t filefd;	/* ION fd for the file */
+	compat_uptr_t mem;	/* mem for the PD */
+	compat_int_t memlen;	/* mem length */
+	compat_int_t memfd;	/* ION fd for the mem */
+};
+
+struct compat_fastrpc_ioctl_init_attrs {
+	struct compat_fastrpc_ioctl_init init;
+	compat_int_t attrs;	/* attributes to init process */
+	compat_int_t siglen;	/* test signature file length */
+};
+
+#define FASTRPC_CONTROL_LATENCY (1)
+struct compat_fastrpc_ctrl_latency {
+	compat_uint_t enable;	/* latency control enable */
+	compat_uint_t latency;	/* target latency in us */
+};
+
+#define FASTRPC_CONTROL_KALLOC (3)
+struct compat_fastrpc_ctrl_kalloc {
+	compat_uint_t kalloc_support;	/* Remote memory allocation from kernel */
+};
+
+struct compat_fastrpc_ctrl_wakelock {
+	compat_uint_t enable;	/* wakelock control enable */
+};
+
+struct compat_fastrpc_ctrl_pm {
+	compat_uint_t timeout;	/* timeout(in ms) for PM to keep system awake */
+};
+
+struct compat_fastrpc_ioctl_control {
+	compat_uint_t req;
+	union {
+		struct compat_fastrpc_ctrl_latency lp;
+		struct compat_fastrpc_ctrl_kalloc kalloc;
+		struct compat_fastrpc_ctrl_wakelock wp;
+		struct compat_fastrpc_ctrl_pm pm;
+	};
+};
+
+struct compat_fastrpc_ioctl_capability {
+	/*
+	 * @param[in]: DSP domain ADSP_DOMAIN_ID,
+	 * SDSP_DOMAIN_ID, or CDSP_DOMAIN_ID
+	 */
+	compat_uint_t domain;
+	/*
+	 * @param[in]: One of the DSP attributes
+	 * from enum remote_dsp_attributes
+	 */
+	compat_uint_t attribute_ID;
+	/*
+	 * 
@param[out]: Result of the DSP
+	 * capability query based on attribute_ID
+	 */
+	compat_uint_t capability;
+};
+
+/*
+ * Thunk a 32-bit invoke request into the native layout. The native
+ * remote-arg array lives in the trailing allocation directly after
+ * *inv (pra = (union remote_arg *)(inv + 1)), so the caller must have
+ * allocated sizeof(*inv) + REMOTE_SCALARS_LENGTH(sc) * sizeof(arg).
+ * NOTE(review): @cmd doubles as either a COMPAT_FASTRPC_IOCTL_* number
+ * or the FASTRPC_INVOKE2_ASYNC req value — the comparisons below rely
+ * on those value spaces not colliding; confirm when adding commands.
+ */
+static int compat_get_fastrpc_ioctl_invoke(
+			struct compat_fastrpc_ioctl_invoke_async __user *inv32,
+			struct fastrpc_ioctl_invoke_async *inv,
+			unsigned int cmd, unsigned int sc)
+{
+	compat_uint_t u = 0;
+	compat_size_t s;
+	compat_uptr_t p, k;
+	union compat_remote_arg *pra32;
+	union remote_arg *pra;
+	int err = 0, len = 0, j = 0;
+
+	len = REMOTE_SCALARS_LENGTH(sc);
+
+	pra = (union remote_arg *)(inv + 1);
+	memcpy(&inv->inv.pra, &pra, sizeof(pra));
+	memcpy(&inv->inv.sc, &sc, sizeof(sc));
+	err |= get_user(u, &inv32->inv.handle);
+	memcpy(&inv->inv.handle, &u, sizeof(u));
+	err |= get_user(p, &inv32->inv.pra);
+	if (err)
+		return err;
+	pra32 = compat_ptr(p);
+	pra = (union remote_arg *)(inv + 1);
+	for (j = 0; j < len; j++) {
+		err |= get_user(p, &pra32[j].buf.pv);
+		memcpy((uintptr_t *)&pra[j].buf.pv, &p, sizeof(p));
+		err |= get_user(s, &pra32[j].buf.len);
+		memcpy(&pra[j].buf.len, &s, sizeof(s));
+	}
+
+	/* Optional trailing pointers exist only for the richer commands */
+	inv->fds = NULL;
+	if (cmd != COMPAT_FASTRPC_IOCTL_INVOKE) {
+		err |= get_user(p, &inv32->fds);
+		memcpy(&inv->fds, &p, sizeof(p));
+	}
+	inv->attrs = NULL;
+	if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) ||
+		(cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) ||
+		(cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF) ||
+		(cmd == FASTRPC_INVOKE2_ASYNC)) {
+		err |= get_user(p, &inv32->attrs);
+		memcpy(&inv->attrs, &p, sizeof(p));
+	}
+	inv->crc = NULL;
+	if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) ||
+		(cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF)) {
+		err |= get_user(p, &inv32->crc);
+		memcpy(&inv->crc, &p, sizeof(p));
+	}
+	inv->job = NULL;
+	if (cmd == FASTRPC_INVOKE2_ASYNC) {
+		err |= get_user(p, &inv32->job);
+		memcpy(&inv->job, &p, sizeof(p));
+	}
+	inv->perf_kernel = NULL;
+	inv->perf_dsp = NULL;
+
+	if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF) || (cmd == FASTRPC_INVOKE2_ASYNC)) {
+		err |= get_user(k, &inv32->perf_kernel);
+		err |= get_user(p, &inv32->perf_dsp);
+		memcpy(&inv->perf_kernel, &k, sizeof(k));
+		memcpy(&inv->perf_dsp, &p, sizeof(p));
+	}
+	return err;
+}
+
+/*
+ * compat path for the plain invoke ioctls: allocate a native request
+ * (header + remote args in one buffer), thunk, and invoke.
+ * NOTE(review): inv is kmalloc'd and never kfree'd here, and the
+ * kmalloc failure is reported as -EFAULT rather than -ENOMEM — both
+ * look like pre-existing issues worth a follow-up.
+ */
+static int compat_fastrpc_ioctl_invoke(struct file *filp,
+			unsigned int cmd, unsigned long arg)
+{
+	struct compat_fastrpc_ioctl_invoke_async __user *inv32;
+	struct fastrpc_ioctl_invoke_async *inv;
+	compat_uint_t sc = 0;
+	int err = 0, len = 0;
+	struct fastrpc_file *fl = (struct fastrpc_file *)filp->private_data;
+
+	inv32 = compat_ptr(arg);
+	err = get_user(sc, &inv32->inv.sc);
+	if (err)
+		return err;
+	len = REMOTE_SCALARS_LENGTH(sc);
+	VERIFY(err, NULL != (inv = kmalloc(
+			sizeof(*inv) + len * sizeof(union remote_arg), GFP_KERNEL)));
+	if (err)
+		return -EFAULT;
+	VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32,
+						inv, cmd, sc));
+	if (err)
+		return err;
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+					fl->mode, USER_MSG, inv)));
+	return err;
+}
+
+/*
+ * Thunk an invoke2 request. For FASTRPC_INVOKE2_ASYNC the native
+ * fastrpc_ioctl_invoke_async (plus its remote args) is packed into
+ * the same allocation after *inv2_user; a smaller user size selects
+ * the legacy no-perf layout. For the other req types only the header
+ * is copied and invparam is passed through as a user pointer.
+ */
+static int compat_get_fastrpc_ioctl_invoke2(
+			struct compat_fastrpc_ioctl_invoke2 __user *inv32,
+			struct fastrpc_ioctl_invoke2 **inva,
+			unsigned int cmd)
+{
+	int err = 0;
+	compat_uptr_t pparam, p;
+	compat_uint_t req, size, ref_size = 0;
+	struct fastrpc_ioctl_invoke2 *inv2_user = NULL;
+	struct fastrpc_ioctl_invoke_async *asyncinv_user;
+
+	err = get_user(req, &inv32->req);
+	err |= get_user(pparam, &inv32->invparam);
+	err |= get_user(size, &inv32->size);
+	if (err)
+		goto bail;
+
+	switch (req) {
+	case FASTRPC_INVOKE2_ASYNC:
+	{
+		struct compat_fastrpc_ioctl_invoke_async __user *lasync32;
+		struct compat_fastrpc_ioctl_invoke_async_no_perf __user *lasync32_old;
+		compat_uint_t sc = 0;
+		int len = 0;
+
+		VERIFY(err, size <= sizeof(*lasync32));
+		if (err) {
+			err = -EBADE;
+			goto bail;
+		}
+		lasync32 = compat_ptr(pparam);
+		err = get_user(sc, &lasync32->inv.sc);
+		if (err)
+			goto bail;
+		len = REMOTE_SCALARS_LENGTH(sc);
+		VERIFY(err, NULL != (inv2_user = kmalloc(
+				sizeof(*inv2_user) + sizeof(*asyncinv_user) +
+				len * sizeof(union remote_arg), GFP_KERNEL)));
+		if (err) {
+			err = -EFAULT;
+			goto bail;
+		}
+		asyncinv_user =
+			(struct fastrpc_ioctl_invoke_async *)(inv2_user + 1);
+		if (size < sizeof(struct compat_fastrpc_ioctl_invoke_async)) {
+			/* Legacy layout: job sits where perf fields are now */
+			lasync32_old = compat_ptr(pparam);
+			VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(lasync32,
+				asyncinv_user, COMPAT_FASTRPC_IOCTL_INVOKE_CRC, sc));
+			if (err)
+				goto bail;
+
+			asyncinv_user->job = NULL;
+			err |= get_user(p, &lasync32_old->job);
+			memcpy(&asyncinv_user->job, &p, sizeof(p));
+			asyncinv_user->perf_kernel = NULL;
+			asyncinv_user->perf_dsp = NULL;
+		} else {
+			VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(lasync32,
+				asyncinv_user, req, sc));
+		}
+		if (err)
+			goto bail;
+		memcpy(&inv2_user->req, &req, sizeof(req));
+		inv2_user->invparam = (uintptr_t)asyncinv_user;
+		inv2_user->size = sizeof(*asyncinv_user);
+		if (err)
+			goto bail;
+		break;
+	}
+	case FASTRPC_INVOKE2_ASYNC_RESPONSE:
+		ref_size = sizeof(struct compat_fastrpc_ioctl_async_response);
+		VERIFY(err, size <= ref_size);
+		if (err) {
+			err = -EBADE;
+			goto bail;
+		}
+		fallthrough;
+	case FASTRPC_INVOKE2_KERNEL_OPTIMIZATIONS:
+		if (!ref_size) {
+			ref_size = sizeof(uint32_t);
+			VERIFY(err, size == ref_size);
+			if (err) {
+				err = -EBADE;
+				goto bail;
+			}
+		}
+		fallthrough;
+	case FASTRPC_INVOKE2_STATUS_NOTIF:
+	{
+		if (!ref_size) {
+			ref_size = sizeof(struct compat_fastrpc_ioctl_notif_rsp);
+			VERIFY(err, size == ref_size);
+			if (err) {
+				err = -EBADE;
+				goto bail;
+			}
+		}
+		VERIFY(err, NULL != (inv2_user = kmalloc(
+					sizeof(*inv2_user), GFP_KERNEL)));
+		if (err) {
+			err = -EFAULT;
+			goto bail;
+		}
+		memcpy(&inv2_user->req, &req, sizeof(req));
+		memcpy(&inv2_user->invparam, &pparam, sizeof(pparam));
+		memcpy(&inv2_user->size, &size, sizeof(size));
+		if (err)
+			goto bail;
+		break;
+	}
+	default:
+		err = -ENOTTY;
+		break;
+	}
+	*inva = inv2_user;
+bail:
+	return err;
+}
+
+static int compat_fastrpc_ioctl_invoke2(struct file *filp,
+			unsigned int cmd, unsigned long arg)
+{
+	struct compat_fastrpc_ioctl_invoke2 __user 
*inv32;
+	struct fastrpc_ioctl_invoke2 *inv;
+	int err = 0;
+	struct fastrpc_file *fl = (struct fastrpc_file *)filp->private_data;
+
+	inv32 = compat_ptr(arg);
+	VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke2(inv32,
+							&inv, cmd));
+	if (err)
+		return err;
+
+	VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, inv)));
+	return err;
+}
+
+/*
+ * Thunk a 32-bit mem_map request into the native layout. Only
+ * version 0 is understood; each field is fetched with get_user and
+ * copied into the native struct.
+ */
+static int compat_get_fastrpc_ioctl_mem_map(
+			struct compat_fastrpc_ioctl_mem_map __user *map32,
+			struct fastrpc_ioctl_mem_map *map)
+{
+	compat_uint_t u = 0;
+	compat_int_t i = 0;
+	compat_size_t s = 0;
+	compat_uptr_t p = 0;
+	int err;
+
+	err = get_user(i, &map32->version);
+	if (err || i != 0)
+		return -EINVAL;
+
+	memcpy(&map->version, &i, sizeof(i));
+	err |= get_user(i, &map32->m.fd);
+	memcpy(&map->m.fd, &i, sizeof(i));
+	err |= get_user(i, &map32->m.offset);
+	memcpy(&map->m.offset, &i, sizeof(i));
+	err |= get_user(u, &map32->m.flags);
+	memcpy(&map->m.flags, &u, sizeof(u));
+	err |= get_user(p, &map32->m.vaddrin);
+	/* Fix: copy the fetched user pointer p (the original copied the
+	 * still-zero length variable s, so vaddrin was always 0).
+	 */
+	memcpy(&map->m.vaddrin, &p, sizeof(p));
+	err |= get_user(s, &map32->m.length);
+	memcpy(&map->m.length, &s, sizeof(s));
+	err |= get_user(u, &map32->m.attrs);
+	memcpy(&map->m.attrs, &u, sizeof(u));
+
+	return err;
+}
+
+/* Copy the DSP address produced by the native call back to user space */
+static int compat_put_fastrpc_ioctl_mem_map(
+			struct compat_fastrpc_ioctl_mem_map __user *map32,
+			struct fastrpc_ioctl_mem_map *map)
+{
+	compat_u64 p;
+	int err = 0;
+
+	memcpy(&p, &map->m.vaddrout, sizeof(map->m.vaddrout));
+	err |= put_user(p, &map32->m.vaddrout);
+
+	return err;
+}
+
+/* Thunk a 32-bit mem_unmap request (version 0 only) */
+static int compat_get_fastrpc_ioctl_mem_unmap(
+			struct compat_fastrpc_ioctl_mem_unmap __user *unmap32,
+			struct fastrpc_ioctl_mem_unmap __user *unmap)
+{
+	compat_int_t i;
+	compat_size_t s;
+	compat_u64 p;
+	int err;
+
+	err = get_user(i, &unmap32->version);
+	if (err || i != 0)
+		return -EINVAL;
+
+	memcpy(&unmap->version, &i, sizeof(i));
+	err |= get_user(i, &unmap32->um.fd);
+	memcpy(&unmap->um.fd, &i, sizeof(i));
+	err |= get_user(p, &unmap32->um.vaddr);
+	memcpy(&unmap->um.vaddr, &p,
sizeof(p)); + err |= get_user(s, &unmap32->um.length); + memcpy(&unmap->um.length, &s, sizeof(s)); + + return err; +} + +static int compat_get_fastrpc_ioctl_mmap( + struct compat_fastrpc_ioctl_mmap __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_uint_t u; + compat_int_t i; + compat_size_t s; + compat_uptr_t p; + int err; + + err = get_user(i, &map32->fd); + memcpy(&map->fd, &i, sizeof(i)); + err |= get_user(u, &map32->flags); + memcpy(&map->flags, &u, sizeof(u)); + err |= get_user(p, &map32->vaddrin); + memcpy((uintptr_t *)&map->vaddrin, &p, sizeof(p)); + err |= get_user(s, &map32->size); + memcpy(&map->size, &s, sizeof(s)); + + return err; +} + +static int compat_get_fastrpc_ioctl_mmap_64( + struct compat_fastrpc_ioctl_mmap_64 __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_uint_t u; + compat_int_t i; + compat_size_t s; + compat_u64 p; + int err; + + err = get_user(i, &map32->fd); + memcpy(&map->fd, &i, sizeof(i)); + err |= get_user(u, &map32->flags); + memcpy(&map->flags, &u, sizeof(u)); + err |= get_user(p, &map32->vaddrin); + memcpy(&map->vaddrin, &p, sizeof(p)); + err |= get_user(s, &map32->size); + memcpy(&map->size, &s, sizeof(s)); + + return err; +} + +static int compat_put_fastrpc_ioctl_mmap( + struct compat_fastrpc_ioctl_mmap __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_uptr_t p; + int err = 0; + + memcpy(&p, &map->vaddrout, sizeof(p)); + err |= put_user(p, &map32->vaddrout); + + return err; +} + +static int compat_put_fastrpc_ioctl_mmap_64( + struct compat_fastrpc_ioctl_mmap_64 __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_u64 p; + int err = 0; + + memcpy(&p, &map->vaddrout, sizeof(map->vaddrout)); + err |= put_user(p, &map32->vaddrout); + + return err; +} + +static int compat_get_fastrpc_ioctl_munmap( + struct compat_fastrpc_ioctl_munmap __user *unmap32, + struct fastrpc_ioctl_munmap __user *unmap) +{ + compat_uptr_t p; + compat_size_t s; + int err; + + err = 
get_user(p, &unmap32->vaddrout); + memcpy(&unmap->vaddrout, &p, sizeof(p)); + err |= get_user(s, &unmap32->size); + memcpy(&unmap->size, &s, sizeof(s)); + + return err; +} + +static int compat_get_fastrpc_ioctl_munmap_64( + struct compat_fastrpc_ioctl_munmap_64 __user *unmap32, + struct fastrpc_ioctl_munmap *unmap) +{ + compat_u64 p; + compat_size_t s; + int err; + + err = get_user(p, &unmap32->vaddrout); + memcpy(&unmap->vaddrout, &p, sizeof(p)); + err |= get_user(s, &unmap32->size); + memcpy(&unmap->size, &s, sizeof(s)); + + return err; +} + +static int compat_get_fastrpc_ioctl_control( + struct compat_fastrpc_ioctl_control __user *ctrl32, + struct fastrpc_ioctl_control *ctrl) +{ + compat_uptr_t p; + int err; + + err = get_user(p, &ctrl32->req); + memcpy(&ctrl->req, &p, sizeof(p)); + if ((p == FASTRPC_CONTROL_LATENCY) || (p == FASTRPC_CONTROL_RPC_POLL)) { + err |= get_user(p, &ctrl32->lp.enable); + memcpy(&ctrl->lp.enable, &p, sizeof(p)); + err |= get_user(p, &ctrl32->lp.latency); + memcpy(&ctrl->lp.latency, &p, sizeof(p)); + } else if (p == FASTRPC_CONTROL_WAKELOCK) { + err |= get_user(p, &ctrl32->wp.enable); + memcpy(&ctrl->wp.enable, &p, sizeof(p)); + } else if (p == FASTRPC_CONTROL_PM) { + err |= get_user(p, &ctrl32->pm.timeout); + memcpy(&ctrl->pm.timeout, &p, sizeof(p)); + } + + return err; +} + +static int compat_get_fastrpc_ioctl_init( + struct compat_fastrpc_ioctl_init_attrs __user *init32, + struct fastrpc_ioctl_init_attrs *init, + unsigned int cmd) +{ + compat_uint_t u; + compat_uptr_t p; + compat_int_t i; + int err; + + err = get_user(u, &init32->init.flags); + memcpy(&init->init.flags, &u, sizeof(u)); + err |= get_user(p, &init32->init.file); + memcpy(&init->init.file, &p, sizeof(p)); + err |= get_user(i, &init32->init.filelen); + memcpy(&init->init.filelen, &i, sizeof(i)); + err |= get_user(i, &init32->init.filefd); + memcpy(&init->init.filefd, &i, sizeof(i)); + err |= get_user(p, &init32->init.mem); + memcpy(&init->init.mem, &p, sizeof(p)); + err 
|= get_user(i, &init32->init.memlen); + memcpy(&init->init.memlen, &i, sizeof(i)); + err |= get_user(i, &init32->init.memfd); + memcpy(&init->init.memfd, &i, sizeof(i)); + + init->attrs = 0; + if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) { + err |= get_user(i, &init32->attrs); + memcpy((compat_uptr_t *)&init->attrs, &i, sizeof(i)); + } + + init->siglen = 0; + if (cmd == COMPAT_FASTRPC_IOCTL_INIT_ATTRS) { + err |= get_user(i, &init32->siglen); + memcpy((compat_uptr_t *)&init->siglen, &i, sizeof(i)); + } + + return err; +} + +static int compat_put_fastrpc_ioctl_get_dsp_info( + struct compat_fastrpc_ioctl_capability __user *info32, + struct fastrpc_ioctl_capability __user *info) +{ + compat_uint_t u; + int err = 0; + + memcpy(&u, &info->capability, sizeof(u)); + err |= put_user(u, &info32->capability); + return err; +} + +static int compat_fastrpc_control(struct fastrpc_file *fl, + unsigned long arg) +{ + int err = 0; + struct compat_fastrpc_ioctl_control __user *ctrl32; + struct fastrpc_ioctl_control *ctrl; + compat_uptr_t p; + + ctrl32 = compat_ptr(arg); + VERIFY(err, NULL != (ctrl = kmalloc( + sizeof(*ctrl), GFP_KERNEL))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_control(ctrl32, + ctrl)); + if (err) + return err; + VERIFY(err, 0 == (err = fastrpc_internal_control(fl, ctrl))); + if (err) + return err; + err = get_user(p, &ctrl32->req); + if (err) + return err; + if (p == FASTRPC_CONTROL_KALLOC) { + memcpy(&p, &ctrl->kalloc.kalloc_support, sizeof(ctrl->kalloc.kalloc_support)); + err |= put_user(p, &ctrl32->kalloc.kalloc_support); + } + return err; +} + +static int compat_fastrpc_get_dsp_info(struct fastrpc_file *fl, + unsigned long arg) +{ + struct compat_fastrpc_ioctl_capability __user *info32 = NULL; + struct fastrpc_ioctl_capability *info = NULL; + compat_uint_t u; + int err = 0; + + info32 = compat_ptr(arg); + VERIFY(err, NULL != (info = kmalloc( + sizeof(*info), GFP_KERNEL))); + + err = get_user(u, &info32->domain); + if (err) + 
return err; + memcpy(&info->domain, &u, sizeof(info->domain)); + + err = get_user(u, &info32->attribute_ID); + if (err) + return err; + memcpy(&info->attribute_ID, &u, sizeof(info->attribute_ID)); + + err = fastrpc_get_info_from_kernel(info, fl); + if (err) + return err; + + err = compat_put_fastrpc_ioctl_get_dsp_info(info32, info); + + return err; +} + +static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + + switch (cmd) { + case COMPAT_FASTRPC_IOCTL_MEM_MAP: + { + struct compat_fastrpc_ioctl_mem_map __user *map32; + struct fastrpc_ioctl_mem_map *map; + + map32 = compat_ptr(arg); + VERIFY(err, NULL != (map = kmalloc( + sizeof(*map), GFP_KERNEL))); + if (err) + return -EFAULT; + + err = compat_get_fastrpc_ioctl_mem_map(map32, map); + if (err) + return err; + + VERIFY(err, 0 == (err = fastrpc_internal_mem_map(fl, + map))); + if (err) + return err; + VERIFY(err, 0 == compat_put_fastrpc_ioctl_mem_map(map32, map)); + return err; + } + case COMPAT_FASTRPC_IOCTL_MEM_UNMAP: + { + struct compat_fastrpc_ioctl_mem_unmap __user *unmap32; + struct fastrpc_ioctl_mem_unmap *unmap; + + unmap32 = compat_ptr(arg); + unmap = kmalloc(sizeof(*unmap), GFP_KERNEL); + if (unmap == NULL) + return -EFAULT; + + err = compat_get_fastrpc_ioctl_mem_unmap(unmap32, unmap); + if (err) + return err; + + VERIFY(err, 0 == (err = fastrpc_internal_mem_unmap(fl, + unmap))); + return err; + } + case COMPAT_FASTRPC_IOCTL_MMAP: + { + struct compat_fastrpc_ioctl_mmap __user *map32; + struct fastrpc_ioctl_mmap *map; + + map32 = compat_ptr(arg); + VERIFY(err, NULL != (map = kmalloc( + sizeof(*map), GFP_KERNEL))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap(map32, map)); + if (err) + return err; + + VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, map))); + + VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map)); + return err; + } + case COMPAT_FASTRPC_IOCTL_MMAP_64: + { + struct 
compat_fastrpc_ioctl_mmap_64 __user *map32; + struct fastrpc_ioctl_mmap *map; + + map32 = compat_ptr(arg); + VERIFY(err, NULL != (map = kmalloc( + sizeof(*map), GFP_KERNEL))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map)); + if (err) + return err; + VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, map))); + VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map)); + return err; + } + case COMPAT_FASTRPC_IOCTL_MUNMAP: + { + struct compat_fastrpc_ioctl_munmap __user *unmap32; + struct fastrpc_ioctl_munmap *unmap; + + unmap32 = compat_ptr(arg); + VERIFY(err, NULL != (unmap = kmalloc( + sizeof(*unmap), GFP_KERNEL))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap(unmap32, + unmap)); + if (err) + return err; + VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, + unmap))); + return err; + } + default: + return -ENOIOCTLCMD; + } +} + +static long compat_fastrpc_dspsignal_create(struct fastrpc_file *fl, unsigned long arg) +{ + struct fastrpc_ioctl_dspsignal_create __user *uc = compat_ptr(arg); + struct fastrpc_ioctl_dspsignal_create c; + int err = 0; + + err = copy_from_user(&c, uc, sizeof(c)); + if (err) + return -EFAULT; + return fastrpc_dspsignal_create(fl, &c); +} + +static long compat_fastrpc_dspsignal_destroy(struct fastrpc_file *fl, unsigned long arg) +{ + struct fastrpc_ioctl_dspsignal_destroy __user *uc = compat_ptr(arg); + struct fastrpc_ioctl_dspsignal_destroy c; + int err = 0; + + err = copy_from_user(&c, uc, sizeof(c)); + if (err) + return -EFAULT; + return fastrpc_dspsignal_destroy(fl, &c); +} + +static long compat_fastrpc_dspsignal_signal(struct fastrpc_file *fl, unsigned long arg) +{ + struct fastrpc_ioctl_dspsignal_signal __user *uc = compat_ptr(arg); + struct fastrpc_ioctl_dspsignal_signal c; + int err = 0; + + err = copy_from_user(&c, uc, sizeof(c)); + if (err) + return -EFAULT; + return fastrpc_dspsignal_signal(fl, &c); +} + +static long 
compat_fastrpc_dspsignal_wait(struct fastrpc_file *fl, unsigned long arg) +{ + struct fastrpc_ioctl_dspsignal_wait __user *uc = compat_ptr(arg); + struct fastrpc_ioctl_dspsignal_wait c; + int err = 0; + + err = copy_from_user(&c, uc, sizeof(c)); + if (err) + return -EFAULT; + return fastrpc_dspsignal_wait(fl, &c); +} + +static long compat_fastrpc_dspsignal_cancel_wait(struct fastrpc_file *fl, unsigned long arg) +{ + struct fastrpc_ioctl_dspsignal_cancel_wait __user *uc = compat_ptr(arg); + struct fastrpc_ioctl_dspsignal_cancel_wait c; + int err = 0; + + err = copy_from_user(&c, uc, sizeof(c)); + if (err) + return -EFAULT; + return fastrpc_dspsignal_cancel_wait(fl, &c); +} + +long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int err = 0; + struct fastrpc_file *fl = (struct fastrpc_file *)filp->private_data; + + if (!filp->f_op || !filp->f_op->unlocked_ioctl) + return -ENOTTY; + + fl->is_compat = true; + switch (cmd) { + case COMPAT_FASTRPC_IOCTL_INVOKE: + case COMPAT_FASTRPC_IOCTL_INVOKE_FD: + case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS: + case COMPAT_FASTRPC_IOCTL_INVOKE_CRC: + case COMPAT_FASTRPC_IOCTL_INVOKE_PERF: + { + return compat_fastrpc_ioctl_invoke(filp, cmd, arg); + } + case COMPAT_FASTRPC_IOCTL_INVOKE2: + { + return compat_fastrpc_ioctl_invoke2(filp, cmd, arg); + } + case COMPAT_FASTRPC_IOCTL_MUNMAP_64: + { + struct compat_fastrpc_ioctl_munmap_64 __user *unmap32; + struct fastrpc_ioctl_munmap *unmap; + + unmap32 = compat_ptr(arg); + VERIFY(err, NULL != (unmap = kmalloc( + sizeof(*unmap), GFP_KERNEL))); + + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32, + unmap)); + if (err) + return err; + + VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, + unmap))); + return err; + } + case COMPAT_FASTRPC_IOCTL_INIT: + fallthrough; + case COMPAT_FASTRPC_IOCTL_INIT_ATTRS: + { + struct compat_fastrpc_ioctl_init_attrs __user *init32; + struct fastrpc_ioctl_init_attrs *init; + + init32 = 
compat_ptr(arg); + VERIFY(err, NULL != (init = kmalloc( + sizeof(*init), GFP_KERNEL))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_init(init32, + init, cmd)); + if (err) + return err; + VERIFY(err, 0 == (err = fastrpc_init_process(fl, init))); + + return err; + + } + case FASTRPC_IOCTL_GETINFO: + { + compat_uptr_t __user *info32; + uint32_t *info; + compat_uint_t u; + + info32 = compat_ptr(arg); + VERIFY(err, NULL != (info = kmalloc( + sizeof(*info), GFP_KERNEL))); + if (err) + return -EFAULT; + err = get_user(u, info32); + memcpy(info, &u, sizeof(u)); + if (err) + return err; + VERIFY(err, 0 == (err = fastrpc_get_info(fl, info))); + memcpy(&u, info, sizeof(*info)); + err |= put_user(u, info32); + return err; + } + case FASTRPC_IOCTL_SETMODE: + return fastrpc_setmode(arg, fl); + case COMPAT_FASTRPC_IOCTL_CONTROL: + { + return compat_fastrpc_control(fl, arg); + } + case COMPAT_FASTRPC_IOCTL_GET_DSP_INFO: + { + return compat_fastrpc_get_dsp_info(fl, arg); + } + case COMPAT_FASTRPC_IOCTL_MEM_MAP: + fallthrough; + case COMPAT_FASTRPC_IOCTL_MEM_UNMAP: + fallthrough; + case COMPAT_FASTRPC_IOCTL_MMAP: + fallthrough; + case COMPAT_FASTRPC_IOCTL_MMAP_64: + fallthrough; + case COMPAT_FASTRPC_IOCTL_MUNMAP: + return compat_fastrpc_mmap_device_ioctl(fl, cmd, arg); + case COMPAT_FASTRPC_IOCTL_DSPSIGNAL_CREATE: + return compat_fastrpc_dspsignal_create(fl, arg); + case COMPAT_FASTRPC_IOCTL_DSPSIGNAL_DESTROY: + return compat_fastrpc_dspsignal_destroy(fl, arg); + case COMPAT_FASTRPC_IOCTL_DSPSIGNAL_SIGNAL: + return compat_fastrpc_dspsignal_signal(fl, arg); + case COMPAT_FASTRPC_IOCTL_DSPSIGNAL_WAIT: + return compat_fastrpc_dspsignal_wait(fl, arg); + case COMPAT_FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT: + return compat_fastrpc_dspsignal_cancel_wait(fl, arg); + default: + return -ENOTTY; + } +} diff --git a/dsp/adsprpc_compat.h b/dsp/adsprpc_compat.h new file mode 100644 index 0000000000..1366751d77 --- /dev/null +++ b/dsp/adsprpc_compat.h @@ -0,0 +1,17 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, 2018-2019 The Linux Foundation. All rights reserved. + */ +#ifndef ADSPRPC_COMPAT_H +#define ADSPRPC_COMPAT_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); +#else + +#define compat_fastrpc_device_ioctl NULL + +#endif /* CONFIG_COMPAT */ +#endif /* ADSPRPC_COMPAT_H */ diff --git a/dsp/adsprpc_rpmsg.c b/dsp/adsprpc_rpmsg.c new file mode 100644 index 0000000000..68ef1c5180 --- /dev/null +++ b/dsp/adsprpc_rpmsg.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "adsprpc_shared.h" + +struct frpc_transport_session_control { + struct rpmsg_device *rpdev; + struct mutex rpmsg_mutex; + char *subsys; + /* Flags for DSP up mutex */ + wait_queue_head_t wait_for_rpmsg_ch; + atomic_t is_rpmsg_ch_up; +}; + +static struct frpc_transport_session_control rpmsg_session_control[NUM_CHANNELS]; + +inline int verify_transport_device(int cid, bool trusted_vm) +{ + int err = 0; + struct frpc_transport_session_control *rpmsg_session = &rpmsg_session_control[cid]; + + mutex_lock(&rpmsg_session->rpmsg_mutex); + VERIFY(err, NULL != rpmsg_session->rpdev); + if (err) { + err = -ENODEV; + mutex_unlock(&rpmsg_session->rpmsg_mutex); + goto bail; + } + mutex_unlock(&rpmsg_session->rpmsg_mutex); +bail: + return err; +} + +static inline int get_cid_from_rpdev(struct rpmsg_device *rpdev) +{ + int err = 0, cid = -1; + const char *label = 0; + + VERIFY(err, !IS_ERR_OR_NULL(rpdev)); + if (err) + return -ENODEV; + + err = of_property_read_string(rpdev->dev.parent->of_node, "label", + &label); + + if (err) + label = rpdev->dev.parent->of_node->name; + + if (!strcmp(label, "cdsp")) + cid = CDSP_DOMAIN_ID; + else if (!strcmp(label, "adsp")) + cid = ADSP_DOMAIN_ID; + else if (!strcmp(label, "slpi")) + cid 
= SDSP_DOMAIN_ID; + else if (!strcmp(label, "mdsp")) + cid = MDSP_DOMAIN_ID; + + return cid; +} + +static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) +{ + int err = 0; + int cid = -1; + struct frpc_transport_session_control *transport_session_control = NULL; + + VERIFY(err, !IS_ERR_OR_NULL(rpdev)); + if (err) + return -ENODEV; + + cid = get_cid_from_rpdev(rpdev); + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + transport_session_control = &rpmsg_session_control[cid]; + mutex_lock(&transport_session_control->rpmsg_mutex); + transport_session_control->rpdev = rpdev; + mutex_unlock(&transport_session_control->rpmsg_mutex); + + /* + * Set atomic variable to 1 when rpmsg channel is up + * and wake up all threads waiting for rpmsg channel + */ + atomic_set(&transport_session_control->is_rpmsg_ch_up, 1); + wake_up_interruptible(&transport_session_control->wait_for_rpmsg_ch); + + ADSPRPC_INFO("opened rpmsg channel for %s\n", + rpmsg_session_control[cid].subsys); +bail: + if (err) + ADSPRPC_ERR("rpmsg probe of %s cid %d failed\n", + rpdev->dev.parent->of_node->name, cid); + return err; +} + +static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) +{ + int err = 0; + int cid = -1; + struct frpc_transport_session_control *transport_session_control = NULL; + + VERIFY(err, !IS_ERR_OR_NULL(rpdev)); + if (err) { + err = -ENODEV; + return; + } + + cid = get_cid_from_rpdev(rpdev); + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + transport_session_control = &rpmsg_session_control[cid]; + mutex_lock(&transport_session_control->rpmsg_mutex); + transport_session_control->rpdev = NULL; + mutex_unlock(&transport_session_control->rpmsg_mutex); + + /* + * Set atomic variable to 0 when rpmsg channel is down and + * make threads wait on is_rpmsg_ch_up + */ + atomic_set(&transport_session_control->is_rpmsg_ch_up, 0); + + ADSPRPC_INFO("closed rpmsg channel of %s\n", + rpmsg_session_control[cid].subsys); 
+bail: + if (err) + ADSPRPC_ERR("rpmsg remove of %s cid %d failed\n", + rpdev->dev.parent->of_node->name, cid); +} + +static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, + int len, void *priv, u32 addr) +{ + int err = 0; + int rpmsg_err = 0; + int cid = -1; + + trace_fastrpc_msg("rpmsg_callback: begin"); + cid = get_cid_from_rpdev(rpdev); + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + + rpmsg_err = fastrpc_handle_rpc_response(data, len, cid); +bail: + if (err) { + err = -ENOKEY; + ADSPRPC_ERR( + "invalid response data %pK, len %d from remote subsystem err %d\n", + data, len, err); + } else + err = rpmsg_err; + + trace_fastrpc_msg("rpmsg_callback: end"); + return err; +} + +/* + * This function is called from fastrpc_channel open to wait + * for rpmsg channel in the respective domain. The wait in this + * function is done only for CDSP, Audio and Sensors Daemons. + */ +int fastrpc_wait_for_transport_interrupt(int cid, + unsigned int flags) +{ + struct frpc_transport_session_control *transport_session_control = NULL; + int err = 0; + + /* + * The flags which are applicable only for daemons are checked. + * Dynamic PDs will fail and return immediately if the + * remote subsystem is not up. 
+ */ + if (flags == FASTRPC_INIT_ATTACH || flags == FASTRPC_INIT_ATTACH_SENSORS + || flags == FASTRPC_INIT_CREATE_STATIC) { + transport_session_control = &rpmsg_session_control[cid]; + ADSPRPC_DEBUG("Thread waiting for cid %d rpmsg channel", cid); + err = wait_event_interruptible(transport_session_control->wait_for_rpmsg_ch, + atomic_read(&transport_session_control->is_rpmsg_ch_up)); + ADSPRPC_DEBUG("Thread received signal for cid %d rpmsg channel (interrupted %d)", + cid, err); + } + + return err; +} + +int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm) +{ + int err = 0; + struct frpc_transport_session_control *rpmsg_session = &rpmsg_session_control[cid]; + + mutex_lock(&rpmsg_session->rpmsg_mutex); + VERIFY(err, !IS_ERR_OR_NULL(rpmsg_session->rpdev)); + if (err) { + err = -ENODEV; + ADSPRPC_ERR("No rpmsg device for %s, err %d\n", current->comm, err); + mutex_unlock(&rpmsg_session->rpmsg_mutex); + goto bail; + } + err = rpmsg_send(rpmsg_session->rpdev->ept, rpc_msg, rpc_msg_size); + mutex_unlock(&rpmsg_session->rpmsg_mutex); +bail: + return err; +} + +static const struct rpmsg_device_id fastrpc_rpmsg_match[] = { + { FASTRPC_GLINK_GUID }, + { }, +}; + +static const struct of_device_id fastrpc_rpmsg_of_match[] = { + { .compatible = "qcom,msm-fastrpc-rpmsg" }, + { }, +}; +MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match); + +static struct rpmsg_driver fastrpc_rpmsg_client = { + .id_table = fastrpc_rpmsg_match, + .probe = fastrpc_rpmsg_probe, + .remove = fastrpc_rpmsg_remove, + .callback = fastrpc_rpmsg_callback, + .drv = { + .name = "qcom,msm_fastrpc_rpmsg", + .of_match_table = fastrpc_rpmsg_of_match, + }, +}; + +void fastrpc_rproc_trace_events(const char *name, const char *event, + const char *subevent) +{ + trace_rproc_qcom_event(name, event, subevent); +} + +inline void fastrpc_transport_session_init(int cid, char *subsys) +{ + rpmsg_session_control[cid].subsys = subsys; + mutex_init(&rpmsg_session_control[cid].rpmsg_mutex); + 
init_waitqueue_head(&rpmsg_session_control[cid].wait_for_rpmsg_ch); +} + +inline void fastrpc_transport_session_deinit(int cid) +{ + mutex_destroy(&rpmsg_session_control[cid].rpmsg_mutex); +} + +int fastrpc_transport_init(void) +{ + int err = 0; + + err = register_rpmsg_driver(&fastrpc_rpmsg_client); + if (err) { + pr_err("Error: adsprpc: %s: register_rpmsg_driver failed with err %d\n", + __func__, err); + goto bail; + } +bail: + return err; +} + +void fastrpc_transport_deinit(void) +{ + unregister_rpmsg_driver(&fastrpc_rpmsg_client); +} diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h new file mode 100644 index 0000000000..98728c358a --- /dev/null +++ b/dsp/adsprpc_shared.h @@ -0,0 +1,1140 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef ADSPRPC_SHARED_H +#define ADSPRPC_SHARED_H + +#include +#include + +#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke) +#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap) +#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap) +#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64) +#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64) +#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd) +#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t) +#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init) +#define FASTRPC_IOCTL_INVOKE_ATTRS \ + _IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs) +#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t) +//#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf) +#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs) +#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc) +#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct 
fastrpc_ioctl_control) +#define FASTRPC_IOCTL_MUNMAP_FD _IOWR('R', 13, struct fastrpc_ioctl_munmap_fd) +#define FASTRPC_IOCTL_GET_DSP_INFO \ + _IOWR('R', 17, struct fastrpc_ioctl_capability) +#define FASTRPC_IOCTL_INVOKE2 _IOWR('R', 18, struct fastrpc_ioctl_invoke2) +#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 19, struct fastrpc_ioctl_mem_map) +#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 20, struct fastrpc_ioctl_mem_unmap) +#define FASTRPC_IOCTL_INVOKE_PERF \ + _IOWR('R', 21, struct fastrpc_ioctl_invoke_perf) +#define FASTRPC_IOCTL_NOTIF_RSP \ + _IOWR('R', 22, struct fastrpc_ioctl_notif_rsp) +#define FASTRPC_IOCTL_DSPSIGNAL_CREATE _IOWR('R', 23, struct fastrpc_ioctl_dspsignal_create) +#define FASTRPC_IOCTL_DSPSIGNAL_DESTROY _IOWR('R', 24, struct fastrpc_ioctl_dspsignal_destroy) +#define FASTRPC_IOCTL_DSPSIGNAL_SIGNAL _IOWR('R', 25, struct fastrpc_ioctl_dspsignal_signal) +#define FASTRPC_IOCTL_DSPSIGNAL_WAIT _IOWR('R', 26, struct fastrpc_ioctl_dspsignal_wait) +#define FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT \ + _IOWR('R', 27, struct fastrpc_ioctl_dspsignal_cancel_wait) + +#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" +#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" +#define DEVICE_NAME "adsprpc-smd" +#define DEVICE_NAME_SECURE "adsprpc-smd-secure" + +/* Pre-defined parameter for print gfa structure*/ + +#define smq_invoke_ctx_params "pid: %d, tgid: %d, handle: %p, sc: 0x%x, fl: %p, fd: %p, magic: %d\n" + +#define fastrpc_file_params "fl->tgid: %d, fl->cid: %d, fl->ssrcount: %p, fl->pd: %d, fl->profile: %p, fl->mode: %p, fl->tgid_open: %d, fl->num_cached_buf: %d, num_pers_hdrs: %d, fl->sessionid: %d, fl->servloc_name: %s, fl->file_close: %d, fl->dsp_proc_init: %d,fl->apps: %p, fl->qos_request: %d, fl->dev_minor: %d, fl->debug_buf: %s fl->debug_buf_alloced_attempted: %d, fl->wake_enable: %d, fl->ws_timeout: %d, fl->untrusted_process: %d\n" + +#define fastrpc_mmap_params "fl: %p, apps: %p, fd: %d, flags: %p, buf: %p, phys: %p, size : %d, va : %p, map->raddr: %p, len : %d, 
refs : %d, secure: %d\n" + +#define fastrpc_buf_params "buf->fl: %p, buf->phys: %p, buf->virt: %p, buf->size: %d, buf->dma_attr: %ld, buf->raddr: %p, buf->flags: %d, buf->type: %d, buf->in_use: %d\n" +/* Set for buffers that have no virtual mapping in userspace */ +#define FASTRPC_ATTR_NOVA 0x1 + +/* Set for buffers that are NOT dma coherent */ +#define FASTRPC_ATTR_NON_COHERENT 0x2 + +/* Set for buffers that are dma coherent */ +#define FASTRPC_ATTR_COHERENT 0x4 + +/* Fastrpc attribute for keeping the map persistent */ +#define FASTRPC_ATTR_KEEP_MAP 0x8 + +/* Fastrpc attribute for no mapping of fd */ +#define FASTRPC_ATTR_NOMAP (16) + +/* + * Fastrpc attribute to skip flush by fastrpc + */ +#define FASTRPC_ATTR_FORCE_NOFLUSH (32) + +/* + * Fastrpc attribute to skip invalidate by fastrpc + */ +#define FASTRPC_ATTR_FORCE_NOINVALIDATE (64) + +/* Driver should operate in parallel with the co-processor */ +#define FASTRPC_MODE_PARALLEL 0 + +/* Driver should operate in serial mode with the co-processor */ +#define FASTRPC_MODE_SERIAL 1 + +/* Driver should operate in profile mode with the co-processor */ +#define FASTRPC_MODE_PROFILE 2 + +/* Set FastRPC session ID to 1 */ +#define FASTRPC_MODE_SESSION 4 + +/* Retrives number of input buffers from the scalars parameter */ +#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff) + +/* Retrives number of output buffers from the scalars parameter */ +#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff) + +/* Retrives number of input handles from the scalars parameter */ +#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f) + +/* Retrives number of output handles from the scalars parameter */ +#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f) + +/* Remote domains ID */ +#define ADSP_DOMAIN_ID (0) +#define MDSP_DOMAIN_ID (1) +#define SDSP_DOMAIN_ID (2) +#define CDSP_DOMAIN_ID (3) +#define MAX_DOMAIN_ID CDSP_DOMAIN_ID + +#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ +#define NUM_SESSIONS 13 /* max 12 compute, 1 
cpz */ + +#define VALID_FASTRPC_CID(cid) \ + (cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS) + +#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) +\ + REMOTE_SCALARS_OUTBUFS(sc) +\ + REMOTE_SCALARS_INHANDLES(sc) +\ + REMOTE_SCALARS_OUTHANDLES(sc)) + +#define REMOTE_SCALARS_MAKEX(attr, method, in, out, oin, oout) \ + ((((uint32_t) (attr) & 0x7) << 29) | \ + (((uint32_t) (method) & 0x1f) << 24) | \ + (((uint32_t) (in) & 0xff) << 16) | \ + (((uint32_t) (out) & 0xff) << 8) | \ + (((uint32_t) (oin) & 0x0f) << 4) | \ + ((uint32_t) (oout) & 0x0f)) + +#define REMOTE_SCALARS_MAKE(method, in, out) \ + REMOTE_SCALARS_MAKEX(0, method, in, out, 0, 0) + +#ifdef VERIFY_PRINT_ERROR +#define VERIFY_EPRINTF(format, ...) pr_err(format, ##__VA_ARGS__) +#else +#define VERIFY_EPRINTF(format, args) ((void)0) +#endif + +#ifndef VERIFY_PRINT_INFO +#define VERIFY_IPRINTF(args) ((void)0) +#endif + +#ifndef VERIFY +#define __STR__(x) #x ":" +#define __TOSTR__(x) __STR__(x) +#define __FILE_LINE__ __FILE__ ":" __TOSTR__(__LINE__) +#define __ADSPRPC_LINE__ "adsprpc:" __TOSTR__(__LINE__) + +#define VERIFY(err, val) \ +do {\ + VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\ + if ((val) == 0) {\ + (err) = (err) == 0 ? 
-1 : (err);\ + VERIFY_EPRINTF(__ADSPRPC_LINE__" error: %d: "#val "\n", (err));\ + } else {\ + VERIFY_IPRINTF(__FILE_LINE__"info: passed: " #val "\n");\ + } \ +} while (0) +#endif + +#define ADSPRPC_ERR(fmt, args...)\ + pr_err("Error: adsprpc (%d): %s: %s: " fmt, __LINE__,\ + current->comm, __func__, ##args) +#define ADSPRPC_INFO(fmt, args...)\ + pr_info("Info: adsprpc (%d): %s: %s: " fmt, __LINE__,\ + current->comm, __func__, ##args) +#define ADSPRPC_WARN(fmt, args...)\ + pr_warn("Warning: adsprpc (%d): %s: %s: " fmt, __LINE__,\ + current->comm, __func__, ##args) +#define ADSPRPC_DEBUG(fmt, args...)\ + pr_debug("Debug: adsprpc (%d): %s: %s: " fmt, __LINE__,\ + current->comm, __func__, ##args) + +#define DEBUG_PRINT_SIZE_LIMIT (512*1024) + +#define remote_arg64_t union remote_arg64 + +struct remote_buf64 { + uint64_t pv; + uint64_t len; +}; + +struct remote_dma_handle64 { + int fd; + uint32_t offset; + uint32_t len; +}; + +union remote_arg64 { + struct remote_buf64 buf; + struct remote_dma_handle64 dma; + uint32_t h; +}; + +#define remote_arg_t union remote_arg + +struct remote_buf { + void *pv; /* buffer pointer */ + size_t len; /* length of buffer */ +}; + +struct remote_dma_handle { + int fd; + uint32_t offset; +}; + +union remote_arg { + struct remote_buf buf; /* buffer info */ + struct remote_dma_handle dma; + uint32_t h; /* remote handle */ +}; + +struct fastrpc_ioctl_invoke { + uint32_t handle; /* remote handle */ + uint32_t sc; /* scalars describing the data */ + remote_arg_t *pra; /* remote arguments list */ +}; + +struct fastrpc_ioctl_invoke_fd { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ +}; + +struct fastrpc_ioctl_invoke_attrs { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ +}; + +struct fastrpc_ioctl_invoke_crc { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; +}; + +struct fastrpc_ioctl_invoke_perf { 
+ struct fastrpc_ioctl_invoke inv; + int *fds; + unsigned int *attrs; + unsigned int *crc; + uint64_t *perf_kernel; + uint64_t *perf_dsp; +}; + +struct fastrpc_async_job { + uint32_t isasyncjob; /* flag to distinguish async job */ + uint64_t jobid; /* job id generated by user */ + uint32_t reserved; /* reserved */ +}; + +struct fastrpc_ioctl_invoke_async { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; + uint64_t *perf_kernel; + uint64_t *perf_dsp; + struct fastrpc_async_job *job; /* async job*/ +}; + +struct fastrpc_ioctl_invoke_async_no_perf { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; + struct fastrpc_async_job *job; /* async job*/ +}; + +struct fastrpc_ioctl_async_response { + uint64_t jobid;/* job id generated by user */ + int result; /* result from DSP */ + uint64_t *perf_kernel; + uint64_t *perf_dsp; + uint32_t handle; + uint32_t sc; +}; + +struct fastrpc_ioctl_notif_rsp { + int domain; /* Domain of User PD */ + int session; /* Session ID of User PD */ + uint32_t status; /* Status of the process */ +}; + +/* INIT a new process or attach to guestos */ +enum fastrpc_init_flags { + FASTRPC_INIT_NO_CREATE = -1, + FASTRPC_INIT_ATTACH = 0, + FASTRPC_INIT_CREATE = 1, + FASTRPC_INIT_CREATE_STATIC = 2, + FASTRPC_INIT_ATTACH_SENSORS = 3, +}; + +enum fastrpc_invoke2_type { + FASTRPC_INVOKE2_ASYNC = 1, + FASTRPC_INVOKE2_ASYNC_RESPONSE = 2, + FASTRPC_INVOKE2_KERNEL_OPTIMIZATIONS, + FASTRPC_INVOKE2_STATUS_NOTIF, +}; + +struct fastrpc_ioctl_invoke2 { + uint32_t req; /* type of invocation request */ + uintptr_t invparam; /* invocation request param */ + uint32_t size; /* size of invocation param */ + int err; /* reserved */ +}; + +struct fastrpc_ioctl_init { + uint32_t flags; /* one of FASTRPC_INIT_* macros */ + uintptr_t file; /* pointer to elf file */ + uint32_t filelen; /* elf file length */ + int32_t filefd; /* 
ION fd for the file */ + uintptr_t mem; /* mem for the PD */ + uint32_t memlen; /* mem length */ + int32_t memfd; /* ION fd for the mem */ +}; + +struct fastrpc_ioctl_init_attrs { + struct fastrpc_ioctl_init init; + int attrs; + unsigned int siglen; +}; + +struct fastrpc_ioctl_munmap { + uintptr_t vaddrout; /* address to unmap */ + size_t size; /* size */ +}; + +struct fastrpc_ioctl_munmap_64 { + uint64_t vaddrout; /* address to unmap */ + size_t size; /* size */ +}; + +struct fastrpc_ioctl_mmap { + int fd; /* ion fd */ + uint32_t flags; /* flags for dsp to map with */ + uintptr_t vaddrin; /* optional virtual address */ + size_t size; /* size */ + uintptr_t vaddrout; /* dsps virtual address */ +}; + +struct fastrpc_ioctl_mmap_64 { + int fd; /* ion fd */ + uint32_t flags; /* flags for dsp to map with */ + uint64_t vaddrin; /* optional virtual address */ + size_t size; /* size */ + uint64_t vaddrout; /* dsps virtual address */ +}; + +struct fastrpc_ioctl_munmap_fd { + int fd; /* fd */ + uint32_t flags; /* control flags */ + uintptr_t va; /* va */ + ssize_t len; /* length */ +}; + +struct fastrpc_ioctl_dspsignal_create { + uint32_t signal_id; /* Signal ID */ + uint32_t flags; /* Flags, currently unused */ +}; + +struct fastrpc_ioctl_dspsignal_destroy { + uint32_t signal_id; /* Signal ID */ +}; + +struct fastrpc_ioctl_dspsignal_signal { + uint32_t signal_id; /* Signal ID */ +}; + +struct fastrpc_ioctl_dspsignal_wait { + uint32_t signal_id; /* Signal ID */ + uint32_t timeout_usec; /* Timeout in microseconds. UINT32_MAX for an infinite wait */ +}; + +struct fastrpc_ioctl_dspsignal_cancel_wait { + uint32_t signal_id; /* Signal ID */ +}; + +/** + * Control flags for mapping memory on DSP user process + */ +enum fastrpc_map_flags { + /** + * Map memory pages with RW- permission and CACHE WRITEBACK. + * The driver is responsible for cache maintenance when passed + * the buffer to FastRPC calls. Same virtual address will be + * assigned for subsequent FastRPC calls. 
+ */ + FASTRPC_MAP_STATIC = 0, + + /* Reserved */ + FASTRPC_MAP_RESERVED, + + /** + * Map memory pages with RW- permission and CACHE WRITEBACK. + * Mapping tagged with a file descriptor. User is responsible for + * CPU and DSP cache maintenance for the buffer. Get virtual address + * of buffer on DSP using HAP_mmap_get() and HAP_mmap_put() APIs. + */ + FASTRPC_MAP_FD = 2, + + /** + * Mapping delayed until user call HAP_mmap() and HAP_munmap() + * functions on DSP. It is useful to map a buffer with cache modes + * other than default modes. User is responsible for CPU and DSP + * cache maintenance for the buffer. + */ + FASTRPC_MAP_FD_DELAYED, + + /** + * This flag is used to skip CPU mapping, + * otherwise behaves similar to FASTRPC_MAP_FD_DELAYED flag. + */ + FASTRPC_MAP_FD_NOMAP = 16, + + FASTRPC_MAP_MAX, +}; + +struct fastrpc_mem_map { + int fd; /* ion fd */ + int offset; /* buffer offset */ + uint32_t flags; /* flags defined in enum fastrpc_map_flags */ + int attrs; /* buffer attributes used for SMMU mapping */ + uintptr_t vaddrin; /* buffer virtual address */ + size_t length; /* buffer length */ + uint64_t vaddrout; /* [out] remote virtual address */ +}; + +/* Map and unmap IOCTL methods reserved memory size for future extensions */ +#define MAP_RESERVED_NUM (14) +#define UNMAP_RESERVED_NUM (10) + +/* map memory to DSP device */ +struct fastrpc_ioctl_mem_map { + int version; /* Initial version 0 */ + union { + struct fastrpc_mem_map m; + int reserved[MAP_RESERVED_NUM]; + }; +}; + +struct fastrpc_mem_unmap { + int fd; /* ion fd */ + uint64_t vaddr; /* remote process (dsp) virtual address */ + size_t length; /* buffer size */ +}; + +/* unmap memory to DSP device */ +struct fastrpc_ioctl_mem_unmap { + int version; /* Initial version 0 */ + union { + struct fastrpc_mem_unmap um; + int reserved[UNMAP_RESERVED_NUM]; + }; +}; + +/* + * This enum is shared with DSP. So, existing values should NOT + * be modified. Only new members can be added. 
+ */ +enum dsp_map_flags { + /* Add memory to static PD pool, protection thru XPU */ + ADSP_MMAP_HEAP_ADDR = 4, + + /* MAP static DMA buffer on DSP User PD */ + ADSP_MMAP_DMA_BUFFER = 6, + + /* Add memory to static PD pool, protection thru hypervisor */ + ADSP_MMAP_REMOTE_HEAP_ADDR = 8, + + /* Add memory to userPD pool, for user heap */ + ADSP_MMAP_ADD_PAGES = 0x1000, + + /* Add memory to userPD pool, for LLC heap */ + ADSP_MMAP_ADD_PAGES_LLC = 0x3000, + + /* Map persistent header buffer on DSP */ + ADSP_MMAP_PERSIST_HDR = 0x4000, +}; + +enum fastrpc_control_type { + FASTRPC_CONTROL_LATENCY = 1, + FASTRPC_CONTROL_SMMU = 2, + FASTRPC_CONTROL_KALLOC = 3, + FASTRPC_CONTROL_WAKELOCK = 4, + FASTRPC_CONTROL_PM = 5, +/* Clean process on DSP */ + FASTRPC_CONTROL_DSPPROCESS_CLEAN = 6, + FASTRPC_CONTROL_RPC_POLL = 7, +}; + +struct fastrpc_ctrl_latency { + uint32_t enable; /* latency control enable */ + uint32_t latency; /* latency request in us */ +}; + +struct fastrpc_ctrl_kalloc { + uint32_t kalloc_support; /* Remote memory allocation from kernel */ +}; + +struct fastrpc_ctrl_wakelock { + uint32_t enable; /* wakelock control enable */ +}; + +struct fastrpc_ctrl_pm { + uint32_t timeout; /* timeout(in ms) for PM to keep system awake */ +}; + +struct fastrpc_ioctl_control { + uint32_t req; + union { + struct fastrpc_ctrl_latency lp; + struct fastrpc_ctrl_kalloc kalloc; + struct fastrpc_ctrl_wakelock wp; + struct fastrpc_ctrl_pm pm; + }; +}; + +#define FASTRPC_MAX_DSP_ATTRIBUTES (256) +#define FASTRPC_MAX_ATTRIBUTES (260) + +enum fastrpc_dsp_capability { + ASYNC_FASTRPC_CAP = 9, + DMA_HANDLE_REVERSE_RPC_CAP = 129, +}; + +struct fastrpc_ioctl_capability { + uint32_t domain; + uint32_t attribute_ID; + uint32_t capability; +}; + +struct smq_null_invoke { + uint64_t ctx; /* invoke caller context */ + uint32_t handle; /* handle to invoke */ + uint32_t sc; /* scalars structure describing the data */ +}; + +struct smq_phy_page { + uint64_t addr; /* physical address */ + uint64_t 
size; /* size of contiguous region */ +}; + +struct smq_invoke_buf { + int num; /* number of contiguous regions */ + int pgidx; /* index to start of contiguous region */ +}; + +struct smq_invoke { + struct smq_null_invoke header; + struct smq_phy_page page; /* remote arg and list of pages address */ +}; + +struct smq_msg { + uint32_t pid; /* process group id */ + uint32_t tid; /* thread id */ + struct smq_invoke invoke; +}; + +struct smq_invoke_rsp { + uint64_t ctx; /* invoke caller context */ + int retval; /* invoke return value */ +}; + +enum fastrpc_response_flags { + NORMAL_RESPONSE = 0, + EARLY_RESPONSE = 1, + USER_EARLY_SIGNAL = 2, + COMPLETE_SIGNAL = 3, + STATUS_RESPONSE = 4, + POLL_MODE = 5, +}; + +struct smq_invoke_rspv2 { + uint64_t ctx; /* invoke caller context */ + int retval; /* invoke return value */ + uint32_t flags; /* early response flags */ + uint32_t early_wake_time; /* user predicted early wakeup time in us */ + uint32_t version; /* Version number for validation */ +}; + +enum fastrpc_status_flags { + FASTRPC_USERPD_UP = 0, + FASTRPC_USERPD_EXIT = 1, + FASTRPC_USERPD_FORCE_KILL = 2, + FASTRPC_USERPD_EXCEPTION = 3, + FASTRPC_DSP_SSR = 4, +}; + +struct smq_notif_rspv3 { + uint64_t ctx; /* response context */ + uint32_t type; /* Notification type */ + int pid; /* user process pid */ + uint32_t status; /* userpd status notification */ +}; + +enum fastrpc_process_exit_states { + /* Process Default State */ + FASTRPC_PROCESS_DEFAULT_STATE = 0, + /* Process exit initiated */ + FASTRPC_PROCESS_EXIT_START = 1, + /* Process exit issued to DSP */ + FASTRPC_PROCESS_DSP_EXIT_INIT = 2, + /* Process exit in DSP complete */ + FASTRPC_PROCESS_DSP_EXIT_COMPLETE = 3, + /* Process exit in DSP error */ + FASTRPC_PROCESS_DSP_EXIT_ERROR = 4, +}; + +inline int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm); +inline int fastrpc_handle_rpc_response(void *data, int len, int cid); +inline int verify_transport_device(int cid, bool 
trusted_vm); +int fastrpc_transport_init(void); +void fastrpc_transport_deinit(void); +void fastrpc_transport_session_init(int cid, char *subsys); +void fastrpc_transport_session_deinit(int cid); +int fastrpc_wait_for_transport_interrupt(int cid, unsigned int flags); + +static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra, + uint32_t sc) +{ + unsigned int len = REMOTE_SCALARS_LENGTH(sc); + + return (struct smq_invoke_buf *)(&pra[len]); +} + +static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc, + struct smq_invoke_buf *buf) +{ + unsigned int nTotal = REMOTE_SCALARS_LENGTH(sc); + + return (struct smq_phy_page *)(&buf[nTotal]); +} + +/* + * Fastrpc context ID bit-map: + * + * bits 0-3 : type of remote PD + * bit 4 : type of job (sync/async) + * bit 5 : reserved + * bits 6-15 : index in context table + * bits 16-63 : incrementing context ID + */ +#define FASTRPC_CTX_MAX (1024) + +/* Length of glink transaction history to store */ +#define GLINK_MSG_HISTORY_LEN (128) + + +/* Type of fastrpc DMA bufs sent to DSP */ +enum fastrpc_buf_type { + METADATA_BUF, + COPYDATA_BUF, + INITMEM_BUF, + USERHEAP_BUF, +}; + + +/* Types of RPC calls to DSP */ +enum fastrpc_msg_type { + USER_MSG = 0, + KERNEL_MSG_WITH_ZERO_PID, + KERNEL_MSG_WITH_NONZERO_PID, +}; + +#define DSPSIGNAL_TIMEOUT_NONE 0xffffffff +#define DSPSIGNAL_NUM_SIGNALS 1024 + +// Signal state and completions are stored in groups of DSPSIGNAL_GROUP_SIZE. +// Must be a power of two. 
+#define DSPSIGNAL_GROUP_SIZE 256
+
+
+/* Set of virtual machine IDs and matching permissions (parallel arrays) */
+struct secure_vm {
+	int *vmid;	/* array of VM IDs */
+	int *vmperm;	/* array of VM permissions, parallel to vmid */
+	int vmcount;	/* number of entries in vmid/vmperm */
+};
+
+/* List of supplementary group IDs */
+struct gid_list {
+	unsigned int *gids;	/* array of group IDs */
+	unsigned int gidcount;	/* number of entries in gids */
+};
+
+/* CPU core numbers used for QoS voting (see silvercores in fastrpc_apps) */
+struct qos_cores {
+	int *coreno;	/* array of core numbers */
+	int corecount;	/* number of entries in coreno */
+};
+
+struct fastrpc_file;
+
+/* DMA buffer allocated by the driver and shared with the DSP */
+struct fastrpc_buf {
+	struct hlist_node hn;		/* list node */
+	struct hlist_node hn_rem;	/* list node (remote buffer list) */
+	struct hlist_node hn_init;	/* list node (init-mem list) */
+	struct fastrpc_file *fl;	/* owning process context */
+	void *virt;			/* kernel virtual address */
+	uint64_t phys;			/* physical address */
+	size_t size;			/* buffer size in bytes */
+	unsigned long dma_attr;		/* DMA attributes used at allocation */
+	uintptr_t raddr;		/* remote (DSP-side) address */
+	uint32_t flags;			/* mapping flags */
+	int type;		/* One of "fastrpc_buf_type" */
+	bool in_use;	/* Used only for persistent header buffers */
+	struct timespec64 buf_start_time;	/* timestamp: buffer life begin */
+	struct timespec64 buf_end_time;		/* timestamp: buffer life end */
+};
+
+struct fastrpc_ctx_lst;
+
+struct fastrpc_tx_msg {
+	struct smq_msg msg;	/* Msg sent to remote subsystem */
+	int transport_send_err;	/* transport error */
+	int64_t ns;		/* Timestamp (in ns) of msg */
+	uint64_t xo_time_in_us;	/* XO Timestamp (in us) of sent message */
+};
+
+struct fastrpc_rx_msg {
+	struct smq_invoke_rspv2 rsp;	/* Response from remote subsystem */
+	int64_t ns;		/* Timestamp (in ns) of response */
+	uint64_t xo_time_in_us;	/* XO Timestamp (in us) of response */
+};
+
+/*
+ * Rolling history of the last GLINK_MSG_HISTORY_LEN messages exchanged
+ * with a remote subsystem, kept for transport debugging.
+ */
+struct fastrpc_transport_log {
+	unsigned int tx_index;	/* Current index of 'tx_msgs' array */
+	unsigned int rx_index;	/* Current index of 'rx_msgs' array */
+
+	/* Rolling history of messages sent to remote subsystem */
+	struct fastrpc_tx_msg tx_msgs[GLINK_MSG_HISTORY_LEN];
+
+	/* Rolling history of responses from remote subsystem */
+	struct fastrpc_rx_msg rx_msgs[GLINK_MSG_HISTORY_LEN];
+	spinlock_t lock;	/* protects indices and both history arrays */
+};
+
+/*
+ * Overlap record for remote-call buffer ranges. Populated and consumed by
+ * the argument-marshalling code (outside this view) -- field semantics
+ * beyond do_cmo should be confirmed against that code.
+ */
+struct overlap {
+	uintptr_t start;	/* range start address */
+	uintptr_t end;		/* range end address */
+	int raix;		/* presumably remote-arg index -- verify against users */
+	uintptr_t mstart;
+	uintptr_t mend;
+	uintptr_t offset;
+	int do_cmo;		/*used for cache maintenance of inrout buffers*/
+};
+
+/*
+ * Per-invocation performance counters; slots correspond to
+ * enum fastrpc_perfkeys (PERF_COUNT..PERF_TID).
+ */
+struct fastrpc_perf {
+	uint64_t count;
+	uint64_t flush;
+	uint64_t map;
+	uint64_t copy;
+	uint64_t link;
+	uint64_t getargs;
+	uint64_t putargs;
+	uint64_t invargs;
+	uint64_t invoke;
+	uint64_t tid;
+};
+
+/* Queued user-PD status notification (see fastrpc_status_flags) */
+struct smq_notif_rsp {
+	struct list_head notifn;	/* node in per-process notif_queue */
+	int domain;			/* domain of the user PD */
+	int session;			/* session ID of the user PD */
+	enum fastrpc_status_flags status;	/* reported PD status */
+};
+
+/* Kernel-side context for one remote invocation */
+struct smq_invoke_ctx {
+	struct hlist_node hn;
+	/* Async node to add to async job ctx list */
+	struct list_head asyncn;
+	struct completion work;	/* signalled when DSP response arrives */
+	int retval;		/* return value from DSP */
+	int pid;
+	int tgid;
+	remote_arg_t *lpra;	/* user-supplied remote args */
+	remote_arg64_t *rpra;	/* marshalled 64-bit remote args */
+	remote_arg64_t *lrpra;	/* Local copy of rpra for put_args */
+	int *fds;		/* fd list for the call */
+	unsigned int *attrs;	/* attribute list for the call */
+	struct fastrpc_mmap **maps;	/* per-arg mappings */
+	struct fastrpc_buf *buf;	/* metadata buffer */
+	struct fastrpc_buf *copybuf;	/*used to copy non-ion buffers */
+	size_t used;
+	struct fastrpc_file *fl;	/* owning process */
+	uint32_t handle;	/* remote handle invoked */
+	uint32_t sc;		/* scalars describing the call */
+	struct overlap *overs;
+	struct overlap **overps;
+	struct smq_msg msg;	/* message sent to the remote subsystem */
+	uint32_t *crc;
+	uint64_t *perf_kernel;
+	uint64_t *perf_dsp;
+	unsigned int magic;	/* context validity marker */
+	uint64_t ctxid;
+	struct fastrpc_perf *perf;
+	/* response flags from remote processor */
+	enum fastrpc_response_flags rsp_flags;
+	/* user hint of completion time in us */
+	uint32_t early_wake_time;
+	/* work done status flag */
+	bool is_work_done;
+	/* Store Async job in the context*/
+	struct fastrpc_async_job asyncjob;
+	/* Async early flag to check the state of context */
+	bool is_early_wakeup;
+	uint32_t sc_interrupted;
+	struct fastrpc_file *fl_interrupted;
+	uint32_t handle_interrupted;
+};
+
+/* Per-process lists of invocation contexts and notifications */
+struct fastrpc_ctx_lst {
+	struct hlist_head pending;	/* contexts awaiting DSP response */
+	struct hlist_head interrupted;	/* contexts interrupted by signals */
+	/* Number of active contexts queued to DSP */
+	uint32_t num_active_ctxs;
+	/* Queue which holds all async job contexts of process */
+	struct list_head async_queue;
+	/* Queue which holds all status notifications of process */
+	struct list_head notif_queue;
+};
+
+/* SMMU context-bank information for a session */
+struct fastrpc_smmu {
+	struct device *dev;
+	const char *dev_name;
+	int cb;		/* context bank number */
+	int enabled;
+	int faults;
+	int secure;
+	int coherent;
+};
+
+struct fastrpc_session_ctx {
+	struct device *dev;
+	struct fastrpc_smmu smmu;
+	int used;	/* non-zero when the session is allocated */
+};
+
+/* State for a static (audio/sensors) protection domain with PDR support */
+struct fastrpc_static_pd {
+	char *servloc_name;	/* service locator name */
+	char *spdname;		/* static PD name */
+	void *pdrhandle;	/* PDR registration handle */
+	uint64_t pdrcount;	/* number of PD restarts observed */
+	uint64_t prevpdrcount;	/* pdrcount at last successful init */
+	atomic_t ispdup;	/* non-zero when the PD is up */
+	int cid;		/* channel ID this PD belongs to */
+	wait_queue_head_t wait_for_pdup;	/* waiters for PD-up event */
+};
+
+struct fastrpc_dsp_capabilities {
+	uint32_t is_cached;	//! Flag if dsp attributes are cached
+	uint32_t dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
+};
+
+/* Per-remote-subsystem (channel) state */
+struct fastrpc_channel_ctx {
+	char *name;
+	char *subsys;
+	struct device *dev;
+	struct fastrpc_session_ctx session[NUM_SESSIONS];
+	struct fastrpc_static_pd spd[NUM_SESSIONS];
+	struct completion work;
+	struct completion workport;
+	struct notifier_block nb;	/* SSR notifier */
+	struct mutex smd_mutex;
+	uint64_t sesscount;	/* number of sessions in use */
+	uint64_t ssrcount;	/* number of subsystem restarts observed */
+	void *handle;
+	uint64_t prevssrcount;
+	int issubsystemup;
+	int vmid;
+	struct secure_vm rhvm;	/* remote heap VM assignment info */
+	void *rh_dump_dev;
+	/* Indicates, if channel is restricted to secure node only */
+	int secure;
+	/* Indicates whether the channel supports unsigned PD */
+	bool unsigned_support;
+	struct fastrpc_dsp_capabilities dsp_cap_kernel;
+	/* cpu capabilities shared to DSP */
+	uint64_t cpuinfo_todsp;
+	bool cpuinfo_status;
+	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
+	spinlock_t ctxlock;	/* protects ctxtable */
+	struct fastrpc_transport_log gmsg_log;
+	struct hlist_head initmems;
+	/* Store gfa structure debug details */
+	struct fastrpc_buf *buf;
+};
+
+/* Driver-global state */
+struct fastrpc_apps {
+	struct fastrpc_channel_ctx *channel;	/* array of channel contexts */
+	struct cdev cdev;
+	struct class *class;
+	struct smq_phy_page range;
+	struct hlist_head maps;		/* global map list */
+	uint32_t staticpd_flags;
+	dev_t dev_no;
+	int compat;
+	struct hlist_head drivers;	/* list of open fastrpc_file instances */
+	spinlock_t hlock;		/* protects drivers list */
+	struct device *dev;
+	/* Indicates fastrpc device node info */
+	struct device *dev_fastrpc;
+	unsigned int latency;
+	int transport_initialized;
+	/* Flag to determine fastrpc bus registration */
+	int fastrpc_bus_register;
+	bool legacy_remote_heap;
+	/* Unique job id for each message */
+	uint64_t jobid[NUM_CHANNELS];
+	struct gid_list gidlist;
+	struct device *secure_dev;
+	struct device *non_secure_dev;
+	/* Secure subsystems like ADSP/SLPI will use secure client */
+	struct wakeup_source *wake_source_secure;
+	/* Non-secure subsystem like CDSP will use regular client */
+	struct wakeup_source *wake_source;
+	uint32_t duplicate_rsp_err_cnt;
+	struct qos_cores silvercores;
+	uint32_t max_size_limit;
+	struct hlist_head frpc_devices;
+	struct hlist_head frpc_drivers;
+	struct mutex mut_uid;
+	/* Indicates cdsp device status */
+	int remote_cdsp_status;
+};
+
+/* One DMA-buf mapping shared with the DSP */
+struct fastrpc_mmap {
+	struct hlist_node hn;
+	struct fastrpc_file *fl;	/* owning process (NULL for global maps) -- TODO confirm */
+	struct fastrpc_apps *apps;
+	int fd;			/* dma-buf fd backing this map */
+	uint32_t flags;		/* mapping flags (see dsp_map_flags) */
+	struct dma_buf *buf;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct ion_handle *handle;
+	uint64_t phys;		/* physical/IOVA address */
+	size_t size;		/* mapped size */
+	uintptr_t va;		/* CPU virtual address */
+	size_t len;		/* requested length */
+	int refs;		/* reference count */
+	uintptr_t raddr;	/* remote (DSP) address */
+	int secure;
+	/* Minidump unique index */
+	int frpc_md_index;
+	uintptr_t attr;
+	struct timespec64 map_start_time;
+	struct timespec64 map_end_time;
+	/* Mapping for fastrpc shell */
+	bool is_filemap;
+};
+
+/* Indices into the per-call performance counter array */
+enum fastrpc_perfkeys {
+	PERF_COUNT = 0,
+	PERF_FLUSH = 1,
+	PERF_MAP = 2,
+	PERF_COPY = 3,
+	PERF_LINK = 4,
+	PERF_GETARGS = 5,
+	PERF_PUTARGS = 6,
+	PERF_INVARGS = 7,
+	PERF_INVOKE = 8,
+	PERF_TID = 9,
+	PERF_KEY_MAX = 10,
+};
+
+struct fastrpc_notif_queue {
+	/* Number of pending status notifications in queue */
+	atomic_t notif_queue_count;
+
+	/* Wait queue to synchronize notifier thread and response */
+	wait_queue_head_t notif_wait_queue;
+
+	/* IRQ safe spin lock for protecting notif queue */
+	spinlock_t nqlock;
+};
+
+/* Lifecycle states of a DSP signal slot */
+enum fastrpc_dspsignal_state {
+	DSPSIGNAL_STATE_UNUSED = 0,
+	DSPSIGNAL_STATE_PENDING,
+	DSPSIGNAL_STATE_SIGNALED,
+	DSPSIGNAL_STATE_CANCELED
+};
+
+struct fastrpc_dspsignal {
+	struct completion comp;	/* completed when the signal fires */
+	int state;		/* one of fastrpc_dspsignal_state */
+};
+
+/* Per-open-fd (per-process) driver state */
+struct fastrpc_file {
+	struct hlist_node hn;		/* node in fastrpc_apps.drivers */
+	spinlock_t hlock;
+	struct hlist_head maps;		/* process mappings */
+	struct hlist_head cached_bufs;
+	uint32_t num_cached_buf;
+	struct hlist_head remote_bufs;
+	struct fastrpc_ctx_lst clst;	/* invocation context lists */
+	struct fastrpc_session_ctx *sctx;
+	struct fastrpc_buf *init_mem;
+
+	/* No. of persistent headers */
+	unsigned int num_pers_hdrs;
+	/* Pre-allocated header buffer */
+	struct fastrpc_buf *pers_hdr_buf;
+	/* Pre-allocated buffer divided into N chunks */
+	struct fastrpc_buf *hdr_bufs;
+
+	struct fastrpc_session_ctx *secsctx;
+	uint32_t mode;
+	uint32_t profile;
+	int sessionid;
+	int tgid_open;	/* Process ID during device open */
+	int tgid;	/* Process ID that uses device for RPC calls */
+	int cid;	/* channel ID this fd is bound to */
+	bool trusted_vm;
+	uint64_t ssrcount;
+	int pd;
+	char *servloc_name;
+	int file_close;
+	int dsp_proc_init;
+	struct fastrpc_apps *apps;
+	struct dentry *debugfs_file;
+	struct dev_pm_qos_request *dev_pm_qos_req;
+	int qos_request;
+	struct mutex map_mutex;
+	struct mutex internal_map_mutex;
+	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
+	int dev_minor;
+	char *debug_buf;
+	/* Flag to indicate attempt has been made to allocate memory for debug_buf*/
+	int debug_buf_alloced_attempted;
+	/* Flag to enable PM wake/relax voting for every remote invoke */
+	int wake_enable;
+	struct gid_list gidlist;
+	/* Number of jobs pending in Async Queue */
+	atomic_t async_queue_job_count;
+	/* Async wait queue to synchronize glink response and async thread */
+	wait_queue_head_t async_wait_queue;
+	/* IRQ safe spin lock for protecting async queue */
+	spinlock_t aqlock;
+	/* Process status notification queue */
+	struct fastrpc_notif_queue proc_state_notif;
+	uint32_t ws_timeout;
+	bool untrusted_process;
+	struct fastrpc_device *device;
+	/* Process kill will wait on work when ram dump collection in progress */
+	struct completion work;
+	/* Flag to indicate ram dump collection status*/
+	bool is_ramdump_pend;
+	/* Flag to indicate type of process (static, dynamic) */
+	uint32_t proc_flags;
+	/* If set, threads will poll for DSP response instead of glink wait */
+	bool poll_mode;
+	/* Threads poll for specified timeout and fall back to glink wait */
+	uint32_t poll_timeout;
+	/* Flag to indicate dynamic process creation status*/
+	bool in_process_create;
+	bool is_unsigned_pd;
+	/* Flag to indicate 32 bit driver*/
+	bool is_compat;
+	/* Completion objects and state for dspsignals */
+	struct fastrpc_dspsignal *signal_groups[DSPSIGNAL_NUM_SIGNALS / DSPSIGNAL_GROUP_SIZE];
+	spinlock_t dspsignals_lock;
+	struct mutex signal_create_mutex;
+};
+
+/* Union of all ioctl argument layouts copied from user space */
+union fastrpc_ioctl_param {
+	struct fastrpc_ioctl_invoke_async inv;
+	struct fastrpc_ioctl_mem_map mem_map;
+	struct fastrpc_ioctl_mem_unmap mem_unmap;
+	struct fastrpc_ioctl_mmap mmap;
+	struct fastrpc_ioctl_mmap_64 mmap64;
+	struct fastrpc_ioctl_munmap munmap;
+	struct fastrpc_ioctl_munmap_64 munmap64;
+	struct fastrpc_ioctl_munmap_fd munmap_fd;
+	struct fastrpc_ioctl_init_attrs init;
+	struct fastrpc_ioctl_control cp;
+	struct fastrpc_ioctl_capability cap;
+	struct fastrpc_ioctl_invoke2 inv2;
+	struct fastrpc_ioctl_dspsignal_signal sig;
+	struct fastrpc_ioctl_dspsignal_wait wait;
+	struct fastrpc_ioctl_dspsignal_create cre;
+	struct fastrpc_ioctl_dspsignal_destroy des;
+	struct fastrpc_ioctl_dspsignal_cancel_wait canc;
+};
+
+/* Internal driver entry points backing the ioctl interface */
+int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
+			uint32_t kernel,
+			struct fastrpc_ioctl_invoke_async *inv);
+
+int fastrpc_internal_invoke2(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_invoke2 *inv2);
+
+int fastrpc_internal_munmap(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_munmap *ud);
+
+int fastrpc_internal_mem_map(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_mem_map *ud);
+
+int fastrpc_internal_mem_unmap(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_mem_unmap *ud);
+
+int fastrpc_internal_mmap(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_mmap *ud);
+
+int fastrpc_init_process(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_init_attrs *uproc);
+
+int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info);
+
+int fastrpc_internal_control(struct fastrpc_file *fl,
+			struct fastrpc_ioctl_control *cp);
+
+int fastrpc_setmode(unsigned long ioctl_param,
+			struct fastrpc_file *fl);
+ +int fastrpc_get_info_from_kernel( + struct fastrpc_ioctl_capability *cap, + struct fastrpc_file *fl); + +int fastrpc_dspsignal_signal(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_signal *sig); + +int fastrpc_dspsignal_wait(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_wait *wait); + +int fastrpc_dspsignal_create(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_create *create); + +int fastrpc_dspsignal_destroy(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_destroy *destroy); + +int fastrpc_dspsignal_cancel_wait(struct fastrpc_file *fl, + struct fastrpc_ioctl_dspsignal_cancel_wait *cancel); + +void fastrpc_rproc_trace_events(const char *name, const char *event, + const char *subevent); + +#endif diff --git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c new file mode 100644 index 0000000000..f4c7fc470b --- /dev/null +++ b/dsp/adsprpc_socket.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "adsprpc_shared.h" + +// Registered QRTR service ID +#define FASTRPC_REMOTE_SERVER_SERVICE_ID 5012 + +// Number of remote domains +#define REMOTE_DOMAINS (2) + +/* + * Fastrpc remote server instance ID bit-map: + * + * bits 0-1 : channel ID + * bits 2-7 : reserved + * bits 8-9 : remote domains (SECURE_PD, GUEST_OS) + * bits 10-31 : reserved + */ +#define REMOTE_DOMAIN_INSTANCE_INDEX (8) +#define GET_SERVER_INSTANCE(remote_domain, cid) \ + ((remote_domain << REMOTE_DOMAIN_INSTANCE_INDEX) | cid) +#define GET_CID_FROM_SERVER_INSTANCE(remote_server_instance) \ + (remote_server_instance & 0x3) + +// Maximun received fastprc packet size +#define FASTRPC_SOCKET_RECV_SIZE sizeof(union rsp) + +union rsp { + struct smq_invoke_rsp rsp; + struct smq_invoke_rspv2 rsp2; + struct smq_notif_rspv3 rsp3; +}; + +enum fastrpc_remote_domains_id { + SECURE_PD = 0, + GUEST_OS = 1, +}; + +struct fastrpc_socket { + struct socket *sock; // Socket used to communicate with remote domain + struct sockaddr_qrtr local_sock_addr; // Local socket address on kernel side + struct sockaddr_qrtr remote_sock_addr; // Remote socket address on remote domain side + struct mutex socket_mutex; // Mutex for socket synchronization + void *recv_buf; // Received packet buffer +}; + +struct frpc_transport_session_control { + struct fastrpc_socket frpc_socket; // Fastrpc socket data structure + uint32_t remote_server_instance; // Unique remote server instance ID + bool remote_domain_available; // Flag to indicate if remote domain is enabled + bool remote_server_online; // Flag to indicate remote server status +}; + +/** + * glist_session_ctrl + * Static list containing socket session information for all remote domains. + * Update session flag remote_domain_available whenever a remote domain will be using + * kernel sockets. 
+ */ +static struct frpc_transport_session_control glist_session_ctrl[NUM_CHANNELS][REMOTE_DOMAINS] = { + [CDSP_DOMAIN_ID][SECURE_PD].remote_domain_available = true +}; + +/** + * verify_transport_device() + * @cid: Channel ID. + * @trusted_vm: Flag to indicate whether session is for secure PD or guest OS. + * + * Obtain remote session information given channel ID and trusted_vm + * and verify that socket has been created and remote server is up. + * + * Return: 0 on success or negative errno value on failure. + */ +inline int verify_transport_device(int cid, bool trusted_vm) +{ + int remote_domain, err = 0; + struct frpc_transport_session_control *session_control = NULL; + + remote_domain = (trusted_vm) ? SECURE_PD : GUEST_OS; + VERIFY(err, remote_domain < REMOTE_DOMAINS); + if (err) { + err = -ECHRNG; + goto bail; + } + + session_control = &glist_session_ctrl[cid][remote_domain]; + VERIFY(err, session_control->remote_domain_available); + if (err) { + err = -ECHRNG; + goto bail; + } + + mutex_lock(&session_control->frpc_socket.socket_mutex); + VERIFY(err, session_control->frpc_socket.sock); + VERIFY(err, session_control->remote_server_online); + if (err) { + err = -EPIPE; + mutex_unlock(&session_control->frpc_socket.socket_mutex); + goto bail; + } + mutex_unlock(&session_control->frpc_socket.socket_mutex); + +bail: + return err; +} + +static void fastrpc_recv_new_server(struct frpc_transport_session_control *session_control, + unsigned int service, unsigned int instance, + unsigned int node, unsigned int port) +{ + uint32_t remote_server_instance = session_control->remote_server_instance; + + /* Ignore EOF marker */ + if (!node && !port) + return; + + if (service != FASTRPC_REMOTE_SERVER_SERVICE_ID || + instance != remote_server_instance) + return; + + mutex_lock(&session_control->frpc_socket.socket_mutex); + session_control->frpc_socket.remote_sock_addr.sq_family = AF_QIPCRTR; + session_control->frpc_socket.remote_sock_addr.sq_node = node; + 
session_control->frpc_socket.remote_sock_addr.sq_port = port; + session_control->remote_server_online = true; + mutex_unlock(&session_control->frpc_socket.socket_mutex); + ADSPRPC_INFO("Remote server is up: remote ID (0x%x)", remote_server_instance); +} + +static void fastrpc_recv_del_server(struct frpc_transport_session_control *session_control, + unsigned int node, unsigned int port) +{ + uint32_t remote_server_instance = session_control->remote_server_instance; + + /* Ignore EOF marker */ + if (!node && !port) + return; + + if (node != session_control->frpc_socket.remote_sock_addr.sq_node || + port != session_control->frpc_socket.remote_sock_addr.sq_port) + return; + + mutex_lock(&session_control->frpc_socket.socket_mutex); + session_control->frpc_socket.remote_sock_addr.sq_node = 0; + session_control->frpc_socket.remote_sock_addr.sq_port = 0; + session_control->remote_server_online = false; + mutex_unlock(&session_control->frpc_socket.socket_mutex); + ADSPRPC_WARN("Remote server is down: remote ID (0x%x)", remote_server_instance); +} + +/** + * fastrpc_recv_ctrl_pkt() + * @session_control: Data structure that contains information related to socket and + * remote server availability. + * @buf: Control packet. + * @len: Control packet length. + * + * Handle control packet status notifications from remote domain. 
+ */ +static void fastrpc_recv_ctrl_pkt(struct frpc_transport_session_control *session_control, + const void *buf, size_t len) +{ + const struct qrtr_ctrl_pkt *pkt = buf; + + if (len < sizeof(struct qrtr_ctrl_pkt)) { + ADSPRPC_WARN("Ignoring short control packet (%d bytes)", len); + return; + } + + switch (le32_to_cpu(pkt->cmd)) { + case QRTR_TYPE_NEW_SERVER: + fastrpc_recv_new_server(session_control, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_SERVER: + fastrpc_recv_del_server(session_control, + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + } +} + +/** + * fastrpc_socket_callback() + * @sk: Sock data structure with information related to the callback response. + * + * Callback function to receive responses from socket layer. + * We expect to receive control packets with remote domain status notifications or + * RPC data packets from remote domain. 
+ */ +static void fastrpc_socket_callback(struct sock *sk) +{ + int err = 0, cid = 0; + struct kvec msg = {0}; + struct sockaddr_qrtr remote_sock_addr = {0}; + struct msghdr remote_server = {0}; + struct frpc_transport_session_control *session_control = NULL; + + remote_server.msg_name = &remote_sock_addr; + remote_server.msg_namelen = sizeof(remote_sock_addr); + trace_fastrpc_msg("socket_callback: begin"); + VERIFY(err, sk); + if (err) { + err = -EFAULT; + goto bail; + } + + rcu_read_lock(); + session_control = rcu_dereference_sk_user_data(sk); + rcu_read_unlock(); + VERIFY(err, session_control); + if (err) { + err = -EFAULT; + goto bail; + } + + msg.iov_base = session_control->frpc_socket.recv_buf; + msg.iov_len = FASTRPC_SOCKET_RECV_SIZE; + err = kernel_recvmsg(session_control->frpc_socket.sock, &remote_server, &msg, 1, + msg.iov_len, MSG_DONTWAIT); + if (err < 0) + goto bail; + + if (remote_sock_addr.sq_node == session_control->frpc_socket.local_sock_addr.sq_node && + remote_sock_addr.sq_port == QRTR_PORT_CTRL) { + fastrpc_recv_ctrl_pkt(session_control, session_control->frpc_socket.recv_buf, + FASTRPC_SOCKET_RECV_SIZE); + } else { + cid = GET_CID_FROM_SERVER_INSTANCE(session_control->remote_server_instance); + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + fastrpc_handle_rpc_response(msg.iov_base, msg.iov_len, cid); + } +bail: + if (err < 0) { + ADSPRPC_ERR( + "invalid response data %pK, len %d from remote ID (0x%x) err %d\n", + msg.iov_base, msg.iov_len, session_control->remote_server_instance, err); + } + + trace_fastrpc_msg("socket_callback: end"); +} + +/** + * fastrpc_transport_send() + * @cid: Channel ID. + * @rpc_msg: RPC message to send to remote domain. + * @rpc_msg_size: RPC message size. + * @trusted_vm: Flag to indicate whether to send message to secure PD or guest OS. + * + * Send RPC message to remote domain. Depending on trusted_vm flag message will be + * sent to secure PD or guest OS on remote subsystem. 
+ * Depending on the channel ID and remote domain, a corresponding socket is retrieved + * from glist_session_ctrl and is use to send RPC message. + * + * Return: 0 on success or negative errno value on failure. + */ +int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm) +{ + int err = 0, remote_domain; + struct fastrpc_socket *frpc_socket = NULL; + struct frpc_transport_session_control *session_control = NULL; + struct msghdr remote_server = {0}; + struct kvec msg = {0}; + + remote_domain = (trusted_vm) ? SECURE_PD : GUEST_OS; + VERIFY(err, remote_domain < REMOTE_DOMAINS); + if (err) { + err = -ECHRNG; + goto bail; + } + session_control = &glist_session_ctrl[cid][remote_domain]; + VERIFY(err, session_control->remote_domain_available); + if (err) { + err = -ECHRNG; + goto bail; + } + frpc_socket = &session_control->frpc_socket; + remote_server.msg_name = &frpc_socket->remote_sock_addr; + remote_server.msg_namelen = sizeof(frpc_socket->remote_sock_addr); + + msg.iov_base = rpc_msg; + msg.iov_len = rpc_msg_size; + + mutex_lock(&frpc_socket->socket_mutex); + VERIFY(err, frpc_socket->sock); + VERIFY(err, session_control->remote_server_online); + if (err) { + err = -EPIPE; + mutex_unlock(&frpc_socket->socket_mutex); + goto bail; + } + err = kernel_sendmsg(frpc_socket->sock, &remote_server, &msg, 1, msg.iov_len); + mutex_unlock(&frpc_socket->socket_mutex); +bail: + return err; +} + +/** + * create_socket() + * @session_control: Data structure that contains information related to socket and + * remote server availability. + * + * Initializes and creates a kernel socket. + * + * Return: pointer to a socket on success or negative errno value on failure. 
+ */ +static struct socket *create_socket(struct frpc_transport_session_control *session_control) +{ + int err = 0; + struct socket *sock = NULL; + struct fastrpc_socket *frpc_socket = NULL; + + err = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, + PF_QIPCRTR, &sock); + if (err < 0) { + ADSPRPC_ERR("sock_create_kern failed with err %d\n", err); + goto bail; + } + frpc_socket = &session_control->frpc_socket; + err = kernel_getsockname(sock, (struct sockaddr *)&frpc_socket->local_sock_addr); + if (err < 0) { + sock_release(sock); + ADSPRPC_ERR("kernel_getsockname failed with err %d\n", err); + goto bail; + } + rcu_assign_sk_user_data(sock->sk, session_control); + sock->sk->sk_data_ready = fastrpc_socket_callback; + sock->sk->sk_error_report = fastrpc_socket_callback; +bail: + if (err < 0) + return ERR_PTR(err); + else + return sock; +} + +/** + * register_remote_server_notifications() + * @frpc_socket: Socket to send message to register for remote service notifications. + * @remote_server_instance: ID to uniquely identify remote server + * + * Register socket to receive status notifications from remote service + * using remote service ID FASTRPC_REMOTE_SERVER_SERVICE_ID and instance ID. + * + * Return: 0 on success or negative errno value on failure. 
+ */ +static int register_remote_server_notifications(struct fastrpc_socket *frpc_socket, + uint32_t remote_server_instance) +{ + struct qrtr_ctrl_pkt pkt = {0}; + struct sockaddr_qrtr sq = {0}; + struct msghdr remote_server = {0}; + struct kvec msg = { &pkt, sizeof(pkt) }; + int err = 0; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP); + pkt.server.service = cpu_to_le32(FASTRPC_REMOTE_SERVER_SERVICE_ID); + pkt.server.instance = cpu_to_le32(remote_server_instance); + + sq.sq_family = frpc_socket->local_sock_addr.sq_family; + sq.sq_node = frpc_socket->local_sock_addr.sq_node; + sq.sq_port = QRTR_PORT_CTRL; + + remote_server.msg_name = &sq; + remote_server.msg_namelen = sizeof(sq); + + err = kernel_sendmsg(frpc_socket->sock, &remote_server, &msg, 1, sizeof(pkt)); + if (err < 0) + goto bail; + +bail: + if (err < 0) + ADSPRPC_ERR("failed to send lookup registration: %d\n", err); + + return err; +} + +inline void fastrpc_transport_session_init(int cid, char *subsys) +{ +} + +inline void fastrpc_transport_session_deinit(int cid) +{ +} + +int fastrpc_wait_for_transport_interrupt(int cid, unsigned int flags) +{ + return 0; +} + +void fastrpc_rproc_trace_events(const char *name, const char *event, + const char *subevent) +{ +} + +/** + * fastrpc_transport_init() - Initialize sockets for fastrpc driver. + * + * Initialize and create all sockets that are enabled from all channels + * and remote domains. + * Traverse array glist_session_ctrl and initialize session if remote + * domain is enabled. + * + * Return: 0 on success or negative errno value on failure. 
+ */ +int fastrpc_transport_init(void) +{ + int err = 0, cid = 0, ii = 0; + struct socket *sock = NULL; + struct fastrpc_socket *frpc_socket = NULL; + struct frpc_transport_session_control *session_control = NULL; + + for (cid = 0; cid < NUM_CHANNELS; cid++) { + for (ii = 0; ii < REMOTE_DOMAINS; ii++) { + session_control = &glist_session_ctrl[cid][ii]; + if (!session_control->remote_domain_available) + continue; + + session_control->remote_server_online = false; + frpc_socket = &session_control->frpc_socket; + mutex_init(&frpc_socket->socket_mutex); + + sock = create_socket(session_control); + if (IS_ERR_OR_NULL(sock)) { + err = PTR_ERR(sock); + goto bail; + } + + frpc_socket->sock = sock; + frpc_socket->recv_buf = kzalloc(FASTRPC_SOCKET_RECV_SIZE, GFP_KERNEL); + if (!frpc_socket->recv_buf) { + err = -ENOMEM; + goto bail; + } + session_control->remote_server_instance = GET_SERVER_INSTANCE(ii, cid); + err = register_remote_server_notifications(frpc_socket, + session_control->remote_server_instance); + if (err < 0) + goto bail; + } + } + err = 0; +bail: + if (err) + ADSPRPC_ERR("fastrpc_socket_init failed with err %d\n", err); + return err; +} + +/** + * fastrpc_transport_deinit() - Deinitialize sockets for fastrpc driver. + * + * Deinitialize and release all sockets that are enabled from all channels + * and remote domains. + * Traverse array glist_session_ctrl and deinitialize session if remote + * domain is enabled. 
+ */ +void fastrpc_transport_deinit(void) +{ + int ii = 0; + struct fastrpc_socket *frpc_socket = NULL; + struct frpc_transport_session_control *session_control = NULL; + int cid = -1; + + for (cid = 0; cid < NUM_CHANNELS; cid++) { + for (ii = 0; ii < REMOTE_DOMAINS; ii++) { + session_control = &glist_session_ctrl[cid][ii]; + frpc_socket = &session_control->frpc_socket; + if (!session_control->remote_domain_available) + continue; + + if (frpc_socket->sock) + sock_release(frpc_socket->sock); + + kfree(frpc_socket->recv_buf); + frpc_socket->recv_buf = NULL; + frpc_socket->sock = NULL; + mutex_destroy(&frpc_socket->socket_mutex); + } + } +} From 7e0eff3f2b70603296f605533b80ace8f2b25fdd Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Tue, 16 Aug 2022 10:30:47 -0700 Subject: [PATCH 003/146] adsprpc: Modify code to pass compilation Add missing module import to macro for DMA_BUF and add fallthrough statements Change-Id: Ied625ee1000afca9f269989ff2635ea04a8fe196 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 32b51572c3..f1804a5658 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -251,6 +251,8 @@ enum fastrpc_proc_attr { #define DSPSIGNAL_VERBOSE(x, ...) 
/*#define DSPSIGNAL_VERBOSE ADSPRPC_INFO*/ +MODULE_IMPORT_NS(DMA_BUF); + static struct dentry *debugfs_root; static struct dentry *debugfs_global_file; @@ -1913,6 +1915,7 @@ static void context_notify_user(struct smq_invoke_ctx *ctx, ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc); switch (rsp_flags) { case NORMAL_RESPONSE: + fallthrough; case COMPLETE_SIGNAL: /* normal and complete response with return value */ ctx->is_work_done = true; @@ -1927,6 +1930,7 @@ static void context_notify_user(struct smq_invoke_ctx *ctx, ctx->early_wake_time = early_wake_time; if (ctx->asyncjob.isasyncjob) break; + fallthrough; case EARLY_RESPONSE: /* rpc framework early response with return value */ if (ctx->asyncjob.isasyncjob) @@ -6562,6 +6566,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, case FASTRPC_IOCTL_INVOKE_CRC: if (!size) size = sizeof(struct fastrpc_ioctl_invoke_crc); + fallthrough; case FASTRPC_IOCTL_INVOKE_PERF: if (!size) size = sizeof(struct fastrpc_ioctl_invoke_perf); From cabe8eabfb45cf52f25c0f843006906c3afe4dee Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Thu, 22 Sep 2022 11:37:36 -0700 Subject: [PATCH 004/146] Add trace header file to kernel component Moved trace header file from kernel branch to fastrpc kernel component Change-Id: I3c1dafe9daec89f1164e64606748cb47483e1670 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 2 +- dsp/adsprpc_rpmsg.c | 2 +- dsp/adsprpc_socket.c | 2 +- dsp/fastrpc_trace.h | 385 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 388 insertions(+), 3 deletions(-) create mode 100644 dsp/fastrpc_trace.h diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index f1804a5658..7471e4e2ce 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -56,7 +56,7 @@ #include #define CREATE_TRACE_POINTS -#include +#include "fastrpc_trace.h" #define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C #define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D diff --git a/dsp/adsprpc_rpmsg.c b/dsp/adsprpc_rpmsg.c index 
68ef1c5180..f5ea5b2b6f 100644 --- a/dsp/adsprpc_rpmsg.c +++ b/dsp/adsprpc_rpmsg.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include "fastrpc_trace.h" #include #include "adsprpc_shared.h" diff --git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c index f4c7fc470b..9ff3fa28f8 100644 --- a/dsp/adsprpc_socket.c +++ b/dsp/adsprpc_socket.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include "fastrpc_trace.h" #include "adsprpc_shared.h" // Registered QRTR service ID diff --git a/dsp/fastrpc_trace.h b/dsp/fastrpc_trace.h new file mode 100644 index 0000000000..5bfe2f9779 --- /dev/null +++ b/dsp/fastrpc_trace.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#if !defined(TRACE_FASTRPC_H) || defined(TRACE_HEADER_MULTI_READ) +#define TRACE_FASTRPC_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fastrpc + +/* Path must be relative to location of 'define_trace.h' header in kernel */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/dsp-kernel/dsp + +/* Name of trace header file */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE fastrpc_trace + +#include + +TRACE_EVENT(fastrpc_transport_send, + + TP_PROTO(int cid, uint64_t smq_ctx, + uint64_t ctx, uint32_t handle, + uint32_t sc, uint64_t addr, uint64_t size), + + TP_ARGS(cid, smq_ctx, ctx, handle, sc, addr, size), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, smq_ctx) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + __field(u64, addr) + __field(u64, size) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->smq_ctx = smq_ctx; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + __entry->addr = addr; + __entry->size = size; + ), + + TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x, addr 0x%llx, size %llu", + 
__entry->cid, __entry->smq_ctx, __entry->ctx, __entry->handle, + __entry->sc, __entry->addr, __entry->size) +); + +TRACE_EVENT(fastrpc_transport_response, + + TP_PROTO(int cid, uint64_t ctx, int retval, + uint32_t rsp_flags, uint32_t early_wake_time), + + TP_ARGS(cid, ctx, retval, rsp_flags, early_wake_time), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, ctx) + __field(int, retval) + __field(u32, rsp_flags) + __field(u32, early_wake_time) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->ctx = ctx; + __entry->retval = retval; + __entry->rsp_flags = rsp_flags; + __entry->early_wake_time = early_wake_time; + ), + + TP_printk("from cid %d: ctx 0x%llx, retval 0x%x, rsp_flags %u, early_wake_time %u", + __entry->cid, __entry->ctx, __entry->retval, + __entry->rsp_flags, __entry->early_wake_time) +); + +TRACE_EVENT(fastrpc_context_interrupt, + + TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx, + uint32_t handle, uint32_t sc), + + TP_ARGS(cid, smq_ctx, ctx, handle, sc), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, smq_ctx) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->smq_ctx = smq_ctx; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + ), + + TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x", + __entry->cid, __entry->smq_ctx, + __entry->ctx, __entry->handle, __entry->sc) +); + +TRACE_EVENT(fastrpc_context_restore, + + TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx, + uint32_t handle, uint32_t sc), + + TP_ARGS(cid, smq_ctx, ctx, handle, sc), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, smq_ctx) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->smq_ctx = smq_ctx; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + ), + + TP_printk("for cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x", + __entry->cid, __entry->smq_ctx, 
+ __entry->ctx, __entry->handle, __entry->sc) +); + +TRACE_EVENT(fastrpc_dma_map, + + TP_PROTO(int cid, int fd, uint64_t phys, size_t size, + size_t len, unsigned int attr, int mflags), + + TP_ARGS(cid, fd, phys, size, len, attr, mflags), + + TP_STRUCT__entry( + __field(int, cid) + __field(int, fd) + __field(u64, phys) + __field(size_t, size) + __field(size_t, len) + __field(unsigned int, attr) + __field(int, mflags) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->fd = fd; + __entry->phys = phys; + __entry->size = size; + __entry->len = len; + __entry->attr = attr; + __entry->mflags = mflags; + ), + + TP_printk("cid %d, fd %d, phys 0x%llx, size %zu (len %zu), attr 0x%x, flags 0x%x", + __entry->cid, __entry->fd, __entry->phys, __entry->size, + __entry->len, __entry->attr, __entry->mflags) +); + +TRACE_EVENT(fastrpc_dma_unmap, + + TP_PROTO(int cid, uint64_t phys, size_t size), + + TP_ARGS(cid, phys, size), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, phys) + __field(size_t, size) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->phys = phys; + __entry->size = size; + ), + + TP_printk("cid %d, phys 0x%llx, size %zu", + __entry->cid, __entry->phys, __entry->size) +); + +TRACE_EVENT(fastrpc_dma_alloc, + + TP_PROTO(int cid, uint64_t phys, size_t size, + unsigned long attr, int mflags), + + TP_ARGS(cid, phys, size, attr, mflags), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, phys) + __field(size_t, size) + __field(unsigned long, attr) + __field(int, mflags) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->phys = phys; + __entry->size = size; + __entry->attr = attr; + __entry->mflags = mflags; + ), + + TP_printk("cid %d, phys 0x%llx, size %zu, attr 0x%lx, flags 0x%x", + __entry->cid, __entry->phys, __entry->size, + __entry->attr, __entry->mflags) +); + +TRACE_EVENT(fastrpc_dma_free, + + TP_PROTO(int cid, uint64_t phys, size_t size), + + TP_ARGS(cid, phys, size), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, phys) 
+ __field(size_t, size) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->phys = phys; + __entry->size = size; + ), + + TP_printk("cid %d, phys 0x%llx, size %zu", + __entry->cid, __entry->phys, __entry->size) +); + +TRACE_EVENT(fastrpc_context_complete, + + TP_PROTO(int cid, uint64_t smq_ctx, int retval, + uint64_t ctx, uint32_t handle, uint32_t sc), + + TP_ARGS(cid, smq_ctx, retval, ctx, handle, sc), + + TP_STRUCT__entry( + __field(int, cid) + __field(u64, smq_ctx) + __field(int, retval) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + ), + + TP_fast_assign( + __entry->cid = cid; + __entry->smq_ctx = smq_ctx; + __entry->retval = retval; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + ), + + TP_printk("from cid %d: smq_ctx 0x%llx, retval 0x%x, ctx 0x%llx, handle 0x%x, sc 0x%x", + __entry->cid, __entry->smq_ctx, __entry->retval, + __entry->ctx, __entry->handle, __entry->sc) +); + +TRACE_EVENT(fastrpc_context_alloc, + + TP_PROTO(uint64_t smq_ctx, uint64_t ctx, + uint32_t handle, uint32_t sc), + + TP_ARGS(smq_ctx, ctx, handle, sc), + + TP_STRUCT__entry( + __field(u64, smq_ctx) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + ), + + TP_fast_assign( + __entry->smq_ctx = smq_ctx; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + ), + + TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x", + __entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc) +); + +TRACE_EVENT(fastrpc_context_free, + + TP_PROTO(uint64_t smq_ctx, uint64_t ctx, + uint32_t handle, uint32_t sc), + + TP_ARGS(smq_ctx, ctx, handle, sc), + + TP_STRUCT__entry( + __field(u64, smq_ctx) + __field(u64, ctx) + __field(u32, handle) + __field(u32, sc) + ), + + TP_fast_assign( + __entry->smq_ctx = smq_ctx; + __entry->ctx = ctx; + __entry->handle = handle; + __entry->sc = sc; + ), + + TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x", + __entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc) +); 
+ +TRACE_EVENT(fastrpc_perf_counters, + + TP_PROTO(uint32_t handle, uint32_t sc, + uint64_t count, uint64_t flush, uint64_t map, + uint64_t copy, uint64_t link, uint64_t getargs, + uint64_t putargs, uint64_t invargs, uint64_t invoke, + uint64_t tid), + + TP_ARGS(handle, sc, count, flush, map, copy, link, getargs, + putargs, invargs, invoke, tid), + + TP_STRUCT__entry( + __field(u32, handle) + __field(u32, sc) + __field(u64, count) + __field(u64, flush) + __field(u64, map) + __field(u64, copy) + __field(u64, link) + __field(u64, getargs) + __field(u64, putargs) + __field(u64, invargs) + __field(u64, invoke) + __field(u64, tid) + ), + + TP_fast_assign( + __entry->handle = handle; + __entry->sc = sc; + __entry->count = count; + __entry->flush = flush; + __entry->map = map; + __entry->copy = copy; + __entry->link = link; + __entry->getargs = getargs; + __entry->putargs = putargs; + __entry->invargs = invargs; + __entry->invoke = invoke; + __entry->tid = tid; + ), + + TP_printk("for: handle 0x%x, sc 0x%x, count %lld, flush %lld ns, map %lld ns, copy %lld ns, link %lld ns, getargs %lld ns, putargs %lld ns, invargs %lld ns, invoke %lld ns, tid %lld", + __entry->handle, __entry->sc, __entry->count, + __entry->flush, __entry->map, __entry->copy, __entry->link, + __entry->getargs, __entry->putargs, __entry->invargs, + __entry->invoke, __entry->tid) +); + +TRACE_EVENT(fastrpc_msg, + TP_PROTO(const char *message), + TP_ARGS(message), + TP_STRUCT__entry(__string(buf, message)), + TP_fast_assign( + __assign_str(buf, message); + ), + TP_printk(" %s", __get_str(buf)) +); + +#endif + +/* This part must be outside protection */ +#include From c64cff5b57fe8521c459eb65dd12e968b8a3b0f2 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Sun, 4 Sep 2022 08:16:38 -0700 Subject: [PATCH 005/146] adsprpc: msm: Add rules for compilation Adding rules for compiling frpc_adsprpc ko and cdsploader ko. 
Change-Id: Ie0d13018fea971ffa20f0183c17a299ca47f29c7 Signed-off-by: Anirudh Raghavendra --- Android.mk | 41 +++++++++++ Kbuild | 39 ++++++++++ Makefile | 14 ++++ config/pineappledsp.conf | 6 ++ config/pineappledspconf.h | 6 ++ dsp/adsprpc.c | 2 +- dsp_kernel_board.mk | 5 ++ dsp_kernel_product.mk | 3 + include/linux/fastrpc.h | 149 ++++++++++++++++++++++++++++++++++++++ product.mk | 2 + 10 files changed, 266 insertions(+), 1 deletion(-) create mode 100644 Android.mk create mode 100644 Kbuild create mode 100644 Makefile create mode 100644 config/pineappledsp.conf create mode 100644 config/pineappledspconf.h create mode 100644 dsp_kernel_board.mk create mode 100644 dsp_kernel_product.mk create mode 100644 include/linux/fastrpc.h create mode 100644 product.mk diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..dfddf359fd --- /dev/null +++ b/Android.mk @@ -0,0 +1,41 @@ +DLKM_DIR := device/qcom/common/dlkm + +LOCAL_PATH := $(call my-dir) + +DSP_BLD_DIR := $(abspath .)/vendor/qcom/opensource/dsp-kernel + +include $(CLEAR_VARS) +$(info DLKM_DIR = $(DLKM_DIR)) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := dsp-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk + +include $(CLEAR_VARS) +$(info DLKM_DIR = $(DLKM_DIR)) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := frpc-adsprpc.ko +LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/include/linux +LOCAL_MODULE_KBUILD_NAME := frpc-adsprpc.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +KBUILD_OPTIONS += DSP_ROOT=$(DSP_BLD_DIR) +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +include $(DLKM_DIR)/Build_external_kernelmodule.mk + +#include $(CLEAR_VARS) +#$(info DLKM_DIR = $(DLKM_DIR)) +#LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) 
+#LOCAL_MODULE := cdsp-loader.ko +#LOCAL_MODULE_KBUILD_NAME := cdsp-loader.ko +#LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +#KBUILD_OPTIONS += DSP_ROOT=$(DSP_BLD_DIR) +#KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +#include $(DLKM_DIR)/Build_external_kernelmodule.mk + +# print out variables +$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) +$(info intermediates dsp symvers path = $(call intermediates-dir-for,DLKM,dsp-module-symvers)) +$(info DLKM_DIR = $(DLKM_DIR)) + diff --git a/Kbuild b/Kbuild new file mode 100644 index 0000000000..868bfd5ede --- /dev/null +++ b/Kbuild @@ -0,0 +1,39 @@ +# ported from Android.mk +$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS)) + +ifeq ($(CONFIG_ARCH_WAIPIO), y) +$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO)) +KBUILD_CPPFLAGS += -DCONFIG_DSP_WAIPIO=1 +ccflags-y += -DCONFIG_DSP_WAIPIO=1 +endif + +ifeq ($(CONFIG_ARCH_KALAMA), y) +$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA)) +KBUILD_CPPFLAGS += -DCONFIG_DSP_KALAMA=1 +ccflags-y += -DCONFIG_DSP_KALAMA=1 +endif + +ifeq ($(CONFIG_ARCH_PINEAPPLE), y) +$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE)) +KBUILD_CPPFLAGS += -DCONFIG_DSP_PINEAPPLE=1 +ccflags-y += -DCONFIG_DSP_PINEAPPLE=1 +endif + +LINUXINCLUDE += -I$(DSP_ROOT)/include/linux + +frpc-adsprpc-y := dsp/adsprpc.o \ + dsp/adsprpc_rpmsg.o \ + +frpc-adsprpc-$(CONFIG_COMPAT) += dsp/adsprpc_compat.o \ + +frpc_trusted-adsprpc-y := dsp/adsprpc.o \ + dsp/adsprpc_compat.o \ + dsp/adsprpc_socket.o \ + +#cdsp-loader-y := dsp/cdsp-loader.o + +#obj-m := frpc-adsprpc.o cdsp-loader.o +obj-m := frpc-adsprpc.o + +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko +#BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..b35c11f27b --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +KBUILD_OPTIONS += DSP_ROOT=$(KERNEL_SRC)/$(M) V=1 + +all: + $(MAKE) 
-C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS) + +modules_install: + $(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/config/pineappledsp.conf b/config/pineappledsp.conf new file mode 100644 index 0000000000..9cb6a359e8 --- /dev/null +++ b/config/pineappledsp.conf @@ -0,0 +1,6 @@ +ifeq ($(CONFIG_QGKI),y) +export CONFIG_MSM_ADSPRPC =y +else +export CONFIG_MSM_ADSPRPC =m +endif +#export CONFIG_MSM_DSP=y \ No newline at end of file diff --git a/config/pineappledspconf.h b/config/pineappledspconf.h new file mode 100644 index 0000000000..5bcc9fd1db --- /dev/null +++ b/config/pineappledspconf.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_MSM_ADSPRPC 1 diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 7471e4e2ce..8b4d70fefa 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -41,7 +41,7 @@ #include #include "adsprpc_compat.h" #include "adsprpc_shared.h" -#include +#include "fastrpc.h" #include #include #include diff --git a/dsp_kernel_board.mk b/dsp_kernel_board.mk new file mode 100644 index 0000000000..d5d3dc0d14 --- /dev/null +++ b/dsp_kernel_board.mk @@ -0,0 +1,5 @@ +ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko +#BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-trusted-adsprpc.ko +#BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko +endif diff --git a/dsp_kernel_product.mk b/dsp_kernel_product.mk new file mode 100644 index 0000000000..0f18b541d9 --- /dev/null +++ b/dsp_kernel_product.mk @@ -0,0 +1,3 @@ +PRODUCT_PACKAGES += frpc-adsprpc.ko +#PRODUCT_PACKAGES += frpc_trusted-adsprpc.ko +#PRODUCT_PACKAGES += cdsp-loader.ko diff --git a/include/linux/fastrpc.h 
b/include/linux/fastrpc.h new file mode 100644 index 0000000000..b828d9d8a7 --- /dev/null +++ b/include/linux/fastrpc.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __LINUX_fastrpc_H +#define __LINUX_fastrpc_H + +#include +#include +#include + +#define FASTRPC_DRV_NAME_SIZE 32 + +enum fastrpc_driver_status { + FASTRPC_PROC_DOWN = 0, +}; +enum fastrpc_driver_invoke_nums { + FASTRPC_DEV_MAP_DMA = 1, + FASTRPC_DEV_UNMAP_DMA, +}; + +/** + * struct fastrpc_dev_map_dma - fastrpc dma buffer map structure + * @buf : Shared DMA buf object + * @attrs : Attributes to map buffer on IOMMU + * @size : Size of DMA buffer + * @v_dsp_addr : Virtual addr of DSP after mapping the buffer on DSP + */ +struct fastrpc_dev_map_dma { + struct dma_buf *buf; + uint32_t attrs; + size_t size; + uint64_t v_dsp_addr; +}; + +/** + * struct fastrpc_dev_unmap_dma - fastrpc dma buffer unmap structure + * @buf : Shared DMA buf object + * @size : Size of DMA buffer + */ +struct fastrpc_dev_unmap_dma { + struct dma_buf *buf; + size_t size; +}; + +/** + * fastrpc_device - device that belong to the fastrpc bus + * @hn: Head node to add to fastrpc device list + * @dev: the device struct + * @handle: handle of the process + * @fl: process file of fastrpc device + * @dev_close: flag to determine if device is closed + * @refs: reference count of drivers using the device + */ +struct fastrpc_device { + struct hlist_node hn; + struct device dev; + int handle; + struct fastrpc_file *fl; + bool dev_close; + unsigned int refs; +}; + +#define to_fastrpc_device(d) container_of(d, struct fastrpc_device, dev) + +/** + * struct fastrpc_driver - fastrpc driver struct + * @hn: Node to add to fastrpc driver list + * @driver: underlying device driver + * @device: device that is matching to driver + * @handle: handle of the process + * @create: 0 to attach, 1 to create process + * @probe: invoked when a matching fastrpc 
device (i.e. device) is found + * @callback: invoked when there is a status change in the process + */ +struct fastrpc_driver { + struct hlist_node hn; + struct device_driver driver; + struct device *device; + int handle; + int create; + int (*probe)(struct fastrpc_device *dev); + int (*callback)(struct fastrpc_device *dev, + enum fastrpc_driver_status status); +}; + +#define to_fastrpc_driver(x) container_of((x), struct fastrpc_driver, driver) + +//#if IS_ENABLED(CONFIG_MSM_ADSPRPC) || IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) +/** + * function fastrpc_driver_register - Register fastrpc driver + * @drv: Initialized fastrpc driver structure pointer + */ +int fastrpc_driver_register(struct fastrpc_driver *drv); + +/** + * function fastrpc_driver_unregister - Un-register fastrpc driver + * @drv: fastrpc driver structure pointer + */ +void fastrpc_driver_unregister(struct fastrpc_driver *drv); + +/** + * function fastrpc_driver_invoke - fastrpc driver invocation function + * Invoke fastrpc driver using fastrpc_device received in probe of registration + * @dev : Device received in probe of registration. + * @invoke_num : Invocation number of operation, + * one of "fastrpc_driver_invoke_nums" + * @invoke_param: Address of invocation structure corresponding to invoke_num + * (struct fastrpc_dev_map_dma *) for FASTRPC_DEV_MAP_DMA + * (struct fastrpc_dev_unmap_dma *) for FASTRPC_DEV_UNMAP_DMA. 
+ */ +long fastrpc_driver_invoke(struct fastrpc_device *dev, + enum fastrpc_driver_invoke_nums invoke_num, unsigned long invoke_param); + +/* +#else +static inline int fastrpc_driver_register(struct fastrpc_driver *drv) +{ return 0; } + +static inline void fastrpc_driver_unregister(struct fastrpc_driver *drv) +{ return; } + +static inline long fastrpc_driver_invoke(struct fastrpc_device *dev, + enum fastrpc_driver_invoke_nums invoke_num, unsigned long invoke_param) +{ return 0; } +#endif +*/ +/** + * module_fastrpc_driver() - Helper macro for registering a fastrpc driver + * @__fastrpc_driver: fastrpc_driver struct + * + * Helper macro for fastrpc drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate code. Each module may only + * use this macro once, and calling it replaces module_init and module_exit. + */ +#define module_fastrpc_driver(__fastrpc_driver) \ +static int __init __fastrpc_driver##_init(void) \ +{ \ + return fastrpc_driver_register(&(__fastrpc_driver)); \ +} \ +module_init(__fastrpc_driver##_init); \ +static void __exit __fastrpc_driver##_exit(void) \ +{ \ + fastrpc_driver_unregister(&(__fastrpc_driver)); \ +} \ +module_exit(__fastrpc_driver##_exit) + +#endif /* __LINUX_fastrpc_H */ diff --git a/product.mk b/product.mk new file mode 100644 index 0000000000..34d12e010c --- /dev/null +++ b/product.mk @@ -0,0 +1,2 @@ +PRODUCT_PACKAGES += frpc-adsprpc.ko +#PRODUCT_PACKAGES += cdsp-loader.ko From 8ac83763ceebbd50bff828257e4838f2e0d9bf77 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Wed, 14 Sep 2022 12:13:06 -0700 Subject: [PATCH 006/146] Enable wait for PD only when PDR mechanism is up When the PDR mechanism is down, the value of ispdup is always 0. This makes the sensors or audio daemon wait indefinitely in the kernel. Disabling wait in kernel when PDR mechanism is not working. 
Change-Id: I556dc0aa3908bc97b433cbd6060b88a710923b58 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 8b4d70fefa..c51826d13f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5998,6 +5998,7 @@ static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) err = fastrpc_get_spd_session(sloc_name, &session, &cid); if (err || cid != fl->cid) goto bail; +#if IS_ENABLED(CONFIG_QCOM_PDR_HELPERS) if (!strcmp(fl->servloc_name, AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME) || !strcmp(fl->servloc_name, SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME) || @@ -6008,6 +6009,9 @@ static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) atomic_read(&me->channel[cid].spd[session].ispdup)); goto bail; } +#else + (void)me; +#endif } bail: return err; From 135c71aeb2e84f98b0696c7023fae93850fe761a Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Mon, 3 Oct 2022 16:33:21 -0700 Subject: [PATCH 007/146] Add cdsp loader and enable compilation Added cdsp loader file and enabled compilation. Shared object not yet shipped to superimg. 
Change-Id: Iadcc81a925b198115db627eccbd7181eec053f47 Signed-off-by: Anirudh Raghavendra --- Android.mk | 18 +-- Kbuild | 5 +- dsp/cdsp-loader.c | 301 ++++++++++++++++++++++++++++++++++++++++++ dsp_kernel_board.mk | 2 +- dsp_kernel_product.mk | 2 +- 5 files changed, 314 insertions(+), 14 deletions(-) create mode 100644 dsp/cdsp-loader.c diff --git a/Android.mk b/Android.mk index dfddf359fd..6b9a3fe586 100644 --- a/Android.mk +++ b/Android.mk @@ -24,15 +24,15 @@ KBUILD_OPTIONS += DSP_ROOT=$(DSP_BLD_DIR) KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) include $(DLKM_DIR)/Build_external_kernelmodule.mk -#include $(CLEAR_VARS) -#$(info DLKM_DIR = $(DLKM_DIR)) -#LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) -#LOCAL_MODULE := cdsp-loader.ko -#LOCAL_MODULE_KBUILD_NAME := cdsp-loader.ko -#LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) -#KBUILD_OPTIONS += DSP_ROOT=$(DSP_BLD_DIR) -#KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) -#include $(DLKM_DIR)/Build_external_kernelmodule.mk +include $(CLEAR_VARS) +$(info DLKM_DIR = $(DLKM_DIR)) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := cdsp-loader.ko +LOCAL_MODULE_KBUILD_NAME := cdsp-loader.ko +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +KBUILD_OPTIONS += DSP_ROOT=$(DSP_BLD_DIR) +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +include $(DLKM_DIR)/Build_external_kernelmodule.mk # print out variables $(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) diff --git a/Kbuild b/Kbuild index 868bfd5ede..62fa146890 100644 --- a/Kbuild +++ b/Kbuild @@ -30,10 +30,9 @@ frpc_trusted-adsprpc-y := dsp/adsprpc.o \ dsp/adsprpc_compat.o \ dsp/adsprpc_socket.o \ -#cdsp-loader-y := dsp/cdsp-loader.o +cdsp-loader-y := dsp/cdsp-loader.o -#obj-m := frpc-adsprpc.o cdsp-loader.o -obj-m := frpc-adsprpc.o +obj-m := frpc-adsprpc.o cdsp-loader.o BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko #BOARD_VENDOR_KERNEL_MODULES += 
$(KERNEL_MODULES_OUT)/cdsp-loader.ko diff --git a/dsp/cdsp-loader.c b/dsp/cdsp-loader.c new file mode 100644 index 0000000000..0caad0e2b2 --- /dev/null +++ b/dsp/cdsp-loader.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2014, 2017-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BOOT_CMD 1 +#define IMAGE_UNLOAD_CMD 0 + +#define CDSP_SUBSYS_DOWN 0 +#define CDSP_SUBSYS_LOADED 1 + +static ssize_t cdsp_boot_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count); + +struct cdsp_loader_private { + void *pil_h; + struct kobject *boot_cdsp_obj; + struct attribute_group *attr_group; +}; + +static struct kobj_attribute cdsp_boot_attribute = + __ATTR(boot, 0220, NULL, cdsp_boot_store); + +static struct attribute *attrs[] = { + &cdsp_boot_attribute.attr, + NULL, +}; + +static u32 cdsp_state = CDSP_SUBSYS_DOWN; +static struct platform_device *cdsp_private; +static void cdsp_loader_unload(struct platform_device *pdev); + +static int cdsp_loader_do(struct platform_device *pdev) +{ + struct cdsp_loader_private *priv = NULL; + phandle rproc_phandle; + int rc = 0, sz = 0; + const char *img_name; + + if (!pdev) { + pr_err("%s: Platform device null\n", __func__); + goto fail; + } + + if (!pdev->dev.of_node) { + dev_err(&pdev->dev, + "%s: Device tree information missing\n", __func__); + goto fail; + } + + rc = of_property_read_string(pdev->dev.of_node, + "qcom,proc-img-to-load", + &img_name); + if (rc) + goto fail; + + if (!strcmp(img_name, "cdsp")) { + /* cdsp_state always returns "0".*/ + if (cdsp_state == CDSP_SUBSYS_DOWN) { + priv = platform_get_drvdata(pdev); + if (!priv) { + dev_err(&pdev->dev, + "%s: Private data get failed\n", __func__); + goto fail; + } + + sz = of_property_read_u32(pdev->dev.of_node, "qcom,rproc-handle", + &rproc_phandle); + if (sz) { + pr_err("%s: of_property_read failed, 
returned value %d\n", + __func__, sz); + dev_err(&pdev->dev, "error reading rproc phandle\n"); + goto fail; + } + + priv->pil_h = rproc_get_by_phandle(rproc_phandle); + if (!priv->pil_h) { + dev_err(&pdev->dev, "rproc not found\n"); + goto fail; + } + + dev_dbg(&pdev->dev, "%s: calling rproc_boot on %s\n", + __func__, img_name); + rc = rproc_boot(priv->pil_h); + if (rc) { + dev_err(&pdev->dev, "%s: rproc_boot failed with error %d\n", + __func__, rc); + goto fail; + } + + /* Set the state of the CDSP.*/ + cdsp_state = CDSP_SUBSYS_LOADED; + } else if (cdsp_state == CDSP_SUBSYS_LOADED) { + dev_dbg(&pdev->dev, + "%s: CDSP state = 0x%x\n", __func__, cdsp_state); + } + + dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__); + return rc; + } + +fail: + if (pdev) + dev_err(&pdev->dev, + "%s: CDSP image loading failed\n", __func__); + return rc; +} + + +static ssize_t cdsp_boot_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + int ret = 0; + uint32_t boot = 0; + + ret = kstrtou32(buf, 0, &boot); + if (ret) { + pr_debug("%s: invalid arguments for cdsp_loader.\n", __func__); + return ret; + } + + if (boot == BOOT_CMD) { + pr_debug("%s: going to call cdsp_loader_do\n", __func__); + cdsp_loader_do(cdsp_private); + } else if (boot == IMAGE_UNLOAD_CMD) { + pr_debug("%s: going to call cdsp_unloader\n", __func__); + cdsp_loader_unload(cdsp_private); + } + return count; +} + +static void cdsp_loader_unload(struct platform_device *pdev) +{ + struct cdsp_loader_private *priv = NULL; + + priv = platform_get_drvdata(pdev); + + if (!priv) + return; + + if (priv->pil_h) { + dev_dbg(&pdev->dev, "%s: calling subsystem_put\n", __func__); + rproc_shutdown(priv->pil_h); + priv->pil_h = NULL; + } +} + +static int cdsp_loader_init_sysfs(struct platform_device *pdev) +{ + int ret = -EINVAL; + struct cdsp_loader_private *priv = NULL; + + cdsp_private = NULL; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret 
= -ENOMEM; + return ret; + } + + platform_set_drvdata(pdev, priv); + + priv->pil_h = NULL; + priv->boot_cdsp_obj = NULL; + priv->attr_group = devm_kzalloc(&pdev->dev, + sizeof(*(priv->attr_group)), + GFP_KERNEL); + if (!priv->attr_group) { + ret = -ENOMEM; + goto error_return; + } + + priv->attr_group->attrs = attrs; + + priv->boot_cdsp_obj = kobject_create_and_add("boot_cdsp", kernel_kobj); + if (!priv->boot_cdsp_obj) { + dev_err(&pdev->dev, "%s: sysfs create and add failed\n", + __func__); + ret = -ENOMEM; + goto error_return; + } + + ret = sysfs_create_group(priv->boot_cdsp_obj, priv->attr_group); + if (ret) { + dev_err(&pdev->dev, "%s: sysfs create group failed %d\n", + __func__, ret); + goto error_return; + } + + cdsp_private = pdev; + + return 0; + +error_return: + + if (priv->boot_cdsp_obj) { + kobject_del(priv->boot_cdsp_obj); + priv->boot_cdsp_obj = NULL; + } + if (ret) + dev_err(&pdev->dev, "%s failed with ret %d\n", + __func__, ret); + return ret; +} + +static int cdsp_loader_remove(struct platform_device *pdev) +{ + struct cdsp_loader_private *priv = NULL; + + priv = platform_get_drvdata(pdev); + + if (!priv) + return 0; + + if (priv->pil_h) { + rproc_shutdown(priv->pil_h); + priv->pil_h = NULL; + } + + if (priv->boot_cdsp_obj) { + sysfs_remove_group(priv->boot_cdsp_obj, priv->attr_group); + kobject_del(priv->boot_cdsp_obj); + priv->boot_cdsp_obj = NULL; + } + + return 0; +} + +static int cdsp_loader_probe(struct platform_device *pdev) +{ + phandle rproc_phandle; + struct property *prop = NULL; + int size = 0; + struct rproc *cdsp = NULL; + int ret = 0; + + prop = of_find_property(pdev->dev.of_node, "qcom,rproc-handle", &size); + if (!prop) { + dev_err(&pdev->dev, "%s: error reading rproc phandle\n", __func__); + return -ENOPARAM; + } + + rproc_phandle = be32_to_cpup(prop->value); + cdsp = rproc_get_by_phandle(rproc_phandle); + if (!cdsp) { + dev_err(&pdev->dev, "%s: rproc not found\n", __func__); + return -EPROBE_DEFER; + } + + ret = 
cdsp_loader_init_sysfs(pdev); + + if (ret != 0) { + dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__); + return ret; + } + + return 0; +} + +static const struct of_device_id cdsp_loader_dt_match[] = { + { .compatible = "qcom,cdsp-loader" }, + { } +}; +MODULE_DEVICE_TABLE(of, cdsp_loader_dt_match); + +static struct platform_driver cdsp_loader_driver = { + .driver = { + .name = "cdsp-loader", + .of_match_table = cdsp_loader_dt_match, + }, + .probe = cdsp_loader_probe, + .remove = cdsp_loader_remove, +}; + +static int __init cdsp_loader_init(void) +{ + return platform_driver_register(&cdsp_loader_driver); +} +module_init(cdsp_loader_init); + +static void __exit cdsp_loader_exit(void) +{ + platform_driver_unregister(&cdsp_loader_driver); +} +module_exit(cdsp_loader_exit); + +MODULE_DESCRIPTION("CDSP Loader module"); +MODULE_LICENSE("GPL v2"); diff --git a/dsp_kernel_board.mk b/dsp_kernel_board.mk index d5d3dc0d14..6c0a48bf85 100644 --- a/dsp_kernel_board.mk +++ b/dsp_kernel_board.mk @@ -1,5 +1,5 @@ ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko #BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-trusted-adsprpc.ko -#BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko endif diff --git a/dsp_kernel_product.mk b/dsp_kernel_product.mk index 0f18b541d9..b7c2644e47 100644 --- a/dsp_kernel_product.mk +++ b/dsp_kernel_product.mk @@ -1,3 +1,3 @@ PRODUCT_PACKAGES += frpc-adsprpc.ko #PRODUCT_PACKAGES += frpc_trusted-adsprpc.ko -#PRODUCT_PACKAGES += cdsp-loader.ko +PRODUCT_PACKAGES += cdsp-loader.ko From 905ce47596c889d15feef73c8cb705cacb6b1b12 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Thu, 20 Oct 2022 10:27:57 -0700 Subject: [PATCH 008/146] Change copyright in config file Changed copyright in config file to quic markings. 
Change-Id: I92aee971907b545dae038284ac1f76936a7ab123 Signed-off-by: Anirudh Raghavendra --- config/pineappledspconf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/pineappledspconf.h b/config/pineappledspconf.h index 5bcc9fd1db..22586fa1ff 100644 --- a/config/pineappledspconf.h +++ b/config/pineappledspconf.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #define CONFIG_MSM_ADSPRPC 1 From 12862dfca918dfa8bd00fbd8f5a9dd74f05084eb Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Tue, 1 Nov 2022 16:16:03 -0700 Subject: [PATCH 009/146] adsprpc: Fix compilation error Change-Id: I12af79e309f8f0f0d50bad2d88a437f62ecd1d46 --- dsp/adsprpc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index c51826d13f..c6250f212b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6741,10 +6741,14 @@ static void fastrpc_smq_ctx_detail(struct smq_invoke_ctx *smq_ctx, int cid, void if (map) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), - smq_invoke_ctx_params, fastrpc_mmap_params, + smq_invoke_ctx_params, smq_ctx->pid, smq_ctx->tgid, smq_ctx->handle, smq_ctx->sc, smq_ctx->fl, smq_ctx->fds, - smq_ctx->magic, map->fd, map->flags, map->buf, + smq_ctx->magic); + scnprintf(mini_dump_buff + strlen(mini_dump_buff), + MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), + fastrpc_mmap_params, + map->fd, map->flags, map->buf, map->phys, map->size, map->va, map->raddr, map->len, map->refs, map->secure); From 3a10a6b261a991132c2d1f26a49e9ee8777347ed Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Mon, 31 Oct 2022 12:16:10 -0700 Subject: [PATCH 010/146] Resolve compilation errors in adsprpc Number of parameters in print statement was less than number of format specifiers. 
This was causing an error with the new kernel compiler. Change-Id: Iad4ed0ad373559ecd64e8d9bfb1da37532f443c9 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 6 +++--- dsp/adsprpc_shared.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index c6250f212b..0111672c9b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3019,7 +3019,7 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, if (err) { err = -EINVAL; ADSPRPC_ERR( - "user application %s trying to send a kernel RPC message to channel %d, handle 0x%x\n", + "user application trying to send a kernel RPC message to channel %d, handle 0x%x\n", cid, invoke->handle); goto bail; } @@ -5455,7 +5455,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, } mutex_unlock(&fl->map_mutex); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%-20s|%-20s|%-20s|%-20s\n", + "%-20s|%-20s|%-20s\n", "len", "refs", "raddr"); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -5465,7 +5465,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, mutex_lock(&fl->map_mutex); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%-20zu|%-20d|0x%-20lX|%-20d\n\n", + "%-20zu|%-20d|0x%-20lX\n\n", map->len, map->refs, map->raddr); } mutex_unlock(&fl->map_mutex); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 98728c358a..67149b4943 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -48,11 +48,11 @@ /* Pre-defined parameter for print gfa structure*/ -#define smq_invoke_ctx_params "pid: %d, tgid: %d, handle: %p, sc: 0x%x, fl: %p, fd: %p, magic: %d\n" +#define smq_invoke_ctx_params "pid: %d, tgid: %d, handle: %d, sc: 0x%x, fl: %p, fd: %p, magic: %d\n" #define fastrpc_file_params "fl->tgid: %d, fl->cid: %d, fl->ssrcount: %p, fl->pd: %d, fl->profile: %p, fl->mode: %p, fl->tgid_open: %d, fl->num_cached_buf: %d, num_pers_hdrs: %d, 
fl->sessionid: %d, fl->servloc_name: %s, fl->file_close: %d, fl->dsp_proc_init: %d,fl->apps: %p, fl->qos_request: %d, fl->dev_minor: %d, fl->debug_buf: %s fl->debug_buf_alloced_attempted: %d, fl->wake_enable: %d, fl->ws_timeout: %d, fl->untrusted_process: %d\n" -#define fastrpc_mmap_params "fl: %p, apps: %p, fd: %d, flags: %p, buf: %p, phys: %p, size : %d, va : %p, map->raddr: %p, len : %d, refs : %d, secure: %d\n" +#define fastrpc_mmap_params "fd: %d, flags: %p, buf: %p, phys: %p, size : %d, va : %p, map->raddr: %p, len : %d, refs : %d, secure: %d\n" #define fastrpc_buf_params "buf->fl: %p, buf->phys: %p, buf->virt: %p, buf->size: %d, buf->dma_attr: %ld, buf->raddr: %p, buf->flags: %d, buf->type: %d, buf->in_use: %d\n" /* Set for buffers that have no virtual mapping in userspace */ From f8b3c018583b1e3d212271027350f8c28e3bb151 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Fri, 28 Oct 2022 16:08:10 -0700 Subject: [PATCH 011/146] Add snapshot from Kailua Added snapshot of fastrpc code on Kailua to Lanai component. 
Change-Id: I50bd699ef1c502116368c383ed9dc28f675d393c Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 653 ++++++++++++++++++++++++++++++++++--------- dsp/adsprpc_shared.h | 32 ++- 2 files changed, 544 insertions(+), 141 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 0111672c9b..5a412d2884 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -53,7 +53,14 @@ #include #include #include +#include #include +#include + +#ifdef CONFIG_HIBERNATION +#include +#include +#endif #define CREATE_TRACE_POINTS #include "fastrpc_trace.h" @@ -63,6 +70,8 @@ #define TZ_PIL_AUTH_QDSP6_PROC 1 #define FASTRPC_ENOSUCH 39 +#define VMID_SSC_Q6 5 +#define VMID_ADSP_Q6 6 #define DEBUGFS_SIZE 3072 #define PID_SIZE 10 @@ -197,6 +206,8 @@ /* Convert the 19.2MHz clock count to micro-seconds */ #define CONVERT_CNT_TO_US(CNT) (CNT * 10ull / 192ull) +#define FASTRPC_USER_PD_FORCE_KILL 2 + /* Unique index flag used for mini dump */ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; @@ -625,7 +636,7 @@ static int fastrpc_minidump_remove_region(struct fastrpc_mmap *map) map->frpc_md_index = -1; } } else { - ADSPRPC_ERR("mini-dump enabled with invalid unique id: %d\n", map->frpc_md_index); + ADSPRPC_WARN("mini-dump enabled with invalid unique id: %d\n", map->frpc_md_index); } return err; } @@ -682,7 +693,7 @@ skip_buf_cache: goto bail; } vmid = fl->apps->channel[cid].vmid; - if (vmid) { + if ((vmid) && (fl->apps->channel[cid].in_hib == 0)) { int srcVM[2] = {VMID_HLOS, vmid}; int hyp_err = 0; @@ -871,7 +882,7 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, hlist_for_each_entry_safe(map, n, &me->maps, hn) { if ((fd < 0 || map->fd == fd) && map->raddr == va && map->raddr + map->len == va + len && - map->refs == 1 && + map->refs == 1 && !map->is_persistent && /* Skip unmap if it is fastrpc shell memory */ !map->is_filemap) { match = map; @@ -928,7 +939,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) map->flags 
== ADSP_MMAP_REMOTE_HEAP_ADDR) { spin_lock_irqsave(&me->hlock, irq_flags); map->refs--; - if (!map->refs) + if (!map->refs && !map->is_persistent) hlist_del_init(&map->hn); spin_unlock_irqrestore(&me->hlock, irq_flags); if (map->refs > 0) { @@ -937,6 +948,10 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) map->size, map->va, map->refs); return; } + spin_lock_irqsave(&me->hlock, irq_flags); + if (map->is_persistent && map->in_use) + map->in_use = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); } else { map->refs--; if (!map->refs) @@ -953,11 +968,11 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) return; } - if (msm_minidump_enabled()) { + if (msm_minidump_enabled() && !map->is_persistent) err = fastrpc_minidump_remove_region(map); - } - trace_fastrpc_dma_free(-1, map->phys, map->size); - if (map->phys) { + + if (map->phys && !map->is_persistent) { + trace_fastrpc_dma_free(-1, map->phys, map->size); dma_free_attrs(me->dev, map->size, (void *)map->va, (dma_addr_t)map->phys, (unsigned long)map->attr); } @@ -973,6 +988,8 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) } else { int destVM[1] = {VMID_HLOS}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + if (!fl) + goto bail; if (map->secure) sess = fl->secsctx; @@ -980,7 +997,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) sess = fl->sctx; vmid = fl->apps->channel[cid].vmid; - if (vmid && map->phys) { + if (vmid && map->phys && (me->channel[cid].in_hib == 0)) { int hyp_err = 0; int srcVM[2] = {VMID_HLOS, vmid}; @@ -1002,11 +1019,35 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (!IS_ERR_OR_NULL(map->buf)) dma_buf_put(map->buf); } - kfree(map); +bail: + if (!map->is_persistent) + kfree(map); } static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, - struct fastrpc_session_ctx **session); + int sharedcb, struct fastrpc_session_ctx 
**session); + +static inline bool fastrpc_get_persistent_map(size_t len, struct fastrpc_mmap **pers_map) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_mmap *map = NULL; + struct hlist_node *n = NULL; + bool found = false; + unsigned long irq_flags = 0; + + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + if (len == map->len && + map->is_persistent && !map->in_use) { + *pers_map = map; + map->in_use = true; + found = true; + break; + } + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + return found; +} static int fastrpc_mmap_create_remote_heap(struct fastrpc_file *fl, struct fastrpc_mmap *map, size_t len, int mflags) @@ -1028,6 +1069,7 @@ static int fastrpc_mmap_create_remote_heap(struct fastrpc_file *fl, map->phys = (uintptr_t)region_phys; map->size = len; map->va = (uintptr_t)region_vaddr; + map->servloc_name = fl->servloc_name; bail: return err; } @@ -1103,8 +1145,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * dma_buf_attach(map->buf, me->dev))); if (err) { ADSPRPC_ERR( - "dma_buf_attach for fd %d failed to map buffer on SMMU device %s ret %ld\n", - fd, dev_name(me->dev), PTR_ERR(map->attach)); + "dma_buf_attach for fd %d for len 0x%zx failed to map buffer on SMMU device %s ret %ld\n", + fd, len, dev_name(me->dev), PTR_ERR(map->attach)); err = -EFAULT; goto bail; } @@ -1115,8 +1157,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * DMA_BIDIRECTIONAL))); if (err) { ADSPRPC_ERR( - "dma_buf_map_attachment for fd %d failed on device %s ret %ld\n", - fd, dev_name(me->dev), PTR_ERR(map->table)); + "dma_buf_map_attachment for fd %d for len 0x%zx failed on device %s ret %ld\n", + fd, len, dev_name(me->dev), PTR_ERR(map->table)); err = -EFAULT; goto bail; } @@ -1132,7 +1174,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * map->size = len; map->flags = FASTRPC_MAP_FD_DELAYED; trace_fastrpc_dma_map(cid, 
fd, map->phys, map->size, - len, mflags, map->attach->dma_map_attrs); + len, map->attach->dma_map_attrs, mflags); } else { if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) { ADSPRPC_INFO("buffer mapped with persist attr 0x%x\n", @@ -1163,7 +1205,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; if (map->secure) { if (!fl->secsctx) - err = fastrpc_session_alloc(chan, 1, + err = fastrpc_session_alloc(chan, 1, 0, &fl->secsctx); if (err) { ADSPRPC_ERR( @@ -1191,8 +1233,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * dma_buf_attach(map->buf, sess->smmu.dev))); if (err) { ADSPRPC_ERR( - "dma_buf_attach for fd %d failed to map buffer on SMMU device %s ret %ld\n", - fd, dev_name(sess->smmu.dev), + "dma_buf_attach for fd %d failed for len 0x%zx to map buffer on SMMU device %s ret %ld\n", + fd, len, dev_name(sess->smmu.dev), PTR_ERR(map->attach)); err = -EFAULT; goto bail; @@ -1211,8 +1253,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * DMA_BIDIRECTIONAL))); if (err) { ADSPRPC_ERR( - "dma_buf_map_attachment for fd %d failed on device %s ret %ld\n", - fd, dev_name(sess->smmu.dev), + "dma_buf_map_attachment for fd %d failed for len 0x%zx on device %s ret %ld\n", + fd, len, dev_name(sess->smmu.dev), PTR_ERR(map->table)); err = -EFAULT; goto bail; @@ -1238,7 +1280,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * map->size = buf_page_size(len); } trace_fastrpc_dma_map(cid, fd, map->phys, map->size, - len, mflags, map->attach->dma_map_attrs); + len, map->attach->dma_map_attrs, mflags); VERIFY(err, map->size >= len && map->size < me->max_size_limit); if (err) { @@ -1728,7 +1770,11 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, if (err) goto bail; } - + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } chan = 
&me->channel[cid]; spin_lock_irqsave(&chan->ctxlock, irq_flags); @@ -2008,17 +2054,13 @@ static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me) spin_unlock_irqrestore(&me->hlock, irq_flags); } -static void fastrpc_ramdump_collection(int cid) +static void fastrpc_update_ramdump_status(int cid) { struct fastrpc_file *fl = NULL; struct hlist_node *n = NULL; struct fastrpc_apps *me = &gfa; struct fastrpc_channel_ctx *chan = &me->channel[cid]; - struct qcom_dump_segment ramdump_entry; - struct fastrpc_buf *buf = NULL; - int ret = 0; unsigned long irq_flags = 0; - struct list_head head; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { @@ -2032,6 +2074,19 @@ static void fastrpc_ramdump_collection(int cid) if (chan->buf) hlist_add_head(&chan->buf->hn_init, &chan->initmems); spin_unlock_irqrestore(&me->hlock, irq_flags); +} + +static void fastrpc_ramdump_collection(int cid) +{ + struct fastrpc_file *fl = NULL; + struct hlist_node *n = NULL; + struct fastrpc_apps *me = &gfa; + struct fastrpc_channel_ctx *chan = &me->channel[cid]; + struct qcom_dump_segment ramdump_entry; + struct fastrpc_buf *buf = NULL; + int ret = 0; + unsigned long irq_flags = 0; + struct list_head head; hlist_for_each_entry_safe(buf, n, &chan->initmems, hn_init) { fl = buf->fl; @@ -3239,7 +3294,7 @@ static int fastrpc_wait_on_notif_queue( read_notif_status: interrupted = wait_event_interruptible(fl->proc_state_notif.notif_wait_queue, atomic_read(&fl->proc_state_notif.notif_queue_count)); - if (!fl || fl->file_close >= FASTRPC_PROCESS_EXIT_START) { + if (!fl) { err = -EBADF; goto bail; } @@ -3536,16 +3591,9 @@ static int fastrpc_init_attach_process(struct fastrpc_file *fl, if (init->flags == FASTRPC_INIT_ATTACH) fl->pd = 0; - else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) { - if (fl->cid == ADSP_DOMAIN_ID) - fl->servloc_name = - SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME; - else if (fl->cid == SDSP_DOMAIN_ID) - fl->servloc_name = - 
SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME; + else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) /* Setting to 2 will route the message to sensorsPD */ fl->pd = 2; - } err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); if (err) @@ -3585,13 +3633,13 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, } inbuf; spin_lock(&fl->hlock); - if (fl->in_process_create) { + if (fl->dsp_process_state) { err = -EALREADY; ADSPRPC_ERR("Already in create dynamic process\n"); spin_unlock(&fl->hlock); return err; } - fl->in_process_create = true; + fl->dsp_process_state = PROCESS_CREATE_IS_INPROGRESS; spin_unlock(&fl->hlock); inbuf.pgid = fl->tgid; @@ -3748,9 +3796,11 @@ bail: fastrpc_mmap_free(file, 0); mutex_unlock(&fl->map_mutex); } + + spin_lock(&fl->hlock); + locked = 1; if (err) { - spin_lock(&fl->hlock); - locked = 1; + fl->dsp_process_state = PROCESS_CREATE_DEFAULT; if (!IS_ERR_OR_NULL(fl->init_mem)) { init_mem = fl->init_mem; fl->init_mem = NULL; @@ -3758,14 +3808,13 @@ bail: locked = 0; fastrpc_buf_free(init_mem, 0); } - if (locked) { - spin_unlock(&fl->hlock); - locked = 0; - } + } else { + fl->dsp_process_state = PROCESS_CREATE_SUCCESS; + } + if (locked) { + spin_unlock(&fl->hlock); + locked = 0; } - spin_lock(&fl->hlock); - fl->in_process_create = false; - spin_unlock(&fl->hlock); return err; } @@ -3793,6 +3842,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, unsigned int namelen; unsigned int pageslen; } inbuf; + unsigned long irq_flags = 0; if (fl->dev_minor == MINOR_NUM_DEV) { err = -ECONNREFUSED; @@ -3823,7 +3873,6 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, inbuf.pageslen = 0; if (!strcmp(proc_name, "audiopd")) { - fl->servloc_name = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME; /* * Remove any previous mappings in case process is trying * to reconnect after a PD restart on remote subsystem. 
@@ -3842,19 +3891,24 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, if (!fl->trusted_vm && (!me->staticpd_flags && !me->legacy_remote_heap)) { inbuf.pageslen = 1; - mutex_lock(&fl->map_mutex); - err = fastrpc_mmap_create(fl, -1, NULL, 0, init->mem, - init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem); - mutex_unlock(&fl->map_mutex); - if (err) - goto bail; + if (!fastrpc_get_persistent_map(init->memlen, &mem)) { + mutex_lock(&fl->map_mutex); + err = fastrpc_mmap_create(fl, -1, NULL, 0, init->mem, + init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem); + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + spin_lock_irqsave(&me->hlock, irq_flags); + mem->in_use = true; + spin_unlock_irqrestore(&me->hlock, irq_flags); + } phys = mem->phys; size = mem->size; /* * If remote-heap VMIDs are defined in DTSI, then do * hyp_assign from HLOS to those VMs (LPASS, ADSP). */ - if (rhvm->vmid && mem->refs == 1 && size) { + if (rhvm->vmid && mem && mem->refs == 1 && size) { err = hyp_assign_phys(phys, (uint64_t)size, hlosvm, 1, rhvm->vmid, rhvm->vmperm, rhvm->vmcount); @@ -3868,6 +3922,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, rh_hyp_done = 1; } me->staticpd_flags = 1; + mem->is_persistent = true; } /* @@ -3917,13 +3972,53 @@ bail: "rh hyp unassign failed with %d for phys 0x%llx of size %zu\n", hyp_err, phys, size); } - mutex_lock(&fl->map_mutex); - fastrpc_mmap_free(mem, 0); - mutex_unlock(&fl->map_mutex); + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(mem, 0); + mutex_unlock(&fl->map_mutex); } return err; } +/* + * This function sets fastrpc service location name + * based on ioctl init flags. 
+ */ +static void fastrpc_set_servloc(struct fastrpc_file *fl, + struct fastrpc_ioctl_init *init) +{ + char *proc_name = NULL; + int err = 0; + + if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) { + if (fl->cid == ADSP_DOMAIN_ID) + fl->servloc_name = + SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME; + else if (fl->cid == SDSP_DOMAIN_ID) + fl->servloc_name = + SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME; + } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) { + if (!init->filelen) + goto bail; + + proc_name = kzalloc(init->filelen + 1, GFP_KERNEL); + VERIFY(err, !IS_ERR_OR_NULL(proc_name)); + if (err) { + err = -ENOMEM; + goto bail; + } + err = copy_from_user((void *)proc_name, + (void __user *)init->file, init->filelen); + if (err) { + err = -EFAULT; + goto bail; + } + if (!strcmp(proc_name, "audiopd")) + fl->servloc_name = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME; + } +bail: + kfree(proc_name); +} + int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init_attrs *uproc) { @@ -3959,6 +4054,16 @@ int fastrpc_init_process(struct fastrpc_file *fl, } } + if (fl->sharedcb == 1) { + // Only attach sensors pd use cases can share CB + VERIFY(err, init->flags == FASTRPC_INIT_ATTACH_SENSORS); + if (err) { + err = -EACCES; + goto bail; + } + } + + fastrpc_set_servloc(fl, init); err = fastrpc_channel_open(fl, init->flags); if (err) goto bail; @@ -4173,6 +4278,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) VERIFY(err, fl->apps->channel[cid].issubsystemup == 1); if (err) { + wait_for_completion(&fl->shutdown); err = -ECONNRESET; goto bail; } @@ -4444,11 +4550,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, size_t size, uint32_t flags, int locked) { int err = 0; - struct fastrpc_apps *me = &gfa; int tgid = 0; - int destVM[1] = {VMID_HLOS}; - int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + struct fastrpc_apps *me = &gfa; int cid = -1; + struct fastrpc_ioctl_invoke_async ioctl; + 
remote_arg_t ra[2]; + struct { + uint8_t skey; + } routargs; if (!fl) { err = -EBADF; @@ -4463,64 +4572,64 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, cid); goto bail; } - if (flags == ADSP_MMAP_HEAP_ADDR) { - struct fastrpc_ioctl_invoke_async ioctl; - remote_arg_t ra[2]; - int err = 0; - struct { - uint8_t skey; - } routargs; - tgid = fl->tgid; - ra[0].buf.pv = (void *)&tgid; - ra[0].buf.len = sizeof(tgid); - - ra[1].buf.pv = (void *)&routargs; - ra[1].buf.len = sizeof(routargs); - - ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; - ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1); - ioctl.inv.pra = ra; - ioctl.fds = NULL; - ioctl.attrs = NULL; - ioctl.crc = NULL; - ioctl.perf_kernel = NULL; - ioctl.perf_dsp = NULL; - ioctl.job = NULL; - - if (locked) { - mutex_unlock(&fl->map_mutex); - mutex_unlock(&me->channel[cid].smd_mutex); - } - VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, - FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); - if (locked) { - mutex_lock(&me->channel[cid].smd_mutex); - mutex_lock(&fl->map_mutex); - } - if (err) - goto bail; - } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - if (me->channel[cid].rhvm.vmid) { - err = hyp_assign_phys(phys, - (uint64_t)size, - me->channel[cid].rhvm.vmid, - me->channel[cid].rhvm.vmcount, - destVM, destVMperm, 1); - if (err) { - ADSPRPC_ERR( - "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", - err, phys, size); - err = -EADDRNOTAVAIL; - goto bail; - } - } + tgid = fl->tgid; + ra[0].buf.pv = (void *)&tgid; + ra[0].buf.len = sizeof(tgid); + ra[1].buf.pv = (void *)&routargs; + ra[1].buf.len = sizeof(routargs); + ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; + ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1); + ioctl.inv.pra = ra; + ioctl.fds = NULL; + ioctl.attrs = NULL; + ioctl.crc = NULL; + ioctl.perf_kernel = NULL; + ioctl.perf_dsp = NULL; + ioctl.job = NULL; + if (locked) { + mutex_unlock(&fl->map_mutex); + 
mutex_unlock(&me->channel[cid].smd_mutex); } + VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, + FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl))); + if (locked) { + mutex_lock(&me->channel[cid].smd_mutex); + mutex_lock(&fl->map_mutex); + } + if (err) + goto bail; bail: return err; } +static int fastrpc_munmap_rh(uint64_t phys, size_t size, + uint32_t flags) +{ + int err = 0; + struct fastrpc_apps *me = &gfa; + int destVM[1] = {VMID_HLOS}; + int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + + if ((me->channel[RH_CID].rhvm.vmid) + && (me->channel[RH_CID].in_hib == 0)) { + err = hyp_assign_phys(phys, + (uint64_t)size, + me->channel[RH_CID].rhvm.vmid, + me->channel[RH_CID].rhvm.vmcount, + destVM, destVMperm, 1); + if (err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + return err; + } + } + return err; +} + static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr, uint64_t phys, size_t size, uint32_t flags) { @@ -4530,12 +4639,16 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr, size, flags))); if (err) goto bail; - if (flags == ADSP_MMAP_HEAP_ADDR || - flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + if (flags == ADSP_MMAP_HEAP_ADDR) { VERIFY(err, !(err = fastrpc_munmap_on_dsp_rh(fl, phys, size, flags, 0))); if (err) goto bail; + } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + VERIFY(err, !(err = fastrpc_munmap_rh(phys, + size, flags))); + if (err) + goto bail; } bail: return err; @@ -4552,24 +4665,69 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) unsigned long irq_flags = 0; INIT_LIST_HEAD(&head); - VERIFY(err, fl->cid == RH_CID); - if (err) { - err = -EBADR; - goto bail; + if (fl) { + VERIFY(err, fl->cid == RH_CID); + if (err) { + err = -EBADR; + goto bail; + } } do { match = NULL; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) { - match = map; - 
hlist_del_init(&map->hn); - break; + if (map->servloc_name && + fl->servloc_name && !strcmp(map->servloc_name, fl->servloc_name)) { + match = map; + if (map->is_persistent && map->in_use) { + int destVM[1] = {VMID_HLOS}; + int destVMperm[1] = {PERM_READ | PERM_WRITE + | PERM_EXEC}; + uint64_t phys = map->phys; + size_t size = map->size; + + spin_unlock_irqrestore(&me->hlock, irq_flags); + //hyp assign it back to HLOS + if (me->channel[RH_CID].rhvm.vmid) { + err = hyp_assign_phys(phys, + (uint64_t)size, + me->channel[RH_CID].rhvm.vmid, + me->channel[RH_CID].rhvm.vmcount, + destVM, destVMperm, 1); + } + if (err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + return err; + } + spin_lock_irqsave(&me->hlock, irq_flags); + map->in_use = false; + } + if (map->is_persistent) { + match = NULL; + continue; + } + hlist_del_init(&map->hn); + break; + } } spin_unlock_irqrestore(&me->hlock, irq_flags); if (match) { - err = fastrpc_munmap_on_dsp_rh(fl, match->phys, - match->size, match->flags, locked); + if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + err = fastrpc_munmap_rh(match->phys, + match->size, match->flags); + } else if (match->flags == ADSP_MMAP_HEAP_ADDR) { + if (fl) + err = fastrpc_munmap_on_dsp_rh(fl, match->phys, + match->size, match->flags, 0); + else { + pr_err("Cannot communicate with DSP, ADSP is down\n"); + fastrpc_mmap_add(match); + } + } if (err) goto bail; memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); @@ -4714,8 +4872,10 @@ int fastrpc_internal_munmap(struct fastrpc_file *fl, err = -EINVAL; goto bail; } - VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr, - map->phys, map->size, map->flags))); + if (!map->is_persistent) { + VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr, + map->phys, map->size, map->flags))); + } if (err) goto bail; mutex_lock(&fl->map_mutex); @@ -4964,7 +5124,7 @@ int fastrpc_internal_mmap(struct fastrpc_file *fl, static void 
fastrpc_context_list_dtor(struct fastrpc_file *fl); static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan, - int secure, struct fastrpc_session_ctx **session) + int secure, int sharedcb, struct fastrpc_session_ctx **session) { struct fastrpc_apps *me = &gfa; uint64_t idx = 0; @@ -4973,11 +5133,21 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan, if (chan->sesscount) { for (idx = 0; idx < chan->sesscount; ++idx) { if (!chan->session[idx].used && - chan->session[idx].smmu.secure == secure) { + chan->session[idx].smmu.secure == secure && + chan->session[idx].smmu.sharedcb == sharedcb) { chan->session[idx].used = 1; break; } } + if (idx >= chan->sesscount) { + for (idx = 0; idx < chan->sesscount; ++idx) { + if (!chan->session[idx].used && + chan->session[idx].smmu.secure == secure) { + chan->session[idx].used = 1; + break; + } + } + } if (idx >= chan->sesscount) { err = -EUSERS; goto bail; @@ -5156,13 +5326,13 @@ bail: } static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, - struct fastrpc_session_ctx **session) + int sharedcb, struct fastrpc_session_ctx **session) { int err = 0; mutex_lock(&chan->smd_mutex); if (!*session) - err = fastrpc_session_alloc_locked(chan, secure, session); + err = fastrpc_session_alloc_locked(chan, secure, sharedcb, session); mutex_unlock(&chan->smd_mutex); if (err == -EUSERS) { ADSPRPC_WARN( @@ -5226,7 +5396,7 @@ skip_dump_wait: } hlist_del_init(&fl->hn); fl->is_ramdump_pend = false; - fl->in_process_create = false; + fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -5596,6 +5766,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { + mutex_unlock(&me->channel[cid].smd_mutex); mutex_lock(&fl->map_mutex); err = fastrpc_mmap_remove_ssr(fl, 1); mutex_unlock(&fl->map_mutex); @@ -5603,9 
+5774,11 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) ADSPRPC_WARN( "failed to unmap remote heap for %s (err %d)\n", me->channel[cid].subsys, err); + mutex_lock(&me->channel[cid].smd_mutex); me->channel[cid].prevssrcount = me->channel[cid].ssrcount; } + me->channel[cid].in_hib = 0; mutex_unlock(&me->channel[cid].smd_mutex); bail: @@ -5675,7 +5848,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->qos_request = 0; fl->dsp_proc_init = 0; fl->is_ramdump_pend = false; - fl->in_process_create = false; + fl->dsp_process_state = PROCESS_CREATE_DEFAULT; fl->is_unsigned_pd = false; fl->is_compat = false; init_completion(&fl->work); @@ -5691,7 +5864,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) GFP_KERNEL); spin_lock_init(&fl->dspsignals_lock); mutex_init(&fl->signal_create_mutex); - + init_completion(&fl->shutdown); return 0; } @@ -5828,7 +6001,7 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) fl->ssrcount = fl->apps->channel[cid].ssrcount; mutex_lock(&fl->apps->channel[cid].smd_mutex); err = fastrpc_session_alloc_locked(&fl->apps->channel[cid], - 0, &fl->sctx); + 0, fl->sharedcb, &fl->sctx); mutex_unlock(&fl->apps->channel[cid].smd_mutex); if (err == -EUSERS) { ADSPRPC_WARN( @@ -5885,6 +6058,7 @@ int fastrpc_internal_control(struct fastrpc_file *fl, int err = 0; unsigned int latency; struct fastrpc_apps *me = &gfa; + int sessionid = 0; u32 silver_core_count = me->silvercores.corecount, ii = 0, cpu; VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps)); @@ -5974,12 +6148,18 @@ int fastrpc_internal_control(struct fastrpc_file *fl, break; case FASTRPC_CONTROL_DSPPROCESS_CLEAN: (void)fastrpc_release_current_dsp_process(fl); + if (fl->tgid & SESSION_ID_MASK) + sessionid = 1; + fastrpc_queue_pd_status(fl, fl->cid, FASTRPC_USER_PD_FORCE_KILL, sessionid); break; case FASTRPC_CONTROL_RPC_POLL: err = fastrpc_manage_poll_mode(fl, cp->lp.enable, cp->lp.latency); if (err) 
goto bail; break; + case FASTRPC_CONTROL_SMMU: + fl->sharedcb = cp->smmu.sharedcb; + break; default: err = -EBADRQC; break; @@ -6976,7 +7156,10 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, { struct fastrpc_apps *me = &gfa; struct fastrpc_channel_ctx *ctx; + struct fastrpc_file *fl; + struct hlist_node *n; int cid = -1; + struct timespec64 startT = {0}; ctx = container_of(nb, struct fastrpc_channel_ctx, nb); cid = ctx - &me->channel[0]; @@ -6996,21 +7179,36 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, case QCOM_SSR_AFTER_SHUTDOWN: fastrpc_rproc_trace_events(gcinfo[cid].subsys, "QCOM_SSR_AFTER_SHUTDOWN", "fastrpc_restart_notifier-enter"); + spin_lock(&me->hlock); + hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { + if (fl->cid != cid) + continue; + complete(&fl->shutdown); + } + spin_unlock(&me->hlock); pr_info("adsprpc: %s: received RAMDUMP notification for %s\n", __func__, gcinfo[cid].subsys); break; case QCOM_SSR_BEFORE_POWERUP: fastrpc_rproc_trace_events(gcinfo[cid].subsys, "QCOM_SSR_BEFORE_POWERUP", "fastrpc_restart_notifier-enter"); + pr_info("adsprpc: %s: subsystem %s is about to start\n", + __func__, gcinfo[cid].subsys); + if (cid == CDSP_DOMAIN_ID && dump_enabled() && + ctx->ssrcount) + fastrpc_update_ramdump_status(cid); + fastrpc_notify_drivers(me, cid); /* Skip ram dump collection in first boot */ if (cid == CDSP_DOMAIN_ID && dump_enabled() && ctx->ssrcount) { mutex_lock(&me->channel[cid].smd_mutex); fastrpc_print_debug_data(cid); mutex_unlock(&me->channel[cid].smd_mutex); + ktime_get_real_ts64(&startT); fastrpc_ramdump_collection(cid); + pr_info("adsprpc: %s: fastrpc ramdump finished in %lu (us)\n", + __func__, getnstimediff(&startT)); } - fastrpc_notify_drivers(me, cid); break; case QCOM_SSR_AFTER_POWERUP: fastrpc_rproc_trace_events(gcinfo[cid].subsys, @@ -7097,10 +7295,13 @@ static const struct of_device_id fastrpc_match_table[] = { static int fastrpc_cb_probe(struct device *dev) { - struct 
fastrpc_channel_ctx *chan; - struct fastrpc_session_ctx *sess; + struct fastrpc_channel_ctx *chan = NULL; + struct fastrpc_session_ctx *sess = NULL; struct of_phandle_args iommuspec; struct fastrpc_apps *me = &gfa; + struct fastrpc_buf *buf = NULL; + struct gen_pool *gen_pool = NULL; + struct iommu_domain *domain = NULL; const char *name; int err = 0, cid = -1, i = 0; u32 sharedcb_count = 0, j = 0; @@ -7162,6 +7363,7 @@ static int fastrpc_cb_probe(struct device *dev) dma_addr_pool[1]); if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { + sess->smmu.sharedcb = 1; err = of_property_read_u32(dev->of_node, "shared-cb", &sharedcb_count); if (err) @@ -7178,6 +7380,79 @@ static int fastrpc_cb_probe(struct device *dev) } } } + if (of_get_property(dev->of_node, "qrtr-gen-pool", NULL) != NULL) { + u32 frpc_gen_addr_pool[2] = {0, 0}; + struct sg_table sgt; + + err = of_property_read_u32_array(dev->of_node, "frpc-gen-addr-pool", + frpc_gen_addr_pool, 2); + if (err) { + pr_err("Error: adsprpc: %s: parsing frpc-gen-addr-pool arguments failed for %s with err %d\n", + __func__, dev_name(dev), err); + goto bail; + } + sess->smmu.genpool_iova = frpc_gen_addr_pool[0]; + sess->smmu.genpool_size = frpc_gen_addr_pool[1]; + + VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL))); + if (err) { + err = -ENOMEM; + ADSPRPC_ERR( + "allocation failed for size 0x%zx\n", sizeof(*buf)); + goto bail; + } + INIT_HLIST_NODE(&buf->hn); + buf->virt = NULL; + buf->phys = 0; + buf->size = frpc_gen_addr_pool[1]; + buf->dma_attr = DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_NO_KERNEL_MAPPING; + /* Allocate memory for adding to genpool */ + buf->virt = dma_alloc_attrs(sess->smmu.dev, buf->size, + (dma_addr_t *)&buf->phys, + GFP_KERNEL, buf->dma_attr); + if (IS_ERR_OR_NULL(buf->virt)) { + ADSPRPC_ERR( + "dma_alloc_attrs failed for size 0x%zx, returned %pK\n", + buf->size, buf->virt); + err = -ENOBUFS; + goto dma_alloc_bail; + } + err = dma_get_sgtable_attrs(sess->smmu.dev, &sgt, buf->virt, + 
buf->phys, buf->size, buf->dma_attr); + if (err) { + ADSPRPC_ERR("dma_get_sgtable_attrs failed with err %d", err); + goto iommu_map_bail; + } + domain = iommu_get_domain_for_dev(sess->smmu.dev); + if (!domain) { + ADSPRPC_ERR("iommu_get_domain_for_dev failed "); + goto iommu_map_bail; + } + /* Map the allocated memory with fixed IOVA and is shared to remote subsystem */ + err = iommu_map_sg(domain, frpc_gen_addr_pool[0], sgt.sgl, + sgt.nents, IOMMU_READ | IOMMU_WRITE); + if (err < 0) { + ADSPRPC_ERR("iommu_map_sg failed with err %d", err); + goto iommu_map_bail; + } + /* Create genpool using SMMU device */ + gen_pool = devm_gen_pool_create(sess->smmu.dev, 0, + NUMA_NO_NODE, NULL); + if (IS_ERR(gen_pool)) { + err = PTR_ERR(gen_pool); + ADSPRPC_ERR("devm_gen_pool_create failed with err %d", err); + goto genpool_create_bail; + } + /* Add allocated memory to genpool */ + err = gen_pool_add_virt(gen_pool, (unsigned long)buf->virt, + buf->phys, buf->size, NUMA_NO_NODE); + if (err) { + ADSPRPC_ERR("gen_pool_add_virt failed with err %d", err); + goto genpool_add_bail; + } + sess->smmu.frpc_genpool = gen_pool; + sess->smmu.frpc_genpool_buf = buf; + } chan->sesscount++; if (debugfs_root && !debugfs_global_file) { @@ -7191,6 +7466,17 @@ static int fastrpc_cb_probe(struct device *dev) } bail: return err; +genpool_add_bail: + gen_pool_destroy(gen_pool); +genpool_create_bail: + iommu_unmap(domain, sess->smmu.genpool_iova, + sess->smmu.genpool_size); +iommu_map_bail: + dma_free_attrs(sess->smmu.dev, buf->size, buf->virt, + buf->phys, buf->dma_attr); +dma_alloc_bail: + kfree(buf); + return err; } static void init_secure_vmid_list(struct device *dev, char *prop_name, @@ -7489,6 +7775,35 @@ bail: return err; } +/* + * Function to free fastrpc genpool buffer + */ +static void fastrpc_genpool_free(struct fastrpc_session_ctx *sess) +{ + struct fastrpc_buf *buf = NULL; + struct iommu_domain *domain = NULL; + + if (!sess) + goto bail; + buf = sess->smmu.frpc_genpool_buf; + if 
(sess->smmu.frpc_genpool) { + gen_pool_destroy(sess->smmu.frpc_genpool); + sess->smmu.frpc_genpool = NULL; + } + if (buf && sess->smmu.dev) { + domain = iommu_get_domain_for_dev(sess->smmu.dev); + iommu_unmap(domain, sess->smmu.genpool_iova, + sess->smmu.genpool_size); + if (buf->phys) + dma_free_attrs(sess->smmu.dev, buf->size, buf->virt, + buf->phys, buf->dma_attr); + kfree(buf); + sess->smmu.frpc_genpool_buf = NULL; + } +bail: + return; +} + static void fastrpc_deinit(void) { struct fastrpc_channel_ctx *chan = gcinfo; @@ -7498,7 +7813,7 @@ static void fastrpc_deinit(void) for (i = 0; i < NUM_CHANNELS; i++, chan++) { for (j = 0; j < NUM_SESSIONS; j++) { struct fastrpc_session_ctx *sess = &chan->session[j]; - + fastrpc_genpool_free(sess); if (sess->smmu.dev) sess->smmu.dev = NULL; } @@ -7513,12 +7828,66 @@ static void fastrpc_deinit(void) mutex_destroy(&me->mut_uid); } +#ifdef CONFIG_HIBERNATION +static bool hibernation; + +static int fastrpc_hibernation_notifier(struct notifier_block *nb, + unsigned long event, void *dummy) +{ + if (event == PM_HIBERNATION_PREPARE) + hibernation = true; + else if (event == PM_POST_HIBERNATION) + hibernation = false; + + return NOTIFY_OK; +} + +static struct notifier_block fastrpc_notif_block = { + .notifier_call = fastrpc_hibernation_notifier, +}; +#endif + +#ifdef CONFIG_PM_SLEEP +static int fastrpc_hibernation_suspend(struct device *dev) +{ + int err = 0; + + if (of_device_is_compatible(dev->of_node, + "qcom,msm-fastrpc-compute")) { + err = fastrpc_mmap_remove_ssr(NULL, 0); + if (err) + ADSPRPC_WARN("failed to unmap remote heap (err %d)\n", + err); + } + return err; +} +static int fastrpc_restore(struct device *dev) +{ + struct fastrpc_apps *me = &gfa; + int cid; + + pr_info("adsprpc: restore enter\n"); + for (cid = 0; cid < NUM_CHANNELS; cid++) + me->channel[cid].in_hib = 1; + + pr_info("adsprpc: restore exit\n"); + return 0; +} + +static const struct dev_pm_ops fastrpc_pm = { + .freeze = fastrpc_hibernation_suspend, + 
.restore = fastrpc_restore, +}; +#endif static struct platform_driver fastrpc_driver = { .probe = fastrpc_probe, .driver = { .name = "fastrpc", .of_match_table = fastrpc_match_table, .suppress_bind_attrs = true, +#ifdef CONFIG_PM_SLEEP + .pm = &fastrpc_pm, +#endif }, }; @@ -7858,6 +8227,7 @@ static int __init fastrpc_device_init(void) me->jobid[i] = 1; me->channel[i].dev = me->secure_dev; me->channel[i].ssrcount = 0; + me->channel[i].in_hib = 0; me->channel[i].prevssrcount = 0; me->channel[i].issubsystemup = 1; me->channel[i].rh_dump_dev = NULL; @@ -7901,8 +8271,15 @@ static int __init fastrpc_device_init(void) err = fastrpc_transport_init(); if (err) goto device_create_bail; + me->transport_initialized = 1; +#ifdef CONFIG_HIBERNATION + err = register_pm_notifier(&fastrpc_notif_block); + if (err) + goto device_create_bail; +#endif + fastrpc_register_wakeup_source(me->non_secure_dev, FASTRPC_NON_SECURE_WAKE_SOURCE_CLIENT_NAME, &me->wake_source); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 67149b4943..ad434b2407 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -483,6 +483,7 @@ enum dsp_map_flags { enum fastrpc_control_type { FASTRPC_CONTROL_LATENCY = 1, + /* Share SMMU context bank */ FASTRPC_CONTROL_SMMU = 2, FASTRPC_CONTROL_KALLOC = 3, FASTRPC_CONTROL_WAKELOCK = 4, @@ -509,6 +510,10 @@ struct fastrpc_ctrl_pm { uint32_t timeout; /* timeout(in ms) for PM to keep system awake */ }; +struct fastrpc_ctrl_smmu { + uint32_t sharedcb; /* Set to SMMU share context bank */ +}; + struct fastrpc_ioctl_control { uint32_t req; union { @@ -516,6 +521,7 @@ struct fastrpc_ioctl_control { struct fastrpc_ctrl_kalloc kalloc; struct fastrpc_ctrl_wakelock wp; struct fastrpc_ctrl_pm pm; + struct fastrpc_ctrl_smmu smmu; }; }; @@ -574,6 +580,12 @@ enum fastrpc_response_flags { POLL_MODE = 5, }; +enum fastrpc_process_create_state { + PROCESS_CREATE_DEFAULT = 0, /* Process is not created */ + PROCESS_CREATE_IS_INPROGRESS = 1, /* Process creation is in 
progress */ + PROCESS_CREATE_SUCCESS = 2, /* Process creation is successful */ +}; + struct smq_invoke_rspv2 { uint64_t ctx; /* invoke caller context */ int retval; /* invoke return value */ @@ -829,6 +841,15 @@ struct fastrpc_smmu { int faults; int secure; int coherent; + int sharedcb; + /* gen pool for QRTR */ + struct gen_pool *frpc_genpool; + /* fastrpc gen pool buffer */ + struct fastrpc_buf *frpc_genpool_buf; + /* fastrpc gen pool buffer fixed IOVA */ + unsigned long genpool_iova; + /* fastrpc gen pool buffer size */ + size_t genpool_size; }; struct fastrpc_session_ctx { @@ -865,6 +886,7 @@ struct fastrpc_channel_ctx { struct mutex smd_mutex; uint64_t sesscount; uint64_t ssrcount; + int in_hib; void *handle; uint64_t prevssrcount; int issubsystemup; @@ -942,13 +964,15 @@ struct fastrpc_mmap { int refs; uintptr_t raddr; int secure; - /* Minidump unique index */ - int frpc_md_index; + bool is_persistent; /* the map is persistenet across sessions */ + int frpc_md_index; /* Minidump unique index */ uintptr_t attr; + bool in_use; /* Indicates if persistent map is in use*/ struct timespec64 map_start_time; struct timespec64 map_end_time; /* Mapping for fastrpc shell */ bool is_filemap; + char *servloc_name; /* Indicate which daemon mapped this */ }; enum fastrpc_perfkeys { @@ -1019,6 +1043,7 @@ struct fastrpc_file { char *servloc_name; int file_close; int dsp_proc_init; + int sharedcb; struct fastrpc_apps *apps; struct dentry *debugfs_file; struct dev_pm_qos_request *dev_pm_qos_req; @@ -1055,7 +1080,7 @@ struct fastrpc_file { /* Threads poll for specified timeout and fall back to glink wait */ uint32_t poll_timeout; /* Flag to indicate dynamic process creation status*/ - bool in_process_create; + enum fastrpc_process_create_state dsp_process_state; bool is_unsigned_pd; /* Flag to indicate 32 bit driver*/ bool is_compat; @@ -1063,6 +1088,7 @@ struct fastrpc_file { struct fastrpc_dspsignal *signal_groups[DSPSIGNAL_NUM_SIGNALS / DSPSIGNAL_GROUP_SIZE]; spinlock_t 
dspsignals_lock; struct mutex signal_create_mutex; + struct completion shutdown; }; union fastrpc_ioctl_param { From b53b19bf7538b300edc068a1561749c7e9a2a333 Mon Sep 17 00:00:00 2001 From: Thyagarajan Venkatanarayanan Date: Thu, 3 Nov 2022 12:01:22 -0700 Subject: [PATCH 012/146] msm: adsprpc: launch trusted process as system unsigned PD If a trusted process is requesting for unsigned PD, set process attribute to launch it as a system unsigned PD on remote subsystem to get additional privileges. Change-Id: Ie29f02ba16f2c4e68bf4cd1c75731915d40a9241 Signed-off-by: Thyagarajan Venkatanarayanan --- dsp/adsprpc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 5a412d2884..32810caaa1 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -227,6 +227,8 @@ enum fastrpc_proc_attr { FASTRPC_MODE_SYSTEM_PROCESS = 1 << 5, /* Macro for Prvileged Process */ FASTRPC_MODE_PRIVILEGED = (1 << 6), + /* Macro for system unsigned PD */ + FASTRPC_MODE_SYSTEM_UNSIGNED_PD = 1 << 17, }; #define PERF_END ((void)0) @@ -3667,6 +3669,9 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, } inbuf.pageslen = 1; + /* Disregard any system unsigned PD attribute from userspace */ + uproc->attrs &= (~FASTRPC_MODE_SYSTEM_UNSIGNED_PD); + /* Untrusted apps are not allowed to offload to signedPD on DSP. 
*/ if (fl->untrusted_process) { VERIFY(err, fl->is_unsigned_pd); @@ -3676,6 +3681,10 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, "untrusted app trying to offload to signed remote process\n"); goto bail; } + } else { + /* Trusted apps will be launched as system unsigned PDs */ + if (fl->is_unsigned_pd) + uproc->attrs |= FASTRPC_MODE_SYSTEM_UNSIGNED_PD; } /* Disregard any privilege bits from userspace */ From 4dc57d62a53447c897f3e50ef7b6f7d12a0a7004 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Thu, 10 Nov 2022 11:34:48 -0800 Subject: [PATCH 013/146] Added comment to daemon wait mechanism Added comment to fastrpc_check_pd_status for daemons to wait before audio or sensors PD is up. Change-Id: I2d908cbf4b642386aec97ef1afe38b6af572d9a5 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 32810caaa1..dfed99a68e 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6177,6 +6177,7 @@ bail: return err; } +/* Wait for PD to be up before audio or sensors daemons try connecting */ static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) { int err = 0, session = -1, cid = -1; From 38ca55b8e54cf4b32e29f5514977cb3d1ec7f2c0 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Thu, 10 Nov 2022 15:33:05 -0800 Subject: [PATCH 014/146] msm: adsprpc: Share CP secure context banks Currently CPZ secure context banks are not shared on CDSP. Share CP secure context banks among applications that use secure memory, so that many CPZ applications can be spawned and offloaded to DSP. 
Change-Id: I77d95bab86ef527e41c9fe79058742615f4adb0e Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 5 ++++- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index dfed99a68e..60cea8826f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1207,7 +1207,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; if (map->secure) { if (!fl->secsctx) - err = fastrpc_session_alloc(chan, 1, 0, + err = fastrpc_session_alloc(chan, 1, me->share_securecb, &fl->secsctx); if (err) { ADSPRPC_ERR( @@ -7374,6 +7374,9 @@ static int fastrpc_cb_probe(struct device *dev) if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { sess->smmu.sharedcb = 1; + // Set share_securecb, if the secure context bank is shared + if (sess->smmu.secure) + me->share_securecb = 1; err = of_property_read_u32(dev->of_node, "shared-cb", &sharedcb_count); if (err) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index ad434b2407..0b66a622db 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -945,6 +945,8 @@ struct fastrpc_apps { struct mutex mut_uid; /* Indicates cdsp device status */ int remote_cdsp_status; + /* Indicates secure context bank to be shared */ + int share_securecb; }; struct fastrpc_mmap { From b98c08f0193b3a72ce112ca87dbf41428aeb26b4 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Mon, 12 Sep 2022 12:23:48 +0530 Subject: [PATCH 015/146] msm:ADSPRPC: configure signed userpd initial memory length Currently the init memeory size allocated for user PD is fixed. This may not be sufficent for some usecases. This change will allow configuring initial memory size allocated for signed user PD. 
Change-Id: I2b12cf98c96b1e11daaad7e1bbf1d3777f43c1b6 Acked-by: ANANDU E Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 60cea8826f..b5e81857b6 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -182,7 +182,9 @@ #define FASTRPC_CPUINFO_EARLY_WAKEUP (1) #define INIT_FILELEN_MAX (2*1024*1024) -#define INIT_MEMLEN_MAX (8*1024*1024) +#define INIT_MEMLEN_MAX_STATIC (8*1024*1024) +#define INIT_MEMLEN_MAX_DYNAMIC (200*1024*1024) +#define INIT_MEMLEN_MIN_DYNAMIC (3*1024*1024) #define MAX_CACHE_BUF_SIZE (8*1024*1024) /* Maximum buffers cached in cached buffer list */ @@ -3622,7 +3624,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, remote_arg_t ra[6]; int fds[6]; unsigned int gid = 0, one_mb = 1024*1024; - unsigned int dsp_userpd_memlen = 3 * one_mb; + unsigned int dsp_userpd_memlen = 0; struct fastrpc_buf *init_mem; struct { @@ -3642,6 +3644,20 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, return err; } fl->dsp_process_state = PROCESS_CREATE_IS_INPROGRESS; + + if (init->memlen) { + if(init->memlen > INIT_MEMLEN_MAX_DYNAMIC || init->memlen < INIT_MEMLEN_MIN_DYNAMIC) { + ADSPRPC_ERR( + "init memory for process %d should be between %d and %d\n", + init->memlen, INIT_MEMLEN_MIN_DYNAMIC, INIT_MEMLEN_MAX_DYNAMIC); + err = -EINVAL; + goto bail; + } + dsp_userpd_memlen = init->memlen; + } else { + dsp_userpd_memlen = 3*one_mb; + } + spin_unlock(&fl->hlock); inbuf.pgid = fl->tgid; @@ -3733,7 +3749,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, * additional static heap initialized within the process. 
*/ if (fl->is_unsigned_pd) - dsp_userpd_memlen += 2*one_mb; + dsp_userpd_memlen = 5*one_mb; memlen = ALIGN(max(dsp_userpd_memlen, init->filelen * 4), one_mb); imem_dma_attr = DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_NO_KERNEL_MAPPING; err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, @@ -3859,6 +3875,14 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, "untrusted app trying to attach to audio PD\n"); return err; } + VERIFY(err, init->memlen <= INIT_MEMLEN_MAX_STATIC); + if (err) { + ADSPRPC_ERR( + "init memory for static process %d is more than max allowed init len %d\n", + init->memlen, INIT_MEMLEN_MAX_STATIC); + err = -EFBIG; + goto bail; + } if (!init->filelen) goto bail; @@ -4038,12 +4062,12 @@ int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_channel_ctx *chan = NULL; VERIFY(err, init->filelen < INIT_FILELEN_MAX - && init->memlen < INIT_MEMLEN_MAX); + && init->memlen <= INIT_MEMLEN_MAX_DYNAMIC); if (err) { ADSPRPC_ERR( "file size 0x%x or init memory 0x%x is more than max allowed file size 0x%x or init len 0x%x\n", init->filelen, init->memlen, - INIT_FILELEN_MAX, INIT_MEMLEN_MAX); + INIT_FILELEN_MAX, INIT_MEMLEN_MAX_DYNAMIC); err = -EFBIG; goto bail; } @@ -6582,7 +6606,6 @@ bail: return err; } - static inline int fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, unsigned int ioctl_num, union fastrpc_ioctl_param *p, void *param) From 45d0db44777ecb13633a331ea6eda82de548d774 Mon Sep 17 00:00:00 2001 From: jianzhou Date: Mon, 12 Dec 2022 02:32:24 -0800 Subject: [PATCH 016/146] dsp: remove private iommu/dma header iommu/dma header was made to be private, need to remove it. 
Change-Id: I16472ecb8af26699e1fe5a5316c44c4df9069dab Signed-off-by: Maria Yu --- dsp/adsprpc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index b5e81857b6..7c97e898a7 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -53,7 +52,7 @@ #include #include #include -#include +#include #include #include From aab073ca4937f14553d3a0a7502c01efc92321b7 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Fri, 16 Dec 2022 10:43:02 -0800 Subject: [PATCH 017/146] Update copyright for cdsp-loader driver Updated copyright to add QuIC to cdsp loader driver code. Change-Id: Ic959432b2233d3ac433ca4174367d342900a0b0f Signed-off-by: Anirudh Raghavendra --- dsp/cdsp-loader.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/cdsp-loader.c b/dsp/cdsp-loader.c index 0caad0e2b2..d3b1dec433 100644 --- a/dsp/cdsp-loader.c +++ b/dsp/cdsp-loader.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012-2014, 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include From a62aa004fbb3b737c0ddbcb92390ed9b9b88a8c2 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Tue, 6 Dec 2022 17:10:40 -0800 Subject: [PATCH 018/146] Add support for Bazel to build modules Add support for fastrpc modules to be built with bazel for pineapple Change-Id: Iaf655ee70258c745c3885ac9b0c82ff9f0830a8b Signed-off-by: Anirudh Raghavendra Signed-off-by: John Moon --- BUILD.bazel | 15 +++++++++++++++ define_modules.bzl | 42 ++++++++++++++++++++++++++++++++++++++++++ dsp/fastrpc_trace.h | 11 +++++++++-- 3 files changed, 66 insertions(+), 2 deletions(-) create mode 100644 BUILD.bazel create mode 100644 define_modules.bzl diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 0000000000..bce67067be --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,15 @@ +# Bazel file for fastrpc +# Three modules to be exported - frpc-adsprpc, +# cdsp-loader and frpc-trusted + +package( + default_visibility = [ + "//visibility:public", + ], +) + +load(":define_modules.bzl", "define_modules") + +define_modules("pineapple", "consolidate") + +define_modules("pineapple", "gki") diff --git a/define_modules.bzl b/define_modules.bzl new file mode 100644 index 0000000000..1ece612915 --- /dev/null +++ b/define_modules.bzl @@ -0,0 +1,42 @@ +# TODO +# Add ddk module definition for frpc-trusted driver + +load( + "//build/kernel/kleaf:kernel.bzl", + "ddk_headers", + "ddk_module", + "kernel_module", + "kernel_modules_install", +) + +def define_modules(target, variant): + kernel_build_variant = "{}_{}".format(target, variant) + + # Path to dsp folder from msm-kernel/include/trace directory + trace_include_path = "../../../{}/dsp".format(native.package_name()) + + ddk_module( + name = "{}_frpc-adsprpc".format(kernel_build_variant), + kernel_build = "//msm-kernel:{}".format(kernel_build_variant), + deps = ["//msm-kernel:all_headers"], + srcs = [ + "dsp/adsprpc.c", + "dsp/adsprpc_compat.c", + "dsp/adsprpc_compat.h", + "dsp/adsprpc_rpmsg.c", + "dsp/adsprpc_shared.h", + 
"dsp/fastrpc_trace.h", + ], + local_defines = ["DSP_TRACE_INCLUDE_PATH={}".format(trace_include_path)], + out = "frpc-adsprpc.ko", + hdrs = ["include/linux/fastrpc.h"], + includes = ["include/linux"], + ) + + ddk_module( + name = "{}_cdsp-loader".format(kernel_build_variant), + kernel_build = "//msm-kernel:{}".format(kernel_build_variant), + deps = ["//msm-kernel:all_headers"], + srcs = ["dsp/cdsp-loader.c"], + out = "cdsp-loader.ko", + ) diff --git a/dsp/fastrpc_trace.h b/dsp/fastrpc_trace.h index 5bfe2f9779..e812d2a906 100644 --- a/dsp/fastrpc_trace.h +++ b/dsp/fastrpc_trace.h @@ -10,9 +10,16 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM fastrpc -/* Path must be relative to location of 'define_trace.h' header in kernel */ +/* + * Path must be relative to location of 'define_trace.h' header in kernel + * Define path if not defined in bazel file + */ +#ifndef DSP_TRACE_INCLUDE_PATH +#define DSP_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/dsp-kernel/dsp +#endif + #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/dsp-kernel/dsp +#define TRACE_INCLUDE_PATH DSP_TRACE_INCLUDE_PATH /* Name of trace header file */ #undef TRACE_INCLUDE_FILE From e1f8f88b4be13a1ba76ec38482966be24c674d02 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Fri, 20 Jan 2023 16:25:15 +0530 Subject: [PATCH 019/146] msm: ADSPRPC: Use PD type enums instead of magic numbers Currently PD type is getting updated with some magic numbers. Add enum with different PD type information and use these types instead of magic numbers for better code readability. 
Change-Id: I20eb03726cabfcc88589be215c3c967b608a9cdb Acked-by: Ekansh Gupta Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 12 ++++++------ dsp/adsprpc_shared.h | 9 ++++++++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 7c97e898a7..f7550e88a4 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ /* Uncomment this block to log an error on every VERIFY failure */ @@ -137,7 +137,7 @@ #endif /* - * ctxid of every message is OR-ed with fl->pd (0/1/2) before + * ctxid of every message is OR-ed with fastrpc_remote_pd_type before * it is sent to DSP. So mask 2 LSBs to retrieve actual context */ #define CONTEXT_PD_CHECK (3) @@ -3593,10 +3593,10 @@ static int fastrpc_init_attach_process(struct fastrpc_file *fl, ioctl.job = NULL; if (init->flags == FASTRPC_INIT_ATTACH) - fl->pd = 0; + fl->pd = FASTRPC_ROOT_PD; else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) /* Setting to 2 will route the message to sensorsPD */ - fl->pd = 2; + fl->pd = FASTRPC_SENSORS_PD; err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl); if (err) @@ -3662,7 +3662,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, inbuf.pgid = fl->tgid; inbuf.namelen = strlen(current->comm) + 1; inbuf.filelen = init->filelen; - fl->pd = 1; + fl->pd = FASTRPC_USER_PD; if (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE) fl->is_unsigned_pd = true; @@ -3899,7 +3899,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, goto bail; } - fl->pd = 1; + fl->pd = FASTRPC_USER_PD; inbuf.pgid = fl->tgid; inbuf.namelen = init->filelen; inbuf.pageslen = 0; diff --git a/dsp/adsprpc_shared.h 
b/dsp/adsprpc_shared.h index 0b66a622db..c49d491144 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ADSPRPC_SHARED_H #define ADSPRPC_SHARED_H @@ -678,6 +678,13 @@ enum fastrpc_msg_type { KERNEL_MSG_WITH_NONZERO_PID, }; +/* Fastrpc remote pd type */ +enum fastrpc_remote_pd_type { + FASTRPC_ROOT_PD = 0, + FASTRPC_USER_PD, + FASTRPC_SENSORS_PD, +}; + #define DSPSIGNAL_TIMEOUT_NONE 0xffffffff #define DSPSIGNAL_NUM_SIGNALS 1024 From 6b8c6087fe57bc4f93d95a792c4a6b81c6c06ede Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Mon, 23 Jan 2023 12:56:14 -0800 Subject: [PATCH 020/146] msm: adsprpc: Add IOMMU_CACHE flag for iommy_map_sg Currently memory shared to QRTR is not mapped with IOMMU_CACHE flag. When using iommu_map_sg, IOMMU_CACHE flag makes the memory coherent. Add IOMMU_CACHE flag to iommu_mag_sg to make memory shared to QRTR coherent on APPS. Also remove DMA_ATTR_NO_KERNEL_MAPPING attribute while allocating memory as it doesn't have significance when the memory is coherent. 
Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index f7550e88a4..c10844841b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7440,7 +7440,7 @@ static int fastrpc_cb_probe(struct device *dev) buf->virt = NULL; buf->phys = 0; buf->size = frpc_gen_addr_pool[1]; - buf->dma_attr = DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_NO_KERNEL_MAPPING; + buf->dma_attr = DMA_ATTR_DELAYED_UNMAP; /* Allocate memory for adding to genpool */ buf->virt = dma_alloc_attrs(sess->smmu.dev, buf->size, (dma_addr_t *)&buf->phys, @@ -7465,7 +7465,7 @@ static int fastrpc_cb_probe(struct device *dev) } /* Map the allocated memory with fixed IOVA and is shared to remote subsystem */ err = iommu_map_sg(domain, frpc_gen_addr_pool[0], sgt.sgl, - sgt.nents, IOMMU_READ | IOMMU_WRITE); + sgt.nents, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); if (err < 0) { ADSPRPC_ERR("iommu_map_sg failed with err %d", err); goto iommu_map_bail; From 9f5f25be96007563ff96b9b8f45a0f80cb3dfa0d Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Mon, 23 Jan 2023 16:59:30 -0800 Subject: [PATCH 021/146] Add support for LE compilation Add .am files for LE build compilation. These files will replace Makefile and Kbuild files only on LE builds. 
Change-Id: Ib8dfa89523adf802acce57be0d2064f790bac6d2 Signed-off-by: Anirudh Raghavendra --- Kbuild.am | 15 +++++++++++++++ Makefile.am | 19 +++++++++++++++++++ dsp/adsprpc.c | 7 ++++++- 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 Kbuild.am create mode 100644 Makefile.am diff --git a/Kbuild.am b/Kbuild.am new file mode 100644 index 0000000000..4249e994ca --- /dev/null +++ b/Kbuild.am @@ -0,0 +1,15 @@ +# ported from Android.mk +$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS)) + +ifeq ($(CONFIG_ARCH_PINEAPPLE), y) +$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE)) +KBUILD_CPPFLAGS += -DCONFIG_DSP_PINEAPPLE=1 +ccflags-y += -DCONFIG_DSP_PINEAPPLE=1 +ccflags-y += -DCONFIG_MSM_ADSPRPC_TRUSTED=1 +endif + +frpc-trusted-adsprpc-y := dsp/adsprpc.o \ + dsp/adsprpc_compat.o \ + dsp/adsprpc_socket.o \ + +obj-m := frpc-trusted-adsprpc.o diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000000..1ef0028cb8 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,19 @@ +DSP_KERNEL_ROOT=$(ROOTDIR)vendor/qcom/opensource/dsp-kernel +KBUILD_OPTIONS := DSP_KERNEL_ROOT=$(DSP_KERNEL_ROOT) CONFIG_MSM_ADSPRPC_TRUSTED=m + +ifeq ($(TARGET_SUPPORT),genericarmv8) + KBUILD_OPTIONS += CONFIG_ARCH_PINEAPPLE=y +endif + +all: + $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS) + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions \ No newline at end of file diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index c10844841b..815ff28cfb 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -40,7 +40,6 @@ #include #include "adsprpc_compat.h" #include "adsprpc_shared.h" -#include "fastrpc.h" #include #include #include @@ -64,6 +63,12 @@ #define CREATE_TRACE_POINTS #include "fastrpc_trace.h" +#ifdef CONFIG_MSM_ADSPRPC_TRUSTED +#include 
"../include/linux/fastrpc.h" +#else +#include "fastrpc.h" +#endif + #define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C #define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D #define TZ_PIL_AUTH_QDSP6_PROC 1 From bfd12fbf255adcff9cd10d7c66aefad044dead75 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Wed, 11 Jan 2023 19:30:36 +0530 Subject: [PATCH 022/146] msm: adsprpc: To avoid null pointer dereference To log error for fastrpc_mmap_remove_ssr and compat_fastrpc_get_dsp_info for avoiding null pointer dereferences leading to kw issues. Change-Id: I0f759ce5d9c3100a5cff3e61ad1499f6eb1b55dd Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 5 +++-- dsp/adsprpc_compat.c | 7 ++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 815ff28cfb..231764498c 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4713,7 +4713,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) match = NULL; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (map->servloc_name && + if (map->servloc_name && fl && fl->servloc_name && !strcmp(map->servloc_name, fl->servloc_name)) { match = map; if (map->is_persistent && map->in_use) { @@ -8287,8 +8287,9 @@ static int __init fastrpc_device_init(void) VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL))); if (err) { err = -ENOMEM; - ADSPRPC_WARN("%s: CMA alloc failed err 0x%x\n", + ADSPRPC_ERR("%s: CMA alloc failed err 0x%x\n", __func__, err); + goto device_create_bail; } INIT_HLIST_NODE(&buf->hn); buf->virt = region_vaddr; diff --git a/dsp/adsprpc_compat.c b/dsp/adsprpc_compat.c index 2f8cb0b18c..3cc60e220a 100644 --- a/dsp/adsprpc_compat.c +++ b/dsp/adsprpc_compat.c @@ -772,11 +772,16 @@ static int compat_fastrpc_get_dsp_info(struct fastrpc_file *fl, struct fastrpc_ioctl_capability *info = NULL; compat_uint_t u; int err = 0; + size_t info_size = 0; info32 = compat_ptr(arg); VERIFY(err, NULL != (info = kmalloc( sizeof(*info), 
GFP_KERNEL))); - + info_size = sizeof(*info); + if (err) { + ADSPRPC_ERR("allocation failed for size 0x%zx\n", info_size); + return err; + } err = get_user(u, &info32->domain); if (err) return err; From 6554e75e39e9b34f8d32b9374f9cda9025c4dfc2 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Fri, 3 Feb 2023 07:55:44 -0800 Subject: [PATCH 023/146] msm: ADSPRPC: Prevent mapping refcount for persistent mapping from going bad Mapping refcount goes to negative value when internal invoke fails. This prevents in hyp assigning the memory back to dsp. Change-Id: I3b0f4a80e8fd1cde43e5b6fe94030258eb01ead1 Acked-by: Abhinav Parihar Signed-off-by: Vamsi Krishna Gattupalli Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 231764498c..0c98bd18b2 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1049,6 +1049,12 @@ static inline bool fastrpc_get_persistent_map(size_t len, struct fastrpc_mmap ** map->is_persistent && !map->in_use) { *pers_map = map; map->in_use = true; + /* + * Incrementing map reference count when getting + * the map to avoid negative reference count when + * freeing the map. + */ + map->refs++; found = true; break; } @@ -4741,6 +4747,11 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) } spin_lock_irqsave(&me->hlock, irq_flags); map->in_use = false; + /* + * decrementing refcount for persistent mappings + * as incrementing it in fastrpc_get_persistent_map + */ + map->refs--; } if (map->is_persistent) { match = NULL; From 0821b24a8141cc7ad022a3df48d2d5d504aea3b8 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Tue, 10 Jan 2023 14:32:02 +0530 Subject: [PATCH 024/146] msm:ADSPRPC: adding spin_unlock when bailing. spin_lock should be released before bailing. 
Change-Id: I3ac6043221272fa1dda2f36f4add810df41a17f3 Acked-by: ANANDU E Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 1 + 1 file changed, 1 insertion(+) mode change 100644 => 100755 dsp/adsprpc.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100644 new mode 100755 index 0c98bd18b2..8806003209 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3661,6 +3661,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, "init memory for process %d should be between %d and %d\n", init->memlen, INIT_MEMLEN_MIN_DYNAMIC, INIT_MEMLEN_MAX_DYNAMIC); err = -EINVAL; + spin_unlock(&fl->hlock); goto bail; } dsp_userpd_memlen = init->memlen; From 4fef9484483919722baa67f2ab43cad0e3ba504c Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Mon, 13 Feb 2023 11:08:37 +0530 Subject: [PATCH 025/146] msm: adsprpc: add ioctl control for thread exit Add ioctl control support to exit notif and async threads. Change-Id: Ifc7212fd84bdba46724252e00e1d479865cefc64 Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 25 +++++++++++++++++++++++++ dsp/adsprpc_shared.h | 6 ++++++ 2 files changed, 31 insertions(+) mode change 100755 => 100644 dsp/adsprpc.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100755 new mode 100644 index 8806003209..310e3e6433 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3230,6 +3230,10 @@ read_async_job: err = -EBADF; goto bail; } + if (fl->exit_async) { + err = -EFAULT; + goto bail; + } VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; @@ -3312,6 +3316,10 @@ read_notif_status: err = -EBADF; goto bail; } + if (fl->exit_notif) { + err = -EFAULT; + goto bail; + } VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; @@ -5900,6 +5908,8 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->dsp_process_state = PROCESS_CREATE_DEFAULT; fl->is_unsigned_pd = false; fl->is_compat = false; + fl->exit_notif = false; + fl->exit_async = false; init_completion(&fl->work); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; 
filp->private_data = fl; @@ -6109,6 +6119,7 @@ int fastrpc_internal_control(struct fastrpc_file *fl, struct fastrpc_apps *me = &gfa; int sessionid = 0; u32 silver_core_count = me->silvercores.corecount, ii = 0, cpu; + unsigned long flags = 0; VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps)); if (err) { @@ -6209,6 +6220,20 @@ int fastrpc_internal_control(struct fastrpc_file *fl, case FASTRPC_CONTROL_SMMU: fl->sharedcb = cp->smmu.sharedcb; break; + case FASTRPC_CONTROL_ASYNC_WAKE: + fl->exit_async = true; + spin_lock_irqsave(&fl->aqlock, flags); + atomic_add(1, &fl->async_queue_job_count); + wake_up_interruptible(&fl->async_wait_queue); + spin_unlock_irqrestore(&fl->aqlock, flags); + break; + case FASTRPC_CONTROL_NOTIF_WAKE: + fl->exit_notif = true; + spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags); + atomic_add(1, &fl->proc_state_notif.notif_queue_count); + wake_up_interruptible(&fl->proc_state_notif.notif_wait_queue); + spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags); + break; default: err = -EBADRQC; break; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index c49d491144..02ce55cfc6 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -491,6 +491,8 @@ enum fastrpc_control_type { /* Clean process on DSP */ FASTRPC_CONTROL_DSPPROCESS_CLEAN = 6, FASTRPC_CONTROL_RPC_POLL = 7, + FASTRPC_CONTROL_ASYNC_WAKE = 8, + FASTRPC_CONTROL_NOTIF_WAKE = 9, }; struct fastrpc_ctrl_latency { @@ -1098,6 +1100,10 @@ struct fastrpc_file { spinlock_t dspsignals_lock; struct mutex signal_create_mutex; struct completion shutdown; + /* Flag to indicate notif thread exit requested*/ + bool exit_notif; + /* Flag to indicate async thread exit requested*/ + bool exit_async; }; union fastrpc_ioctl_param { From 4024c080239bf94b95938538d19a49908a0967c4 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Fri, 17 Feb 2023 11:13:43 +0530 Subject: [PATCH 026/146] msm: adsprpc: cleanup ADSP rh memory during hibernation During hibernation suspend, we 
pass null fl, allow ADSP remoteheap memory cleanup in this case. Change-Id: I42b80ded3955f9d5200cb77114a476a69f5353d5 Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 310e3e6433..f53ef340f3 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4728,8 +4728,9 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) match = NULL; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (map->servloc_name && fl && - fl->servloc_name && !strcmp(map->servloc_name, fl->servloc_name)) { + /* In hibernation suspend case fl is NULL, check !fl to cleanup */ + if (!fl || (fl && map->servloc_name && fl->servloc_name + && !strcmp(map->servloc_name, fl->servloc_name))) { match = map; if (map->is_persistent && map->in_use) { int destVM[1] = {VMID_HLOS}; From c31f7a037d7b3ee9f609cc15decfa8f847f715b9 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Thu, 9 Feb 2023 16:28:27 +0530 Subject: [PATCH 027/146] msm: adsprpc: add variable to store interrupted ts Currently no debug information of when ctx interrupted, restored. This make ioctl hang/high invoke time issues hard to debug. Add variables to store interrupted, restore timestamp in pending ctx and gmsg_log. 
Change-Id: I32226e96c54acb5878f2b8d37110bae7f65d49f3 Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 71 ++++++++++++++++++++++++++++++++++++-------- dsp/adsprpc_shared.h | 5 ++++ 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index f53ef340f3..4a3e7eb04a 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -481,38 +481,76 @@ static inline int poll_for_remote_response(struct smq_invoke_ctx *ctx, uint32_t return err; } +enum interrupted_state { + DEFAULT_STATE = 0, + INTERRUPTED_STATE = 1, + RESTORED_STATE = 2, +}; + /** * fastrpc_update_txmsg_buf - Update history of sent glink messages - * @chan : Channel context * @msg : Pointer to RPC message to remote subsystem * @transport_send_err : Error from transport * @ns : Timestamp (in ns) of sent message * @xo_time_in_us : XO Timestamp (in us) of sent message + * @ctx : invoke ctx + * @interrupted : 0/1/2 (default/interrupted/restored) * * Returns none */ -static inline void fastrpc_update_txmsg_buf(struct fastrpc_channel_ctx *chan, - struct smq_msg *msg, int transport_send_err, int64_t ns, uint64_t xo_time_in_us) +static inline void fastrpc_update_txmsg_buf(struct smq_msg *msg, + int transport_send_err, int64_t ns, uint64_t xo_time_in_us, + struct smq_invoke_ctx *ctx, enum interrupted_state interrupted) { unsigned long flags = 0; unsigned int tx_index = 0; struct fastrpc_tx_msg *tx_msg = NULL; + struct fastrpc_channel_ctx *chan = NULL; + struct fastrpc_file *fl = ctx->fl; + int err = 0, cid = -1; + if (!fl) { + err = -EBADF; + goto bail; + } + cid = fl->cid; + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + chan = &fl->apps->channel[cid]; spin_lock_irqsave(&chan->gmsg_log.lock, flags); - tx_index = chan->gmsg_log.tx_index; - tx_msg = &chan->gmsg_log.tx_msgs[tx_index]; + if (interrupted){ + if (ctx->tx_index >= 0 && ctx->tx_index < GLINK_MSG_HISTORY_LEN) { + tx_msg = &chan->gmsg_log.tx_msgs[ctx->tx_index]; - memcpy(&tx_msg->msg, msg, 
sizeof(struct smq_msg)); - tx_msg->transport_send_err = transport_send_err; - tx_msg->ns = ns; - tx_msg->xo_time_in_us = xo_time_in_us; + if (tx_msg->msg.invoke.header.ctx == ctx->msg.invoke.header.ctx) { + tx_msg->xo_time_in_us_interrupted = ctx->xo_time_in_us_interrupted; + tx_msg->xo_time_in_us_restored = ctx->xo_time_in_us_restored; + } + } + } else { + tx_index = chan->gmsg_log.tx_index; + ctx->tx_index = tx_index; + tx_msg = &chan->gmsg_log.tx_msgs[tx_index]; - tx_index++; - chan->gmsg_log.tx_index = - (tx_index > (GLINK_MSG_HISTORY_LEN - 1)) ? 0 : tx_index; + memcpy(&tx_msg->msg, msg, sizeof(struct smq_msg)); + tx_msg->transport_send_err = transport_send_err; + tx_msg->ns = ns; + tx_msg->xo_time_in_us = xo_time_in_us; + + tx_index++; + chan->gmsg_log.tx_index = + (tx_index > (GLINK_MSG_HISTORY_LEN - 1)) ? 0 : tx_index; + } spin_unlock_irqrestore(&chan->gmsg_log.lock, flags); +bail: + if (err) + ADSPRPC_ERR("adsprpc: %s: unable to update txmsg buf (err %d) for ctx: 0x%x\n", + __func__, err, ctx->msg.invoke.header.ctx); } /** @@ -1534,6 +1572,11 @@ static int context_restore_interrupted(struct fastrpc_file *fl, "interrupted sc (0x%x) or fl (%pK) does not match with invoke sc (0x%x) or fl (%pK)\n", ictx->sc, ictx->fl, invoke->sc, fl); } else { + ictx->xo_time_in_us_restored = CONVERT_CNT_TO_US(__arch_counter_get_cntvct()); + fastrpc_update_txmsg_buf(NULL, 0, 0, 0, ictx, RESTORED_STATE); + ADSPRPC_DEBUG( + "restored sc (0x%x) of fl (%pK), interrupt ts 0x%llx, restore ts 0x%llx \n", + ictx->sc, ictx->fl, ictx->xo_time_in_us_interrupted, ictx->xo_time_in_us_restored); ctx = ictx; hlist_del_init(&ctx->hn); hlist_add_head(&ctx->hn, &fl->clst.pending); @@ -1832,6 +1875,8 @@ static void context_save_interrupted(struct smq_invoke_ctx *ctx) { struct fastrpc_ctx_lst *clst = &ctx->fl->clst; + ctx->xo_time_in_us_interrupted = CONVERT_CNT_TO_US(__arch_counter_get_cntvct()); + fastrpc_update_txmsg_buf(NULL, 0, 0, 0, ctx, INTERRUPTED_STATE); spin_lock(&ctx->fl->hlock); 
hlist_del_init(&ctx->hn); hlist_add_head(&ctx->hn, &clst->interrupted); @@ -2806,7 +2851,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx, handle, sc, msg->invoke.page.addr, msg->invoke.page.size); ns = get_timestamp_in_ns(); - fastrpc_update_txmsg_buf(channel_ctx, msg, err, ns, xo_time_in_us); + fastrpc_update_txmsg_buf(msg, err, ns, xo_time_in_us, ctx, DEFAULT_STATE); bail: return err; } diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 02ce55cfc6..b36cf92dc0 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -737,6 +737,8 @@ struct fastrpc_tx_msg { int transport_send_err; /* transport error */ int64_t ns; /* Timestamp (in ns) of msg */ uint64_t xo_time_in_us; /* XO Timestamp (in us) of sent message */ + uint64_t xo_time_in_us_interrupted; /* XO Timestamp (in us) of interrupted ctx */ + uint64_t xo_time_in_us_restored; /* XO Timestamp (in us) of restored ctx */ }; struct fastrpc_rx_msg { @@ -829,6 +831,9 @@ struct smq_invoke_ctx { uint32_t sc_interrupted; struct fastrpc_file *fl_interrupted; uint32_t handle_interrupted; + uint64_t xo_time_in_us_interrupted; /* XO Timestamp (in us) of interrupted ctx */ + uint64_t xo_time_in_us_restored; /* XO Timestamp (in us) of restored ctx */ + int tx_index; /* index of current ctx in channel gmsg_log array */ }; struct fastrpc_ctx_lst { From f093a3613d42015a39a058d758cf844bed2748d5 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Wed, 22 Feb 2023 22:34:50 +0530 Subject: [PATCH 028/146] msm: ADSPRPC: Handle potential leak information Memset the entire header buffer to fix potential leak information for Cached buffers. 
Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4a3e7eb04a..66d442c7cc 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2363,7 +2363,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) VERIFY(err, !IS_ERR_OR_NULL(ctx->buf->virt)); if (err) goto bail; - memset(ctx->buf->virt, 0, metalen); + memset(ctx->buf->virt, 0, ctx->buf->size); } ctx->used = metalen; From 7d90dc2702cd2b42e35affefd562163d26d4f8ee Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Tue, 24 Jan 2023 14:55:19 +0530 Subject: [PATCH 029/146] msm: ADSPRPC: Add subsystem states for restart, up and down Current subsystem state flag cannot define all state of the subsystem. Different handling might be needed for different subsystem states. Add multiple subsystem state support. Change-Id: Id091dfded583c8cd7e95c0d306de6dd34b03485d Acked-by: Santosh Sakore Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 26 +++++++++++++++++--------- dsp/adsprpc_shared.h | 2 +- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 66d442c7cc..a2146a8ec8 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -237,6 +237,13 @@ enum fastrpc_proc_attr { FASTRPC_MODE_SYSTEM_UNSIGNED_PD = 1 << 17, }; +/* FastRPC remote subsystem state*/ +enum fastrpc_remote_subsys_state { + SUBSYSTEM_RESTARTING = 0, + SUBSYSTEM_DOWN, + SUBSYSTEM_UP, +}; + #define PERF_END ((void)0) #define PERF(enb, cnt, ff) \ @@ -4373,7 +4380,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) if (err) goto bail; - VERIFY(err, fl->apps->channel[cid].issubsystemup == 1); + VERIFY(err, fl->apps->channel[cid].subsystemstate != SUBSYSTEM_RESTARTING); if (err) { wait_for_completion(&fl->shutdown); err = -ECONNRESET; @@ -5612,8 +5619,8 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, len += scnprintf(fileinfo + len, DEBUGFS_SIZE - 
len, "\n%s %s %s\n", title, " CHANNEL INFO ", title); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%-7s|%-10s|%-14s|%-9s|%-13s\n", - "subsys", "sesscount", "issubsystemup", + "%-7s|%-10s|%-15s|%-9s|%-13s\n", + "subsys", "sesscount", "subsystemstate", "ssrcount", "session_used"); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "-%s%s%s%s-\n", single_line, single_line, @@ -5627,8 +5634,8 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, DEBUGFS_SIZE - len, "|%-10u", chan->sesscount); len += scnprintf(fileinfo + len, - DEBUGFS_SIZE - len, "|%-14d", - chan->issubsystemup); + DEBUGFS_SIZE - len, "|%-15d", + chan->subsystemstate); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "|%-9u", chan->ssrcount); @@ -5859,7 +5866,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) mutex_lock(&me->channel[cid].smd_mutex); if (me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { - if (!me->channel[cid].issubsystemup) { + if (me->channel[cid].subsystemstate != SUBSYSTEM_UP) { err = -ECONNREFUSED; mutex_unlock(&me->channel[cid].smd_mutex); goto bail; @@ -7291,7 +7298,7 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, __func__, gcinfo[cid].subsys); mutex_lock(&me->channel[cid].smd_mutex); ctx->ssrcount++; - ctx->issubsystemup = 0; + ctx->subsystemstate = SUBSYSTEM_RESTARTING; mutex_unlock(&me->channel[cid].smd_mutex); if (cid == RH_CID) me->staticpd_flags = 0; @@ -7306,6 +7313,7 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, complete(&fl->shutdown); } spin_unlock(&me->hlock); + ctx->subsystemstate = SUBSYSTEM_DOWN; pr_info("adsprpc: %s: received RAMDUMP notification for %s\n", __func__, gcinfo[cid].subsys); break; @@ -7335,7 +7343,7 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, "QCOM_SSR_AFTER_POWERUP", "fastrpc_restart_notifier-enter"); pr_info("adsprpc: %s: %s subsystem is up\n", __func__, gcinfo[cid].subsys); - ctx->issubsystemup = 1; 
+ ctx->subsystemstate = SUBSYSTEM_UP; break; default: break; @@ -8352,7 +8360,7 @@ static int __init fastrpc_device_init(void) me->channel[i].ssrcount = 0; me->channel[i].in_hib = 0; me->channel[i].prevssrcount = 0; - me->channel[i].issubsystemup = 1; + me->channel[i].subsystemstate = SUBSYSTEM_UP; me->channel[i].rh_dump_dev = NULL; me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb; me->channel[i].handle = qcom_register_ssr_notifier( diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index b36cf92dc0..64cb740a61 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -903,7 +903,7 @@ struct fastrpc_channel_ctx { int in_hib; void *handle; uint64_t prevssrcount; - int issubsystemup; + int subsystemstate; int vmid; struct secure_vm rhvm; void *rh_dump_dev; From c8bd1df6af3f83cb488fa3760ab1bcbe5ec65d8a Mon Sep 17 00:00:00 2001 From: Abhinav Parihar Date: Tue, 28 Feb 2023 11:56:28 +0530 Subject: [PATCH 030/146] dsp-kernel: dsp: Update CDSP state flag after image unloading CDSP state flag is not updated after subsystem is shutdown. Due to this CDSP image loading is skipped. Update state flag after unloading. Change-Id: Ife49e6845da4a6bfe149c04459c6823c09ccde31 Signed-off-by: Abhinav Parihar --- dsp/cdsp-loader.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dsp/cdsp-loader.c b/dsp/cdsp-loader.c index d3b1dec433..e63718d27e 100644 --- a/dsp/cdsp-loader.c +++ b/dsp/cdsp-loader.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2012-2014, 2017-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -156,6 +156,7 @@ static void cdsp_loader_unload(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s: calling subsystem_put\n", __func__); rproc_shutdown(priv->pil_h); priv->pil_h = NULL; + cdsp_state = CDSP_SUBSYS_DOWN; } } @@ -229,6 +230,7 @@ static int cdsp_loader_remove(struct platform_device *pdev) if (priv->pil_h) { rproc_shutdown(priv->pil_h); priv->pil_h = NULL; + cdsp_state = CDSP_SUBSYS_DOWN; } if (priv->boot_cdsp_obj) { From 134766dab7f8317f98499a360a14f873b2261bec Mon Sep 17 00:00:00 2001 From: John Moon Date: Tue, 7 Mar 2023 20:31:53 -0800 Subject: [PATCH 031/146] Add copy_to_dist_dir() rule Add copy_to_dist_dir to Bazel build to output kernel build outputs to dist dir. Change-Id: Ic6ae4a1b98e9672aa1beba11aeff3707b0d78667 Signed-off-by: John Moon --- define_modules.bzl | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/define_modules.bzl b/define_modules.bzl index 1ece612915..9650e7b173 100644 --- a/define_modules.bzl +++ b/define_modules.bzl @@ -1,5 +1,6 @@ # TODO # Add ddk module definition for frpc-trusted driver +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") load( "//build/kernel/kleaf:kernel.bzl", @@ -40,3 +41,16 @@ def define_modules(target, variant): srcs = ["dsp/cdsp-loader.c"], out = "cdsp-loader.ko", ) + + copy_to_dist_dir( + name = "{}_dsp-kernel_dist".format(kernel_build_variant), + data = [ + ":{}_frpc-adsprpc".format(kernel_build_variant), + ":{}_cdsp-loader".format(kernel_build_variant), + ], + dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + ) From ee50316af59e0cf1bc6f95b8b9a1b46045b8b6f4 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Fri, 24 Feb 2023 10:51:48 -0800 Subject: [PATCH 032/146] msm: adsprpc: Use upstream API qcom_scm_assign_mem Currently hyp_assign_phys is being used to assign mmeory to remote subsystems. 
hyp_assign_phys is not upstream friendly. Use qcom_scm_assign_mem in place of hyp_assign_phys to deprecate downstream API. Signed-off-by: Himateja Reddy Change-Id: Ic4aed6570598a96e6401777836bd390ede877ff2 --- dsp/adsprpc.c | 157 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 102 insertions(+), 55 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index a2146a8ec8..a311aaa3d4 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -384,8 +384,6 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { }, }; -static int hlosvm[1] = {VMID_HLOS}; -static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; static uint32_t kernel_capabilities[FASTRPC_MAX_ATTRIBUTES - FASTRPC_MAX_DSP_ATTRIBUTES] = { @@ -728,9 +726,6 @@ skip_buf_cache: buf->raddr = 0; } if (!IS_ERR_OR_NULL(buf->virt)) { - int destVM[1] = {VMID_HLOS}; - int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; - VERIFY(err, fl->sctx != NULL); if (err) goto bail; @@ -747,12 +742,16 @@ skip_buf_cache: } vmid = fl->apps->channel[cid].vmid; if ((vmid) && (fl->apps->channel[cid].in_hib == 0)) { - int srcVM[2] = {VMID_HLOS, vmid}; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS)| BIT(vmid); + struct qcom_scm_vmperm dest_perms = {0}; int hyp_err = 0; - hyp_err = hyp_assign_phys(buf->phys, + dest_perms.vmid = QCOM_SCM_VMID_HLOS; + dest_perms.perm = QCOM_SCM_PERM_RWX; + + hyp_err = qcom_scm_assign_mem(buf->phys, buf_page_size(buf->size), - srcVM, 2, destVM, destVMperm, 1); + &src_perms, &dest_perms, 1); if (hyp_err) { ADSPRPC_ERR( "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", @@ -1039,8 +1038,6 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (!IS_ERR_OR_NULL(map->buf)) dma_buf_put(map->buf); } else { - int destVM[1] = {VMID_HLOS}; - int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; if (!fl) goto bail; @@ -1052,11 +1049,14 @@ static void 
fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) vmid = fl->apps->channel[cid].vmid; if (vmid && map->phys && (me->channel[cid].in_hib == 0)) { int hyp_err = 0; - int srcVM[2] = {VMID_HLOS, vmid}; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid); + struct qcom_scm_vmperm dst_perms = {0}; - hyp_err = hyp_assign_phys(map->phys, + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; + hyp_err = qcom_scm_assign_mem(map->phys, buf_page_size(map->size), - srcVM, 2, destVM, destVMperm, 1); + &src_perms, &dst_perms, 1); if (hyp_err) { ADSPRPC_ERR( "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", @@ -1351,14 +1351,17 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * vmid = fl->apps->channel[cid].vmid; if (vmid) { - int srcVM[1] = {VMID_HLOS}; - int destVM[2] = {VMID_HLOS, vmid}; - int destVMperm[2] = {PERM_READ | PERM_WRITE, - PERM_READ | PERM_WRITE | PERM_EXEC}; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm dst_perms[2] = {0}; - err = hyp_assign_phys(map->phys, + dst_perms[0].vmid = QCOM_SCM_VMID_HLOS; + dst_perms[0].perm = QCOM_SCM_PERM_RW; + dst_perms[1].vmid = vmid; + dst_perms[1].perm = QCOM_SCM_PERM_RWX; + + err = qcom_scm_assign_mem(map->phys, buf_page_size(map->size), - srcVM, 1, destVM, destVMperm, 2); + &src_perms, dst_perms, 2); if (err) { ADSPRPC_ERR( "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", @@ -1526,13 +1529,16 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size, vmid = fl->apps->channel[cid].vmid; if (vmid) { - int srcVM[1] = {VMID_HLOS}; - int destVM[2] = {VMID_HLOS, vmid}; - int destVMperm[2] = {PERM_READ | PERM_WRITE, - PERM_READ | PERM_WRITE | PERM_EXEC}; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm dst_perms[2] = {0}; - err = hyp_assign_phys(buf->phys, buf_page_size(size), - srcVM, 1, destVM, destVMperm, 2); + dst_perms[0].vmid = QCOM_SCM_VMID_HLOS; + dst_perms[0].perm = QCOM_SCM_PERM_RW; + 
dst_perms[1].vmid = vmid; + dst_perms[1].perm = QCOM_SCM_PERM_RWX; + + err = qcom_scm_assign_mem(buf->phys, buf_page_size(size), + &src_perms, dst_perms, 2); if (err) { ADSPRPC_DEBUG( "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", @@ -4013,9 +4019,20 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, * hyp_assign from HLOS to those VMs (LPASS, ADSP). */ if (rhvm->vmid && mem && mem->refs == 1 && size) { - err = hyp_assign_phys(phys, (uint64_t)size, - hlosvm, 1, - rhvm->vmid, rhvm->vmperm, rhvm->vmcount); + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm *dst_perms; + uint32_t i = 0; + + VERIFY(err, NULL != (dst_perms = kcalloc(rhvm->vmcount, + sizeof(struct qcom_scm_vmperm), GFP_KERNEL))); + for (i = 0; i < rhvm->vmcount; i++) { + dst_perms[i].vmid = rhvm->vmid[i]; + dst_perms[i].perm = rhvm->vmperm[i]; + } + + err = qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, dst_perms, rhvm->vmcount); + kfree(dst_perms); if (err) { ADSPRPC_ERR( "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", @@ -4066,11 +4083,19 @@ bail: me->staticpd_flags = 0; if (rh_hyp_done) { int hyp_err = 0; + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms; + uint32_t i = 0; + for (i = 0; i < rhvm->vmcount; i++) { + src_perms |= BIT(rhvm->vmid[i]); + } + + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; /* Assign memory back to HLOS in case of errors */ - hyp_err = hyp_assign_phys(phys, (uint64_t)size, - rhvm->vmid, rhvm->vmcount, - hlosvm, hlosvmperm, 1); + hyp_err = qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, &dst_perms, 1); if (hyp_err) ADSPRPC_WARN( "rh hyp unassign failed with %d for phys 0x%llx of size %zu\n", @@ -4627,10 +4652,21 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, } if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR && me->channel[cid].rhvm.vmid && refs == 1) { - err = hyp_assign_phys(phys, (uint64_t)size, - hlosvm, 1, me->channel[cid].rhvm.vmid, - 
me->channel[cid].rhvm.vmperm, - me->channel[cid].rhvm.vmcount); + struct secure_vm *rhvm = &me->channel[cid].rhvm; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm *dst_perms; + uint32_t i = 0; + + VERIFY(err, NULL != (dst_perms = kcalloc(rhvm->vmcount, + sizeof(struct qcom_scm_vmperm), GFP_KERNEL))); + + for (i = 0; i < rhvm->vmcount; i++) { + dst_perms[i].vmid = rhvm->vmid[i]; + dst_perms[i].perm = rhvm->vmperm[i]; + } + err = qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, dst_perms, rhvm->vmcount); + kfree(dst_perms); if (err) { ADSPRPC_ERR( "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", @@ -4713,16 +4749,22 @@ static int fastrpc_munmap_rh(uint64_t phys, size_t size, { int err = 0; struct fastrpc_apps *me = &gfa; - int destVM[1] = {VMID_HLOS}; - int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; + struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; - if ((me->channel[RH_CID].rhvm.vmid) + if ((rhvm->vmid) && (me->channel[RH_CID].in_hib == 0)) { - err = hyp_assign_phys(phys, - (uint64_t)size, - me->channel[RH_CID].rhvm.vmid, - me->channel[RH_CID].rhvm.vmcount, - destVM, destVMperm, 1); + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms = {0}; + uint32_t i = 0; + + for (i = 0; i < rhvm->vmcount; i++) { + src_perms |= BIT(rhvm->vmid[i]); + } + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; + + err = qcom_scm_assign_mem(phys, + (uint64_t)size, &src_perms, &dst_perms, 1); if (err) { ADSPRPC_ERR( "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", @@ -4785,20 +4827,25 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) && !strcmp(map->servloc_name, fl->servloc_name))) { match = map; if (map->is_persistent && map->in_use) { - int destVM[1] = {VMID_HLOS}; - int destVMperm[1] = {PERM_READ | PERM_WRITE - | PERM_EXEC}; + struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; uint64_t phys = map->phys; size_t size = map->size; spin_unlock_irqrestore(&me->hlock, 
irq_flags); - //hyp assign it back to HLOS - if (me->channel[RH_CID].rhvm.vmid) { - err = hyp_assign_phys(phys, - (uint64_t)size, - me->channel[RH_CID].rhvm.vmid, - me->channel[RH_CID].rhvm.vmcount, - destVM, destVMperm, 1); + //scm assign it back to HLOS + if (rhvm->vmid) { + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms = {0}; + uint32_t i = 0; + + for (i = 0; i < rhvm->vmcount; i++) { + src_perms |= BIT(rhvm->vmid[i]); + } + + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; + err = qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, &dst_perms, 1); } if (err) { ADSPRPC_ERR( @@ -7640,7 +7687,7 @@ static void init_secure_vmid_list(struct device *dev, char *prop_name, } ADSPRPC_INFO("secure VMID = %d\n", rhvmlist[i]); - rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC; + rhvmpermlist[i] = QCOM_SCM_PERM_RWX; } destvm->vmid = rhvmlist; destvm->vmperm = rhvmpermlist; From b6137ddc631c28ae408d175785ae0293f1316c0c Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Tue, 28 Feb 2023 11:46:41 -0800 Subject: [PATCH 033/146] Stub out CMA alloc when running TVM CMA alloc will not work on TVM. Stubbing out fastrpc_alloc_cma_memory in fastrpc_init to prevent errors during loading. 
Change-Id: I5774f1f0333da86582b9aca8b9cdeae725eaf64f Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index a311aaa3d4..670b19a028 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8415,10 +8415,12 @@ static int __init fastrpc_device_init(void) &me->channel[i].nb); if (i == CDSP_DOMAIN_ID) { me->channel[i].dev = me->non_secure_dev; +#if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) err = fastrpc_alloc_cma_memory(®ion_phys, ®ion_vaddr, MINI_DUMP_DBG_SIZE, (unsigned long)attr); +#endif if (err) ADSPRPC_WARN("%s: CMA alloc failed err 0x%x\n", __func__, err); From 7b787e07ef15d30d182f483c5a723b04a3744439 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Wed, 8 Mar 2023 17:06:49 -0800 Subject: [PATCH 034/146] Increase number of allowed sessions to 14 Increasing number of allowed sessions from 13 to 14 to accomodate the shared context banks for CPZ. Change-Id: Ie0757dae9d0876f5e827daab4fe9e9a661fc0680 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc_shared.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 64cb740a61..2f58a707a0 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -112,7 +112,7 @@ #define MAX_DOMAIN_ID CDSP_DOMAIN_ID #define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ -#define NUM_SESSIONS 13 /* max 12 compute, 1 cpz */ +#define NUM_SESSIONS 14 /* max 11 compute, 3 cpz */ #define VALID_FASTRPC_CID(cid) \ (cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS) From ed0b528087b2098ca70c3b345ca24ebb214b09f7 Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Fri, 3 Mar 2023 17:50:42 -0800 Subject: [PATCH 035/146] adsprpc: Enable fastrpc trusted driver Fixes to fastrpc trusted driver to run on TVM. Added a workqueue for receiving kernel packets. Changed array allocation of kernel sockets to save space. 
Original design was allocating static 2-d array glist_session_ctrl for all possible subsystems and domains. New implementation is allocating staic 2-d reference array. Each entry in the array will only be allocated if remote domain is supported. Change-Id: I303375822714aa6f8eadf525b09326aa05714fd7 Signed-off-by: Edgar Flores --- dsp/adsprpc.c | 17 +- dsp/adsprpc_rpmsg.c | 10 +- dsp/adsprpc_shared.h | 11 +- dsp/adsprpc_socket.c | 382 +++++++++++++++++++++++++++++-------------- 4 files changed, 286 insertions(+), 134 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 670b19a028..0c440b197e 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2860,7 +2860,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, memcpy(&msg_temp, msg, sizeof(struct smq_msg)); msg = &msg_temp; } - err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->trusted_vm); + err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->tvm_remote_domain); trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx, handle, sc, msg->invoke.page.addr, msg->invoke.page.size); ns = get_timestamp_in_ns(); @@ -3990,8 +3990,6 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, err = fastrpc_mmap_remove_pdr(fl); if (err) goto bail; - } else if (!strcmp(proc_name, "securepd")) { - fl->trusted_vm = true; } else { ADSPRPC_ERR( "Create static process is failed for proc_name %s", @@ -3999,7 +3997,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, goto bail; } - if (!fl->trusted_vm && (!me->staticpd_flags && !me->legacy_remote_heap)) { + if ((!me->staticpd_flags && !me->legacy_remote_heap)) { inbuf.pageslen = 1; if (!fastrpc_get_persistent_map(init->memlen, &mem)) { mutex_lock(&fl->map_mutex); @@ -4193,6 +4191,10 @@ int fastrpc_init_process(struct fastrpc_file *fl, } fastrpc_set_servloc(fl, init); + err = fastrpc_set_tvm_remote_domain(fl, init); + if (err) + goto bail; + err = fastrpc_channel_open(fl, init->flags); if 
(err) goto bail; @@ -4401,7 +4403,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) err = -EBADR; goto bail; } - err = verify_transport_device(cid, fl->trusted_vm); + err = verify_transport_device(cid, fl->tvm_remote_domain); if (err) goto bail; @@ -5906,7 +5908,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) if (err) goto bail; - err = verify_transport_device(cid, fl->trusted_vm); + err = verify_transport_device(cid, fl->tvm_remote_domain); if (err) goto bail; @@ -6000,6 +6002,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->apps = me; fl->mode = FASTRPC_MODE_SERIAL; fl->cid = -1; + fl->tvm_remote_domain = -1; fl->dev_minor = dev_minor; fl->init_mem = NULL; fl->qos_request = 0; @@ -6490,7 +6493,7 @@ int fastrpc_dspsignal_signal(struct fastrpc_file *fl, } msg = (((uint64_t)fl->tgid) << 32) | ((uint64_t)sig->signal_id); - err = fastrpc_transport_send(cid, (void *)&msg, sizeof(msg), fl->trusted_vm); + err = fastrpc_transport_send(cid, (void *)&msg, sizeof(msg), fl->tvm_remote_domain); mutex_unlock(&channel_ctx->smd_mutex); bail: diff --git a/dsp/adsprpc_rpmsg.c b/dsp/adsprpc_rpmsg.c index f5ea5b2b6f..f215fc3fc7 100644 --- a/dsp/adsprpc_rpmsg.c +++ b/dsp/adsprpc_rpmsg.c @@ -21,7 +21,7 @@ struct frpc_transport_session_control { static struct frpc_transport_session_control rpmsg_session_control[NUM_CHANNELS]; -inline int verify_transport_device(int cid, bool trusted_vm) +inline int verify_transport_device(int cid, int tvm_remote_domain) { int err = 0; struct frpc_transport_session_control *rpmsg_session = &rpmsg_session_control[cid]; @@ -197,7 +197,7 @@ int fastrpc_wait_for_transport_interrupt(int cid, return err; } -int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm) +int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, int tvm_remote_domain) { int err = 0; struct frpc_transport_session_control *rpmsg_session = 
&rpmsg_session_control[cid]; @@ -256,6 +256,12 @@ inline void fastrpc_transport_session_deinit(int cid) mutex_destroy(&rpmsg_session_control[cid].rpmsg_mutex); } +int fastrpc_set_tvm_remote_domain(struct fastrpc_file *fl, struct fastrpc_ioctl_init *init) +{ + fl->tvm_remote_domain = -1; + return 0; +} + int fastrpc_transport_init(void) { int err = 0; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 2f58a707a0..f3342de6b3 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -624,14 +624,17 @@ enum fastrpc_process_exit_states { FASTRPC_PROCESS_DSP_EXIT_ERROR = 4, }; -inline int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm); +struct fastrpc_file; + +int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, int tvm_remote_domain); inline int fastrpc_handle_rpc_response(void *data, int len, int cid); -inline int verify_transport_device(int cid, bool trusted_vm); +inline int verify_transport_device(int cid, int tvm_remote_domain); int fastrpc_transport_init(void); void fastrpc_transport_deinit(void); void fastrpc_transport_session_init(int cid, char *subsys); void fastrpc_transport_session_deinit(int cid); int fastrpc_wait_for_transport_interrupt(int cid, unsigned int flags); +int fastrpc_set_tvm_remote_domain(struct fastrpc_file *fl, struct fastrpc_ioctl_init *init); static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra, uint32_t sc) @@ -711,8 +714,6 @@ struct qos_cores { int corecount; }; -struct fastrpc_file; - struct fastrpc_buf { struct hlist_node hn; struct hlist_node hn_rem; @@ -1053,7 +1054,7 @@ struct fastrpc_file { int tgid_open; /* Process ID during device open */ int tgid; /* Process ID that uses device for RPC calls */ int cid; - bool trusted_vm; + int tvm_remote_domain; uint64_t ssrcount; int pd; char *servloc_name; diff --git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c index 9ff3fa28f8..88b245b933 100644 --- a/dsp/adsprpc_socket.c +++ 
b/dsp/adsprpc_socket.c @@ -1,6 +1,21 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +/* For debug only. Uncomment these blocks to log on every VERIFY statement */ + +/* + * #ifndef VERIFY_PRINT_ERROR + * #define VERIFY_PRINT_ERROR + * #endif + */ + +/* + * #ifndef VERIFY_PRINT_INFO + * #define VERIFY_PRINT_INFO + * #endif + * #define VERIFY_IPRINTF(format, ...) pr_info(format, ##__VA_ARGS__) */ #include @@ -8,14 +23,12 @@ #include #include #include "fastrpc_trace.h" +#include #include "adsprpc_shared.h" // Registered QRTR service ID #define FASTRPC_REMOTE_SERVER_SERVICE_ID 5012 -// Number of remote domains -#define REMOTE_DOMAINS (2) - /* * Fastrpc remote server instance ID bit-map: * @@ -42,6 +55,7 @@ union rsp { enum fastrpc_remote_domains_id { SECURE_PD = 0, GUEST_OS = 1, + MAX_REMOTE_ID = SECURE_PD + 1, }; struct fastrpc_socket { @@ -55,44 +69,84 @@ struct fastrpc_socket { struct frpc_transport_session_control { struct fastrpc_socket frpc_socket; // Fastrpc socket data structure uint32_t remote_server_instance; // Unique remote server instance ID - bool remote_domain_available; // Flag to indicate if remote domain is enabled bool remote_server_online; // Flag to indicate remote server status + struct work_struct work; // work for handling incoming messages + struct workqueue_struct *wq; // workqueue to post @work on +}; + +struct remote_domain_configuration { + int channel_id; + int remote_domain; }; /** * glist_session_ctrl * Static list containing socket session information for all remote domains. - * Update session flag remote_domain_available whenever a remote domain will be using - * kernel sockets. 
*/ -static struct frpc_transport_session_control glist_session_ctrl[NUM_CHANNELS][REMOTE_DOMAINS] = { - [CDSP_DOMAIN_ID][SECURE_PD].remote_domain_available = true +static struct frpc_transport_session_control *glist_session_ctrl[MAX_DOMAIN_ID][MAX_REMOTE_ID]; + + +static const struct remote_domain_configuration configurations[] = { + { + .channel_id = CDSP_DOMAIN_ID, + .remote_domain = SECURE_PD, + }, }; +int fastrpc_set_tvm_remote_domain(struct fastrpc_file *fl, struct fastrpc_ioctl_init *init) +{ + int err = 0; + char *proc_name = NULL; + + fl->tvm_remote_domain = GUEST_OS; + if (init->file) { + if (!init->filelen) + goto bail; + + proc_name = kzalloc(init->filelen + 1, GFP_KERNEL); + VERIFY(err, !IS_ERR_OR_NULL(proc_name)); + if (err) { + err = -ENOMEM; + goto bail; + } + err = copy_from_user((void *)proc_name, + (void __user *)init->file, init->filelen); + if (err) { + err = -EFAULT; + goto bail; + } + if (!strcmp(proc_name, "securepd")) + fl->tvm_remote_domain = SECURE_PD; + } +bail: + kfree(proc_name); + return err; +} + /** * verify_transport_device() * @cid: Channel ID. - * @trusted_vm: Flag to indicate whether session is for secure PD or guest OS. + * @tvm_remote_domain: Remote domain on TVM. * - * Obtain remote session information given channel ID and trusted_vm + * Obtain remote session information given channel ID and tvm_remote_domain * and verify that socket has been created and remote server is up. * * Return: 0 on success or negative errno value on failure. */ -inline int verify_transport_device(int cid, bool trusted_vm) +inline int verify_transport_device(int cid, int tvm_remote_domain) { int remote_domain, err = 0; struct frpc_transport_session_control *session_control = NULL; - remote_domain = (trusted_vm) ? 
SECURE_PD : GUEST_OS; - VERIFY(err, remote_domain < REMOTE_DOMAINS); + remote_domain = tvm_remote_domain; + VERIFY(err, remote_domain < MAX_REMOTE_ID); if (err) { err = -ECHRNG; goto bail; } - session_control = &glist_session_ctrl[cid][remote_domain]; - VERIFY(err, session_control->remote_domain_available); + session_control = glist_session_ctrl[cid][remote_domain]; + VERIFY(err, session_control); if (err) { err = -ECHRNG; goto bail; @@ -117,14 +171,19 @@ static void fastrpc_recv_new_server(struct frpc_transport_session_control *sessi unsigned int node, unsigned int port) { uint32_t remote_server_instance = session_control->remote_server_instance; + int32_t err = 0; /* Ignore EOF marker */ - if (!node && !port) - return; + if (!node && !port) { + err = -EINVAL; + goto bail; + } if (service != FASTRPC_REMOTE_SERVER_SERVICE_ID || - instance != remote_server_instance) - return; + instance != remote_server_instance) { + err = -ENOMSG; + goto bail; + } mutex_lock(&session_control->frpc_socket.socket_mutex); session_control->frpc_socket.remote_sock_addr.sq_family = AF_QIPCRTR; @@ -132,28 +191,42 @@ static void fastrpc_recv_new_server(struct frpc_transport_session_control *sessi session_control->frpc_socket.remote_sock_addr.sq_port = port; session_control->remote_server_online = true; mutex_unlock(&session_control->frpc_socket.socket_mutex); - ADSPRPC_INFO("Remote server is up: remote ID (0x%x)", remote_server_instance); + ADSPRPC_INFO("Remote server is up: remote ID (0x%x), node %u, port %u", + remote_server_instance, node, port); +bail: + if (err != -EINVAL && err) { + ADSPRPC_WARN("Ignoring ctrl packet: service id %u, instance id %u, err %d", + service, instance, err); + } } static void fastrpc_recv_del_server(struct frpc_transport_session_control *session_control, unsigned int node, unsigned int port) { uint32_t remote_server_instance = session_control->remote_server_instance; + int32_t err = 0; /* Ignore EOF marker */ - if (!node && !port) - return; + if (!node && 
!port) { + err = -EINVAL; + goto bail; + } if (node != session_control->frpc_socket.remote_sock_addr.sq_node || - port != session_control->frpc_socket.remote_sock_addr.sq_port) - return; + port != session_control->frpc_socket.remote_sock_addr.sq_port) { + err = -ENOMSG; + goto bail; + } mutex_lock(&session_control->frpc_socket.socket_mutex); session_control->frpc_socket.remote_sock_addr.sq_node = 0; session_control->frpc_socket.remote_sock_addr.sq_port = 0; session_control->remote_server_online = false; mutex_unlock(&session_control->frpc_socket.socket_mutex); - ADSPRPC_WARN("Remote server is down: remote ID (0x%x)", remote_server_instance); + ADSPRPC_INFO("Remote server is down: remote ID (0x%x)", remote_server_instance); +bail: + if (err != -EINVAL && err) + ADSPRPC_WARN("Ignoring ctrl packet: node %u, port %u, err %d", node, port, err); } /** @@ -188,6 +261,79 @@ static void fastrpc_recv_ctrl_pkt(struct frpc_transport_session_control *session le32_to_cpu(pkt->server.node), le32_to_cpu(pkt->server.port)); break; + default: + ADSPRPC_WARN("Ignoring unknown ctrl packet with size %zu", len); + } +} + +/** + * fastrpc_socket_callback_wq() + * @work: workqueue structure for incoming socket packets + * + * Callback function to receive responses that were posted on workqueue. + * We expect to receive control packets with remote domain status notifications or + * RPC data packets from remote domain. 
+ */ +static void fastrpc_socket_callback_wq(struct work_struct *work) +{ + int32_t err = 0, cid = -1, bytes_rx = 0; + uint32_t remote_server_instance = (uint32_t)-1; + bool ignore_err = false; + struct kvec msg = {0}; + struct sockaddr_qrtr remote_sock_addr = {0}; + struct msghdr remote_server = {0}; + struct frpc_transport_session_control *session_control = NULL; + __u32 sq_node = 0, sq_port = 0; + + session_control = container_of(work, struct frpc_transport_session_control, work); + VERIFY(err, session_control); + if (err) { + err = -EFAULT; + goto bail; + } + + remote_server.msg_name = &remote_sock_addr; + remote_server.msg_namelen = sizeof(remote_sock_addr); + msg.iov_base = session_control->frpc_socket.recv_buf; + msg.iov_len = FASTRPC_SOCKET_RECV_SIZE; + remote_server_instance = session_control->remote_server_instance; + for (;;) { + trace_fastrpc_msg("socket_callback_ready: begin"); + err = kernel_recvmsg(session_control->frpc_socket.sock, &remote_server, &msg, 1, + msg.iov_len, MSG_DONTWAIT); + if (err == -EAGAIN) { + ignore_err = true; + goto bail; + } + if (err < 0) + goto bail; + + bytes_rx = err; + err = 0; + + sq_node = remote_sock_addr.sq_node; + sq_port = remote_sock_addr.sq_port; + if (sq_node == session_control->frpc_socket.local_sock_addr.sq_node && + sq_port == QRTR_PORT_CTRL) { + fastrpc_recv_ctrl_pkt(session_control, + session_control->frpc_socket.recv_buf, + bytes_rx); + } else { + cid = GET_CID_FROM_SERVER_INSTANCE(remote_server_instance); + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } + fastrpc_handle_rpc_response(msg.iov_base, msg.iov_len, cid); + } + trace_fastrpc_msg("socket_callback_ready: end"); + } +bail: + if (!ignore_err && err < 0) { + ADSPRPC_ERR( + "invalid response data %pK (rx %d bytes), buffer len %d from remote ID (0x%x) err %d\n", + msg.iov_base, bytes_rx, msg.iov_len, remote_server_instance, err); } } @@ -196,19 +342,13 @@ static void fastrpc_recv_ctrl_pkt(struct 
frpc_transport_session_control *session * @sk: Sock data structure with information related to the callback response. * * Callback function to receive responses from socket layer. - * We expect to receive control packets with remote domain status notifications or - * RPC data packets from remote domain. + * Responses are posted on workqueue to be process. */ static void fastrpc_socket_callback(struct sock *sk) { - int err = 0, cid = 0; - struct kvec msg = {0}; - struct sockaddr_qrtr remote_sock_addr = {0}; - struct msghdr remote_server = {0}; + int32_t err = 0; struct frpc_transport_session_control *session_control = NULL; - remote_server.msg_name = &remote_sock_addr; - remote_server.msg_namelen = sizeof(remote_sock_addr); trace_fastrpc_msg("socket_callback: begin"); VERIFY(err, sk); if (err) { @@ -218,40 +358,12 @@ static void fastrpc_socket_callback(struct sock *sk) rcu_read_lock(); session_control = rcu_dereference_sk_user_data(sk); + if (session_control) + queue_work(session_control->wq, &session_control->work); rcu_read_unlock(); - VERIFY(err, session_control); - if (err) { - err = -EFAULT; - goto bail; - } - - msg.iov_base = session_control->frpc_socket.recv_buf; - msg.iov_len = FASTRPC_SOCKET_RECV_SIZE; - err = kernel_recvmsg(session_control->frpc_socket.sock, &remote_server, &msg, 1, - msg.iov_len, MSG_DONTWAIT); - if (err < 0) - goto bail; - - if (remote_sock_addr.sq_node == session_control->frpc_socket.local_sock_addr.sq_node && - remote_sock_addr.sq_port == QRTR_PORT_CTRL) { - fastrpc_recv_ctrl_pkt(session_control, session_control->frpc_socket.recv_buf, - FASTRPC_SOCKET_RECV_SIZE); - } else { - cid = GET_CID_FROM_SERVER_INSTANCE(session_control->remote_server_instance); - VERIFY(err, VALID_FASTRPC_CID(cid)); - if (err) { - err = -ECHRNG; - goto bail; - } - fastrpc_handle_rpc_response(msg.iov_base, msg.iov_len, cid); - } bail: - if (err < 0) { - ADSPRPC_ERR( - "invalid response data %pK, len %d from remote ID (0x%x) err %d\n", - msg.iov_base, msg.iov_len, 
session_control->remote_server_instance, err); - } - + if (err < 0) + ADSPRPC_ERR("invalid sock received, err %d", err); trace_fastrpc_msg("socket_callback: end"); } @@ -260,16 +372,16 @@ bail: * @cid: Channel ID. * @rpc_msg: RPC message to send to remote domain. * @rpc_msg_size: RPC message size. - * @trusted_vm: Flag to indicate whether to send message to secure PD or guest OS. + * @tvm_remote_domain: Remote domain on TVM. * - * Send RPC message to remote domain. Depending on trusted_vm flag message will be - * sent to secure PD or guest OS on remote subsystem. + * Send RPC message to remote domain. Depending on tvm_remote_domain flag message will be + * sent to one of the remote domains on remote subsystem. * Depending on the channel ID and remote domain, a corresponding socket is retrieved * from glist_session_ctrl and is use to send RPC message. * * Return: 0 on success or negative errno value on failure. */ -int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool trusted_vm) +int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, int tvm_remote_domain) { int err = 0, remote_domain; struct fastrpc_socket *frpc_socket = NULL; @@ -277,14 +389,14 @@ int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool t struct msghdr remote_server = {0}; struct kvec msg = {0}; - remote_domain = (trusted_vm) ? 
SECURE_PD : GUEST_OS; - VERIFY(err, remote_domain < REMOTE_DOMAINS); + remote_domain = tvm_remote_domain; + VERIFY(err, remote_domain < MAX_REMOTE_ID); if (err) { err = -ECHRNG; goto bail; } - session_control = &glist_session_ctrl[cid][remote_domain]; - VERIFY(err, session_control->remote_domain_available); + session_control = glist_session_ctrl[cid][remote_domain]; + VERIFY(err, session_control); if (err) { err = -ECHRNG; goto bail; @@ -304,7 +416,11 @@ int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, bool t mutex_unlock(&frpc_socket->socket_mutex); goto bail; } + err = kernel_sendmsg(frpc_socket->sock, &remote_server, &msg, 1, msg.iov_len); + if (err > 0) + err = 0; + mutex_unlock(&frpc_socket->socket_mutex); bail: return err; @@ -338,6 +454,7 @@ static struct socket *create_socket(struct frpc_transport_session_control *sessi ADSPRPC_ERR("kernel_getsockname failed with err %d\n", err); goto bail; } + rcu_assign_sk_user_data(sock->sk, session_control); sock->sk->sk_data_ready = fastrpc_socket_callback; sock->sk->sk_error_report = fastrpc_socket_callback; @@ -413,47 +530,66 @@ void fastrpc_rproc_trace_events(const char *name, const char *event, * * Initialize and create all sockets that are enabled from all channels * and remote domains. - * Traverse array glist_session_ctrl and initialize session if remote + * Traverse array configurations and initialize session on glist_session_ctrl if remote * domain is enabled. * * Return: 0 on success or negative errno value on failure. 
*/ int fastrpc_transport_init(void) { - int err = 0, cid = 0, ii = 0; + int err = 0, cid = -1, ii = 0, remote_domain = -1; struct socket *sock = NULL; struct fastrpc_socket *frpc_socket = NULL; struct frpc_transport_session_control *session_control = NULL; + struct workqueue_struct *wq = NULL; - for (cid = 0; cid < NUM_CHANNELS; cid++) { - for (ii = 0; ii < REMOTE_DOMAINS; ii++) { - session_control = &glist_session_ctrl[cid][ii]; - if (!session_control->remote_domain_available) - continue; - - session_control->remote_server_online = false; - frpc_socket = &session_control->frpc_socket; - mutex_init(&frpc_socket->socket_mutex); - - sock = create_socket(session_control); - if (IS_ERR_OR_NULL(sock)) { - err = PTR_ERR(sock); - goto bail; - } - - frpc_socket->sock = sock; - frpc_socket->recv_buf = kzalloc(FASTRPC_SOCKET_RECV_SIZE, GFP_KERNEL); - if (!frpc_socket->recv_buf) { - err = -ENOMEM; - goto bail; - } - session_control->remote_server_instance = GET_SERVER_INSTANCE(ii, cid); - err = register_remote_server_notifications(frpc_socket, - session_control->remote_server_instance); - if (err < 0) - goto bail; + for (ii = 0; ii < ARRAY_SIZE(configurations); ii++) { + session_control = kzalloc(sizeof(*session_control), GFP_KERNEL); + VERIFY(err, NULL != session_control); + if (err) { + err = -ENOMEM; + goto bail; } + cid = configurations[ii].channel_id; + remote_domain = configurations[ii].remote_domain; + + session_control->remote_server_online = false; + frpc_socket = &session_control->frpc_socket; + mutex_init(&frpc_socket->socket_mutex); + + sock = create_socket(session_control); + if (IS_ERR_OR_NULL(sock)) { + err = PTR_ERR(sock); + goto bail; + } + + frpc_socket->sock = sock; + frpc_socket->recv_buf = kzalloc(FASTRPC_SOCKET_RECV_SIZE, GFP_KERNEL); + if (!frpc_socket->recv_buf) { + err = -ENOMEM; + goto bail; + } + + INIT_WORK(&session_control->work, fastrpc_socket_callback_wq); + wq = alloc_workqueue("fastrpc_msg_handler", WQ_UNBOUND|WQ_HIGHPRI, 0); + if (!wq) { + 
err = -ENOMEM; + goto bail; + } + session_control->wq = wq; + + session_control->remote_server_instance = GET_SERVER_INSTANCE(remote_domain, cid); + err = register_remote_server_notifications(frpc_socket, + session_control->remote_server_instance); + if (err < 0) + goto bail; + + glist_session_ctrl[cid][remote_domain] = session_control; + ADSPRPC_INFO("Created and registered socket for remote server (service ID %u, instance ID 0x%x)\n", + FASTRPC_REMOTE_SERVER_SERVICE_ID, session_control->remote_server_instance); + } + err = 0; bail: if (err) @@ -466,30 +602,36 @@ bail: * * Deinitialize and release all sockets that are enabled from all channels * and remote domains. - * Traverse array glist_session_ctrl and deinitialize session if remote - * domain is enabled. + * Traverse array configurations and deinitialize corresponding session from + * glist_session_ctrl. */ void fastrpc_transport_deinit(void) { - int ii = 0; + int ii = 0, cid = -1, remote_domain = -1; struct fastrpc_socket *frpc_socket = NULL; struct frpc_transport_session_control *session_control = NULL; - int cid = -1; - for (cid = 0; cid < NUM_CHANNELS; cid++) { - for (ii = 0; ii < REMOTE_DOMAINS; ii++) { - session_control = &glist_session_ctrl[cid][ii]; - frpc_socket = &session_control->frpc_socket; - if (!session_control->remote_domain_available) - continue; + for (ii = 0; ii < ARRAY_SIZE(configurations); ii++) { + cid = configurations[ii].channel_id; + remote_domain = configurations[ii].remote_domain; - if (frpc_socket->sock) - sock_release(frpc_socket->sock); + session_control = glist_session_ctrl[cid][remote_domain]; + if (!session_control) + continue; - kfree(frpc_socket->recv_buf); - frpc_socket->recv_buf = NULL; - frpc_socket->sock = NULL; - mutex_destroy(&frpc_socket->socket_mutex); - } + frpc_socket = &session_control->frpc_socket; + + if (frpc_socket->sock) + sock_release(frpc_socket->sock); + + if (session_control->wq) + destroy_workqueue(session_control->wq); + + kfree(frpc_socket->recv_buf); 
+ frpc_socket->recv_buf = NULL; + frpc_socket->sock = NULL; + mutex_destroy(&frpc_socket->socket_mutex); + kfree(session_control); + glist_session_ctrl[cid][remote_domain] = NULL; } } From 4d19368b341fdb6ea6384e46e625d77ef778d234 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Wed, 22 Feb 2023 22:33:59 +0530 Subject: [PATCH 036/146] msm: adsprpc: no debug info if ioctl wait for pdup If pd is down, new device ioctls wait for pdup before copying ioctl params and saving ctx information. Shift logic to check pdup from device_ioctl to after context creation and before invoke send to allow current ioctl information to be store in pending ctx. Change-Id: Ia9747394020fd35b02f4074a82edecace72f87db Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 26 +++++++++++++++----------- dsp/adsprpc_shared.h | 1 + 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 0c440b197e..97b7826792 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1870,6 +1870,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, err = -ENOKEY; goto bail; } + ctx->xo_time_in_us_created = CONVERT_CNT_TO_US(__arch_counter_get_cntvct()); spin_lock(&fl->hlock); hlist_add_head(&ctx->hn, &clst->pending); clst->num_active_ctxs++; @@ -3114,6 +3115,8 @@ static void fastrpc_update_invoke_count(uint32_t handle, uint64_t *perf_counter, } } +static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name); + int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, uint32_t kernel, struct fastrpc_ioctl_invoke_async *inv) @@ -3174,6 +3177,18 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, trace_fastrpc_msg("context_alloc: end"); if (err) goto bail; + + if (fl->servloc_name) { + err = fastrpc_check_pd_status(fl, + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME); + err |= fastrpc_check_pd_status(fl, + SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME); + err |= fastrpc_check_pd_status(fl, + 
SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME); + if (err) + goto bail; + } + isasyncinvoke = (ctx->asyncjob.isasyncjob ? true : false); if (fl->profile) perf_counter = (uint64_t *)ctx->perf + PERF_COUNT; @@ -6892,17 +6907,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, p.inv.perf_dsp = NULL; p.inv.job = NULL; - if (fl->servloc_name) { - err = fastrpc_check_pd_status(fl, - AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME); - err |= fastrpc_check_pd_status(fl, - SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME); - err |= fastrpc_check_pd_status(fl, - SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME); - if (err) - goto bail; - } - spin_lock(&fl->hlock); if (fl->file_close >= FASTRPC_PROCESS_EXIT_START) { err = -ESHUTDOWN; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index f3342de6b3..445eb82c3d 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -832,6 +832,7 @@ struct smq_invoke_ctx { uint32_t sc_interrupted; struct fastrpc_file *fl_interrupted; uint32_t handle_interrupted; + uint64_t xo_time_in_us_created; /* XO Timestamp (in us) of ctx creation */ uint64_t xo_time_in_us_interrupted; /* XO Timestamp (in us) of interrupted ctx */ uint64_t xo_time_in_us_restored; /* XO Timestamp (in us) of restored ctx */ int tx_index; /* index of current ctx in channel gmsg_log array */ From c5866e97d699f9a4d9c8bf9187e912ba1b17fd96 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Wed, 15 Mar 2023 10:12:58 +0530 Subject: [PATCH 037/146] msm: adsprpc: Print map and buf flags in debugfs Current map and buf flags are not being printed in the debugfs data of process. Print map and buf flags. 
Change-Id: I621e7ca08de45f189d5b49046b3c37cfef968d54 Acked-by: DEEPAK SANNAPAREDDY Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 97b7826792..0ac4d92d85 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5785,7 +5785,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, "\n=======%s %s %s======\n", title, " LIST OF MAPS ", title); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%-20s|%-20s|%-20s\n", "va", "phys", "size"); + "%-20s|%-20s|%-20s|%-20s\n", "va", "phys", "size", "flags"); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s%s%s%s%s\n", single_line, single_line, single_line, @@ -5793,9 +5793,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, mutex_lock(&fl->map_mutex); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "0x%-20lX|0x%-20llX|0x%-20zu\n\n", + "0x%-20lX|0x%-20llX|0x%-20zu|0x%-17llX\n\n", map->va, map->phys, - map->size); + map->size, map->flags); } mutex_unlock(&fl->map_mutex); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -5831,16 +5831,16 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, " LIST OF BUFS ", title); spin_lock(&fl->hlock); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "%-19s|%-19s|%-19s\n", - "virt", "phys", "size"); + "%-19s|%-19s|%-19s|%-19s\n", + "virt", "phys", "size", "flags"); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s%s%s%s%s\n", single_line, single_line, single_line, single_line, single_line); hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) { len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, - "0x%-17p|0x%-17llX|%-19zu\n", - buf->virt, (uint64_t)buf->phys, buf->size); + "0x%-17p|0x%-17llX|%-19zu|0x%-17llX\n", + buf->virt, (uint64_t)buf->phys, buf->size, buf->flags); } len += scnprintf(fileinfo + len, 
DEBUGFS_SIZE - len, From e92ce41f5ac21a8304556c7d669f225e1ec7cc2f Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Fri, 31 Mar 2023 16:27:40 +0530 Subject: [PATCH 038/146] msm: adsprpc: adding proper null check. after allocation of a buffer, verify if null is returned. if null check is success then bailout. Change-Id: Idbc94c6cf109d34340b55b25f8df74afd5975d36 Signed-off-by: Vamsi Krishna Gattupalli Acked-by: ANANDU E --- dsp/adsprpc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) mode change 100644 => 100755 dsp/adsprpc.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100644 new mode 100755 index 0ac4d92d85..edbf0e55f2 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4038,6 +4038,8 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, VERIFY(err, NULL != (dst_perms = kcalloc(rhvm->vmcount, sizeof(struct qcom_scm_vmperm), GFP_KERNEL))); + if (err) + goto bail; for (i = 0; i < rhvm->vmcount; i++) { dst_perms[i].vmid = rhvm->vmid[i]; dst_perms[i].perm = rhvm->vmperm[i]; @@ -4676,7 +4678,8 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, VERIFY(err, NULL != (dst_perms = kcalloc(rhvm->vmcount, sizeof(struct qcom_scm_vmperm), GFP_KERNEL))); - + if (err) + goto bail; for (i = 0; i < rhvm->vmcount; i++) { dst_perms[i].vmid = rhvm->vmid[i]; dst_perms[i].perm = rhvm->vmperm[i]; From 4282472d10eaa847eaae58980427314524f848e4 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Tue, 21 Mar 2023 16:15:43 -0700 Subject: [PATCH 039/146] Enable QMAA gating for fastrpc kernel driver Added compilation gating with QMAA override flags for frpc-adsprpc and cdsp-loader drivers. 
Change-Id: I4fdbc66f065d886593af5b40432526e7d43b08ba Signed-off-by: Anirudh Raghavendra --- Android.mk | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Android.mk b/Android.mk index 6b9a3fe586..38483fc8fe 100644 --- a/Android.mk +++ b/Android.mk @@ -1,3 +1,12 @@ +FASTRPC_DLKM_ENABLED := true + +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_FASTRPC_OVERRIDE), false) + FASTRPC_DLKM_ENABLED := false + endif +endif + +ifeq ($(FASTRPC_DLKM_ENABLED), true) DLKM_DIR := device/qcom/common/dlkm LOCAL_PATH := $(call my-dir) @@ -39,3 +48,4 @@ $(info KBUILD_OPTIONS = $(KBUILD_OPTIONS)) $(info intermediates dsp symvers path = $(call intermediates-dir-for,DLKM,dsp-module-symvers)) $(info DLKM_DIR = $(DLKM_DIR)) +endif \ No newline at end of file From c0787f28468755ee714eb9c30e0295c3be79b5a3 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Wed, 5 Apr 2023 15:08:22 +0530 Subject: [PATCH 040/146] Synchronize exit process and bus driver invoke In current code fastrpc_file_free and bus driver invoke call are in parallel, we would see corruption as both try to access fastrpc_mmap list. To resolve this issue added signaling mechanism between fastrpc_file_free and bus driver invoke call. If bus driver invoke call is running in parallel with fastrpc_file_free, it would wait until the invoke call is completed. Also added locks in bus driver API to protect fastrpc_mmap's. To improve code readability created separate API for bus driver map and unmap.
Change-Id: I5fd6f331febdecb319b168b36590a73e4532038a Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 216 +++++++++++++++++++++++++++---------------- dsp/adsprpc_shared.h | 4 + 2 files changed, 140 insertions(+), 80 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index edbf0e55f2..528e35bc9c 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5559,9 +5559,10 @@ static int fastrpc_file_free(struct fastrpc_file *fl) spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; - if (!fl->is_ramdump_pend) { + if (fl->is_dma_invoke_pend) + wait_for_completion(&fl->dma_invoke); + if (!fl->is_ramdump_pend) goto skip_dump_wait; - } is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); wait_for_completion(&fl->work); @@ -5573,6 +5574,7 @@ skip_dump_wait: } hlist_del_init(&fl->hn); fl->is_ramdump_pend = false; + fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -5602,6 +5604,7 @@ skip_dump_wait: kfree(fl->hdr_bufs); if (!IS_ERR_OR_NULL(fl->pers_hdr_buf)) fastrpc_buf_free(fl->pers_hdr_buf, 0); + mutex_lock(&fl->internal_map_mutex); mutex_lock(&fl->map_mutex); do { lmap = NULL; @@ -5613,6 +5616,7 @@ skip_dump_wait: fastrpc_mmap_free(lmap, 1); } while (lmap); mutex_unlock(&fl->map_mutex); + mutex_unlock(&fl->internal_map_mutex); if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); @@ -6032,6 +6036,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; init_completion(&fl->work); + init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; filp->private_data = fl; mutex_init(&fl->internal_map_mutex); @@ -8084,8 +8089,7 @@ union fastrpc_dev_param { struct fastrpc_dev_unmap_dma *unmap; }; -long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, - unsigned long invoke_param) +long fastrpc_dev_map_dma(struct 
fastrpc_device *dev, unsigned long invoke_param) { int err = 0; union fastrpc_dev_param p; @@ -8095,86 +8099,138 @@ long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, uintptr_t raddr = 0; unsigned long irq_flags = 0; + p.map = (struct fastrpc_dev_map_dma *)invoke_param; + spin_lock_irqsave(&me->hlock, irq_flags); + /* Verify if fastrpc device is closed*/ + VERIFY(err, dev && !dev->dev_close); + if (err) { + err = -ESRCH; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + fl = dev->fl; + /* Verify if fastrpc file is not NULL*/ + if (!fl) { + err = -EBADF; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->internal_map_mutex); + spin_lock_irqsave(&me->hlock, irq_flags); + /* Verify if fastrpc file is being closed, holding device lock*/ + if (fl->file_close) { + err = -ESRCH; + spin_unlock_irqrestore(&me->hlock, irq_flags); + goto bail; + } + fl->is_dma_invoke_pend = true; + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->map_mutex); + /* Map DMA buffer on SMMU device*/ + err = fastrpc_mmap_create(fl, -1, p.map->buf, + p.map->attrs, 0, p.map->size, + ADSP_MMAP_DMA_BUFFER, &map); + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + /* Map DMA buffer on DSP*/ + VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl, + map->flags, 0, map->phys, map->size, map->refs, &raddr))); + if (err) + goto bail; + map->raddr = raddr; + p.map->v_dsp_addr = raddr; +bail: + if (err && map) { + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(map, 0); + mutex_unlock(&fl->map_mutex); + } + if (fl) { + spin_lock_irqsave(&me->hlock, irq_flags); + if (fl->file_close && fl->is_dma_invoke_pend) + complete(&fl->dma_invoke); + fl->is_dma_invoke_pend = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); + } + mutex_unlock(&fl->internal_map_mutex); + return err; +} + +long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_param) +{ 
+ int err = 0; + union fastrpc_dev_param p; + struct fastrpc_file *fl = NULL; + struct fastrpc_mmap *map = NULL; + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param; + spin_lock_irqsave(&me->hlock, irq_flags); + /* Verify if fastrpc device is closed*/ + VERIFY(err, dev && !dev->dev_close); + if (err) { + err = -ESRCH; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + fl = dev->fl; + /* Verify if fastrpc file is not NULL*/ + if (!fl) { + err = -EBADF; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->internal_map_mutex); + spin_lock_irqsave(&me->hlock, irq_flags); + /* Verify if fastrpc file is being closed, holding device lock*/ + if (fl->file_close) { + err = -ESRCH; + spin_unlock_irqrestore(&me->hlock, irq_flags); + goto bail; + } + fl->is_dma_invoke_pend = true; + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->map_mutex); + if (!fastrpc_mmap_find(fl, -1, p.unmap->buf, 0, 0, ADSP_MMAP_DMA_BUFFER, 0, &map)) { + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + /* Un-map DMA buffer on DSP*/ + VERIFY(err, !(err = fastrpc_munmap_on_dsp(fl, map->raddr, + map->phys, map->size, map->flags))); + if (err) + goto bail; + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(map, 0); + } + mutex_unlock(&fl->map_mutex); +bail: + if (fl) { + spin_lock_irqsave(&me->hlock, irq_flags); + if (fl->file_close && fl->is_dma_invoke_pend) + complete(&fl->dma_invoke); + fl->is_dma_invoke_pend = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); + } + mutex_unlock(&fl->internal_map_mutex); + return err; +} + +long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, + unsigned long invoke_param) +{ + int err = 0; + switch (invoke_num) { case FASTRPC_DEV_MAP_DMA: - p.map = (struct fastrpc_dev_map_dma *)invoke_param; - spin_lock_irqsave(&me->hlock, irq_flags); - /* Verify 
if fastrpc device is closed*/ - VERIFY(err, dev && !dev->dev_close); - if (err) { - err = -ESRCH; - spin_unlock_irqrestore(&me->hlock, irq_flags); - break; - } - fl = dev->fl; - spin_lock(&fl->hlock); - /* Verify if fastrpc file is being closed, holding device lock*/ - if (fl->file_close) { - err = -ESRCH; - spin_unlock(&fl->hlock); - spin_unlock_irqrestore(&me->hlock, irq_flags); - break; - } - spin_unlock(&fl->hlock); - spin_unlock_irqrestore(&me->hlock, irq_flags); - mutex_lock(&fl->internal_map_mutex); - mutex_lock(&fl->map_mutex); - /* Map DMA buffer on SMMU device*/ - err = fastrpc_mmap_create(fl, -1, p.map->buf, - p.map->attrs, 0, p.map->size, - ADSP_MMAP_DMA_BUFFER, &map); - mutex_unlock(&fl->map_mutex); - if (err) { - mutex_unlock(&fl->internal_map_mutex); - break; - } - /* Map DMA buffer on DSP*/ - VERIFY(err, 0 == (err = fastrpc_mmap_on_dsp(fl, - map->flags, 0, map->phys, map->size, map->refs, &raddr))); - if (err) { - mutex_unlock(&fl->internal_map_mutex); - break; - } - map->raddr = raddr; - mutex_unlock(&fl->internal_map_mutex); - p.map->v_dsp_addr = raddr; + err = fastrpc_dev_map_dma(dev, invoke_param); break; case FASTRPC_DEV_UNMAP_DMA: - p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param; - spin_lock_irqsave(&me->hlock, irq_flags); - /* Verify if fastrpc device is closed*/ - VERIFY(err, dev && !dev->dev_close); - if (err) { - err = -ESRCH; - spin_unlock_irqrestore(&me->hlock, irq_flags); - break; - } - fl = dev->fl; - spin_lock(&fl->hlock); - /* Verify if fastrpc file is being closed, holding device lock*/ - if (fl->file_close) { - err = -ESRCH; - spin_unlock(&fl->hlock); - spin_unlock_irqrestore(&me->hlock, irq_flags); - break; - } - spin_unlock(&fl->hlock); - spin_unlock_irqrestore(&me->hlock, irq_flags); - mutex_lock(&fl->internal_map_mutex); - mutex_lock(&fl->map_mutex); - if (!fastrpc_mmap_find(fl, -1, p.unmap->buf, 0, 0, ADSP_MMAP_DMA_BUFFER, 0, &map)) { - /* Un-map DMA buffer on DSP*/ - mutex_unlock(&fl->map_mutex); - VERIFY(err, !(err = 
fastrpc_munmap_on_dsp(fl, map->raddr, - map->phys, map->size, map->flags))); - if (err) { - mutex_unlock(&fl->internal_map_mutex); - break; - } - fastrpc_mmap_free(map, 0); - } - mutex_unlock(&fl->map_mutex); - mutex_unlock(&fl->internal_map_mutex); + err = fastrpc_dev_unmap_dma(dev, invoke_param); break; default: err = -ENOTTY; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 445eb82c3d..9664ebc158 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -1091,6 +1091,10 @@ struct fastrpc_file { struct completion work; /* Flag to indicate ram dump collection status*/ bool is_ramdump_pend; + /* Process kill will wait on bus driver invoke thread to complete its process */ + struct completion dma_invoke; + /* Flag to indicate invoke pending */ + bool is_dma_invoke_pend; /* Flag to indicate type of process (static, dynamic) */ uint32_t proc_flags; /* If set, threads will poll for DSP response instead of glink wait */ From 1c1554521172da441257a0cc368cc7c785d34f54 Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Thu, 6 Apr 2023 11:52:50 -0700 Subject: [PATCH 041/146] msm: adsprpc: stub out buffer ownership for TVM In TVM buffers might be lend from PVM and ownership might be shared between TVM and PVM. This might lead to some issues in TVM. Change-Id: I9c2ea67c1c4664512881a301cf57ab5e204f6a65 Signed-off-by: Edgar Flores --- dsp/adsprpc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 528e35bc9c..a1fe2804e2 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1196,7 +1196,10 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * err = -EBADFD; goto bail; } + +#if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 
0 : 1; +#endif map->va = 0; map->phys = 0; @@ -1261,7 +1264,10 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * goto bail; } } + +#if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; +#endif if (map->secure) { if (!fl->secsctx) err = fastrpc_session_alloc(chan, 1, me->share_securecb, From 5f713c22a4416fe5b56dbe518c15c648472fbb34 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Thu, 23 Mar 2023 17:13:10 -0700 Subject: [PATCH 042/146] msm: adsprpc: Fix race in async context response Currently async job is first added to pending context list and later job is send to remote sub system. After the job is added to pending context list, if any SSR happens, all the async pending job contexts are responded and freed in async response thread. Original thread that added job to pending context list might not have sent the job, as there is SSR and can free the context again in same thread. Queue response in SSR only when the job is sent to remote sub system. 
Signed-off-by: Himateja Reddy Change-Id: I1f880316f327a8345433d5d22b619ef0a50d7240 --- dsp/adsprpc.c | 10 ++++++++-- dsp/adsprpc_shared.h | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index a1fe2804e2..2ee8066a66 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1828,6 +1828,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, ctx->is_work_done = false; ctx->copybuf = NULL; ctx->is_early_wakeup = false; + ctx->is_job_sent_to_remote_ss = false; if (ctx->fl->profile) { ctx->perf = kzalloc(sizeof(*(ctx->perf)), GFP_KERNEL); @@ -2084,7 +2085,7 @@ static void fastrpc_notify_users(struct fastrpc_file *me) trace_fastrpc_context_complete(me->cid, (uint64_t)ictx, ictx->retval, ictx->msg.invoke.header.ctx, ictx->handle, ictx->sc); - if (ictx->asyncjob.isasyncjob) + if (ictx->asyncjob.isasyncjob && ictx->is_job_sent_to_remote_ss) fastrpc_queue_completed_async_job(ictx); else complete(&ictx->work); @@ -2114,7 +2115,7 @@ static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me) trace_fastrpc_context_complete(me->cid, (uint64_t)ictx, ictx->retval, ictx->msg.invoke.header.ctx, ictx->handle, ictx->sc); - if (ictx->asyncjob.isasyncjob) + if (ictx->asyncjob.isasyncjob && ictx->is_job_sent_to_remote_ss) fastrpc_queue_completed_async_job(ictx); else complete(&ictx->work); @@ -2868,6 +2869,11 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, msg = &msg_temp; } err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->tvm_remote_domain); + if (isasync && !err) { + spin_lock(&fl->hlock); + ctx->is_job_sent_to_remote_ss = true; + spin_unlock(&fl->hlock); + } trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx, handle, sc, msg->invoke.page.addr, msg->invoke.page.size); ns = get_timestamp_in_ns(); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 9664ebc158..d8c0e46bb6 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -836,6 +836,7 @@ struct 
smq_invoke_ctx { uint64_t xo_time_in_us_interrupted; /* XO Timestamp (in us) of interrupted ctx */ uint64_t xo_time_in_us_restored; /* XO Timestamp (in us) of restored ctx */ int tx_index; /* index of current ctx in channel gmsg_log array */ + bool is_job_sent_to_remote_ss; /* Flag to check if job is sent to remote sub system */ }; struct fastrpc_ctx_lst { From 27d9053c28113a2660ec72df1f5b5b13aae5caf7 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Mon, 13 Mar 2023 16:20:37 +0530 Subject: [PATCH 043/146] msm: adsprpc: To avoid null pointer dereference To log error for fastrpc_mmap_remove_ssr and compat_fastrpc_get_dsp_info for avoiding null pointer dereferences leading to kw issues. Change-Id: I515485d891331e0740722a0de1291353db645b66 Acked-by: Ansa Ahmed Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2ee8066a66..5dc4d81d3a 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4031,19 +4031,22 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, err = fastrpc_mmap_create(fl, -1, NULL, 0, init->mem, init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem); mutex_unlock(&fl->map_mutex); - if (err) + if (err || (!mem)) goto bail; spin_lock_irqsave(&me->hlock, irq_flags); mem->in_use = true; spin_unlock_irqrestore(&me->hlock, irq_flags); } + VERIFY(err, mem); + if (err) + goto bail; phys = mem->phys; size = mem->size; /* * If remote-heap VMIDs are defined in DTSI, then do * hyp_assign from HLOS to those VMs (LPASS, ADSP). 
*/ - if (rhvm->vmid && mem && mem->refs == 1 && size) { + if (rhvm->vmid && mem->refs == 1 && size) { u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); struct qcom_scm_vmperm *dst_perms; uint32_t i = 0; From a2f9f978b19dd8f19ec3d61d4f9c00e497c8d70c Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Sat, 8 Apr 2023 06:58:45 +0530 Subject: [PATCH 044/146] msm: adsprpc: Handle UAF in fastrpc_buf_free Thread T1 add buffer to fl->cached_bufs and release fl->hlock and holding buffer reference. Now thread T2 will acquire fl->hlock and free buffer in fastrpc_cached_buf_list_free(). T1 will dereference the freed buffer. Moving reference buffer uses for T1 inside fl->hlock to avoid UAF. Change-Id: I5f08d5497099133f87d55f5879cfe50c2ba23ae6 Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100755 => 100644 dsp/adsprpc.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100755 new mode 100644 index 5dc4d81d3a..65f0df3e56 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -714,8 +714,8 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache) } hlist_add_head(&buf->hn, &fl->cached_bufs); fl->num_cached_buf++; - spin_unlock(&fl->hlock); buf->type = -1; + spin_unlock(&fl->hlock); return; } skip_buf_cache: From cb7a1b4f73c6d631f463ffd6f82595ed8691103f Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Tue, 18 Apr 2023 17:22:07 -0700 Subject: [PATCH 045/146] msm: adsprpc: Use memcpy for trace events in LE LE static analysis KW is giving error from fastrpc traces because traces are using __assign_str() which translates to strcpy() and this is a deprecated API. Kernel team suggested to use memcpy instead of __assign_str().
Change-Id: Idf92446a26d8b6f472963e9215f738df3f6fcdef --- dsp/fastrpc_trace.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dsp/fastrpc_trace.h b/dsp/fastrpc_trace.h index e812d2a906..67fba9019d 100644 --- a/dsp/fastrpc_trace.h +++ b/dsp/fastrpc_trace.h @@ -381,7 +381,12 @@ TRACE_EVENT(fastrpc_msg, TP_ARGS(message), TP_STRUCT__entry(__string(buf, message)), TP_fast_assign( +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + memcpy(__get_str(buf), (message), (sizeof(message) - 1)); + __get_str(buf)[sizeof(message) - 1] = '\0'; +#else __assign_str(buf, message); +#endif ), TP_printk(" %s", __get_str(buf)) ); From 264671ac7919fede57fa7f992e1714799503be65 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Tue, 14 Mar 2023 11:10:16 +0530 Subject: [PATCH 046/146] msm: adsprpc: Share initial debug config to DSP This change enables sharing of a new page to DSP. New page will contain inital debug parameters which we need to pass to the DSP during the process initiation. Change-Id: I8ae12cb364811a97eca3f15e70106b36bcec3f54 Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 51 +++++++++++++++++++++++++++++++++++++++++--- dsp/adsprpc_shared.h | 16 ++++++++++++++ 2 files changed, 64 insertions(+), 3 deletions(-) mode change 100644 => 100755 dsp/adsprpc.c mode change 100644 => 100755 dsp/adsprpc_shared.h diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100644 new mode 100755 index 65f0df3e56..f2a138574b --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -214,6 +214,12 @@ #define FASTRPC_USER_PD_FORCE_KILL 2 +/* +* No. 
of pages shared with DSP during process init +* First page for init-mem and second page for proc-attrs +*/ +#define PAGESLEN_WITH_SHAREDBUF 2 + /* Unique index flag used for mini dump */ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; @@ -3542,6 +3548,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, struct fastrpc_ioctl_async_response async_res; uint32_t user_concurrency; struct fastrpc_ioctl_notif_rsp notif; + struct fastrpc_proc_sharedbuf_info buff_info; } p; struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL; uint32_t size = 0; @@ -3562,6 +3569,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, goto bail; } } + switch (inv2->req) { case FASTRPC_INVOKE2_ASYNC: size = sizeof(struct fastrpc_ioctl_invoke_async); @@ -3620,6 +3628,21 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, err = fastrpc_get_notif_response(&p.notif, (void *)inv2->invparam, fl); break; + case FASTRPC_INVOKE2_PROC_SHAREDBUF_INFO: + VERIFY(err, + sizeof(struct fastrpc_proc_sharedbuf_info) >= inv2->size); + if (err) { + err = -EBADE; + goto bail; + } + K_COPY_FROM_USER(err, fl->is_compat, &p.buff_info, + (void *)inv2->invparam, inv2->size); + if (err) + goto bail; + + fl->sharedbuf_info.buf_fd = p.buff_info.buf_fd; + fl->sharedbuf_info.buf_size = p.buff_info.buf_size; + break; default: err = -ENOTTY; break; @@ -3720,7 +3743,8 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, int err = 0, memlen = 0, mflags = 0, locked = 0; struct fastrpc_ioctl_invoke_async ioctl; struct fastrpc_ioctl_init *init = &uproc->init; - struct smq_phy_page pages[1]; + /* First page for init-mem and second page for proc-attrs */ + struct smq_phy_page pages[PAGESLEN_WITH_SHAREDBUF]; struct fastrpc_mmap *file = NULL; struct fastrpc_buf *imem = NULL; unsigned long imem_dma_attr = 0; @@ -3729,6 +3753,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, unsigned int gid = 0, one_mb = 1024*1024; unsigned int dsp_userpd_memlen = 0; 
struct fastrpc_buf *init_mem; + struct fastrpc_mmap *sharedbuf_map = NULL; struct { int pgid; @@ -3862,11 +3887,24 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, goto bail; fl->init_mem = imem; + inbuf.pageslen = 1; + if ((fl->sharedbuf_info.buf_fd != -1) && fl->sharedbuf_info.buf_size) { + mutex_lock(&fl->map_mutex); + err = fastrpc_mmap_create(fl, fl->sharedbuf_info.buf_fd, NULL, 0, + 0, fl->sharedbuf_info.buf_size, mflags, &sharedbuf_map); + mutex_unlock(&fl->map_mutex); + if (err) + goto bail; + + /* if shared buff is available send this as the second page and set pageslen as 2 */ + inbuf.pageslen = PAGESLEN_WITH_SHAREDBUF; + } + /* * Prepare remote arguments for dynamic process create * call to remote subsystem. */ - inbuf.pageslen = 1; + ra[0].buf.pv = (void *)&inbuf; ra[0].buf.len = sizeof(inbuf); fds[0] = -1; @@ -3881,8 +3919,14 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, pages[0].addr = imem->phys; pages[0].size = imem->size; + + /* Update IOVA of second page shared with DSP */ + if (inbuf.pageslen > 1) { + pages[1].addr = sharedbuf_map->phys; + pages[1].size = sharedbuf_map->size; + } ra[3].buf.pv = (void *)pages; - ra[3].buf.len = 1 * sizeof(*pages); + ra[3].buf.len = (inbuf.pageslen) * sizeof(*pages); fds[3] = -1; inbuf.attrs = uproc->attrs; @@ -6054,6 +6098,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; filp->private_data = fl; + fl->sharedbuf_info.buf_fd = -1; mutex_init(&fl->internal_map_mutex); mutex_init(&fl->map_mutex); spin_lock_irqsave(&me->hlock, irq_flags); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h old mode 100644 new mode 100755 index d8c0e46bb6..e14a36956f --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -202,6 +202,15 @@ struct remote_buf { size_t len; /* length of buffer */ }; +/* structure to hold fd and size of buffer shared with DSP, +* which 
contains inital debug parameters that needs to be passed +* during process initialization. +*/ +struct fastrpc_proc_sharedbuf_info { + int buf_fd; + int buf_size; +}; + struct remote_dma_handle { int fd; uint32_t offset; @@ -299,6 +308,7 @@ enum fastrpc_invoke2_type { FASTRPC_INVOKE2_ASYNC_RESPONSE = 2, FASTRPC_INVOKE2_KERNEL_OPTIMIZATIONS, FASTRPC_INVOKE2_STATUS_NOTIF, + FASTRPC_INVOKE2_PROC_SHAREDBUF_INFO, }; struct fastrpc_ioctl_invoke2 { @@ -1116,6 +1126,12 @@ struct fastrpc_file { bool exit_notif; /* Flag to indicate async thread exit requested*/ bool exit_async; + /* + * structure to hold fd and size of buffer shared with DSP, + * which contains initial debug configurations and other initial + * config paramters. + */ + struct fastrpc_proc_sharedbuf_info sharedbuf_info; }; union fastrpc_ioctl_param { From ccb5fba99d7553d1aa78f04a8c0b46d1d5b5347f Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Wed, 26 Apr 2023 16:23:30 +0530 Subject: [PATCH 047/146] msm:adsprpc:add error log in fastrpc init create dynamic process When DSP process successfully spawned on the ADSP , the APPS side init memory is getting unmapped due to an error in driver, by the time error printing in user space logs the SMMU fault is happening in ADSP. So add the error log in fastrpc_init_create_dynamic_process. 
Acked-by: Ramesh Nallagopu Change-Id: I6df8000e9e34fa0916947528a52793b164ab3acb Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index f2a138574b..52059df8fc 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3973,6 +3973,7 @@ bail: spin_lock(&fl->hlock); locked = 1; if (err) { + ADSPRPC_ERR("failed with err %d\n", err); fl->dsp_process_state = PROCESS_CREATE_DEFAULT; if (!IS_ERR_OR_NULL(fl->init_mem)) { init_mem = fl->init_mem; From d0ad5cc2d562546434d0275d9b089a784f0e3037 Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Thu, 27 Apr 2023 17:45:13 +0530 Subject: [PATCH 048/146] msm: adsprpc: fix remote argument array size The remote argument array size is more than the utilized. Add fix to have proper array size to accommodate the arguments for remote invocation. Change-Id: Id0b290eebae850765f24e22918166d7e9d8827c4 Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 52059df8fc..955721b306 100755 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4308,7 +4308,7 @@ static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl) uint64_t cpuinfo = 0; struct fastrpc_apps *me = &gfa; struct fastrpc_ioctl_invoke_async ioctl; - remote_arg_t ra[2]; + remote_arg_t ra[1]; int cid = -1; if (!fl) { From 2c1d2338798284d89b8b0aa53bca2fe19e8375c3 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Wed, 3 May 2023 11:44:06 -0700 Subject: [PATCH 049/146] msm: adsprpc: FastRPC driver interface to get HLOS PID Currently there is no interface request to get HLOS PID of the device attached to FastRPC bus driver. Add new request FASTRPC_DEV_GET_HLOS_PID, to get HLOS PID of the attached device. 
Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 33 +++++++++++++++++++++++++++++++++ include/linux/fastrpc.h | 9 +++++++++ 2 files changed, 42 insertions(+) mode change 100755 => 100644 dsp/adsprpc.c diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c old mode 100755 new mode 100644 index 955721b306..c5d8acfcea --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8148,6 +8148,7 @@ static struct platform_driver fastrpc_driver = { union fastrpc_dev_param { struct fastrpc_dev_map_dma *map; struct fastrpc_dev_unmap_dma *unmap; + struct fastrpc_dev_get_hlos_pid *hpid; }; long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param) @@ -8281,6 +8282,35 @@ bail: return err; } +long fastrpc_dev_get_hlos_pid(struct fastrpc_device *dev, unsigned long invoke_param) +{ + int err = 0; + union fastrpc_dev_param p; + struct fastrpc_file *fl = NULL; + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + p.hpid = (struct fastrpc_dev_get_hlos_pid *)invoke_param; + spin_lock_irqsave(&me->hlock, irq_flags); + /* Verify if fastrpc device is closed*/ + VERIFY(err, dev && !dev->dev_close); + if (err) { + err = -ESRCH; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + fl = dev->fl; + /* Verify if fastrpc file is not NULL*/ + if (!fl) { + err = -EBADF; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; + } + p.hpid->hlos_pid = fl->tgid; + spin_unlock_irqrestore(&me->hlock, irq_flags); + return err; +} + long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, unsigned long invoke_param) { @@ -8293,6 +8323,9 @@ long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, case FASTRPC_DEV_UNMAP_DMA: err = fastrpc_dev_unmap_dma(dev, invoke_param); break; + case FASTRPC_DEV_GET_HLOS_PID: + err = fastrpc_dev_get_hlos_pid(dev, invoke_param); + break; default: err = -ENOTTY; break; diff --git a/include/linux/fastrpc.h b/include/linux/fastrpc.h index b828d9d8a7..17a989cf7b 100644 --- a/include/linux/fastrpc.h 
+++ b/include/linux/fastrpc.h @@ -18,6 +18,7 @@ enum fastrpc_driver_status { enum fastrpc_driver_invoke_nums { FASTRPC_DEV_MAP_DMA = 1, FASTRPC_DEV_UNMAP_DMA, + FASTRPC_DEV_GET_HLOS_PID, }; /** @@ -44,6 +45,14 @@ struct fastrpc_dev_unmap_dma { size_t size; }; +/** + * struct fastrpc_dev_get_hlos_pid - fastrpc dma buffer unmap structure + * @hlos_pid : HLOS PID of attached device + */ +struct fastrpc_dev_get_hlos_pid { + int hlos_pid; +}; + /** * fastrpc_device - device that belong to the fastrpc bus * @hn: Head node to add to fastrpc device list From 21968b8049081a28b539809acf215a51166015da Mon Sep 17 00:00:00 2001 From: Vamsi Krishna Gattupalli Date: Wed, 26 Apr 2023 12:33:47 +0530 Subject: [PATCH 050/146] msm: adsprpc: enable ramdump collection for CMA persistent buffer Ramdump collection is missing for peristent mappings. Added change to enable ramdump collection for CMA persistent buffer Change-Id: Ic8484c9d9f2814610de78fbafba9cdc65a75d862 Acked-by: DEEPAK SANNAPAREDDY Signed-off-by: Vamsi Krishna Gattupalli --- dsp/adsprpc.c | 155 +++++++++++++++++++++++++++----------------------- 1 file changed, 84 insertions(+), 71 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index c5d8acfcea..414c505ca7 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4884,7 +4884,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) { struct fastrpc_mmap *match = NULL, *map = NULL; struct hlist_node *n = NULL; - int err = 0, ret = 0; + int err = 0, ret = 0, lock = 0; struct fastrpc_apps *me = &gfa; struct qcom_dump_segment ramdump_segments_rh; struct list_head head; @@ -4898,75 +4898,82 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) goto bail; } } - do { - match = NULL; - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_for_each_entry_safe(map, n, &me->maps, hn) { - /* In hibernation suspend case fl is NULL, check !fl to cleanup */ - if (!fl || (fl && map->servloc_name && fl->servloc_name - && !strcmp(map->servloc_name, 
fl->servloc_name))) { - match = map; - if (map->is_persistent && map->in_use) { - struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; - uint64_t phys = map->phys; - size_t size = map->size; - - spin_unlock_irqrestore(&me->hlock, irq_flags); - //scm assign it back to HLOS - if (rhvm->vmid) { - u64 src_perms = 0; - struct qcom_scm_vmperm dst_perms = {0}; - uint32_t i = 0; - - for (i = 0; i < rhvm->vmcount; i++) { - src_perms |= BIT(rhvm->vmid[i]); - } - - dst_perms.vmid = QCOM_SCM_VMID_HLOS; - dst_perms.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(phys, (uint64_t)size, - &src_perms, &dst_perms, 1); - } - if (err) { - ADSPRPC_ERR( - "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", - err, phys, size); - err = -EADDRNOTAVAIL; - return err; - } - spin_lock_irqsave(&me->hlock, irq_flags); - map->in_use = false; - /* - * decrementing refcount for persistent mappings - * as incrementing it in fastrpc_get_persistent_map - */ - map->refs--; - } - if (map->is_persistent) { - match = NULL; - continue; - } - hlist_del_init(&map->hn); - break; - } + spin_lock_irqsave(&me->hlock, irq_flags); + lock = 1; + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + if (!lock) { + spin_lock_irqsave(&me->hlock, irq_flags); + lock = 1; + } + /* In hibernation suspend case fl is NULL, check !fl to cleanup */ + if (!fl || (fl && map->servloc_name && fl->servloc_name + && !strcmp(map->servloc_name, fl->servloc_name))) { + match = map; + if (map->is_persistent && map->in_use) { + struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; + uint64_t phys = map->phys; + size_t size = map->size; + + if (lock) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + lock = 0; + } + //scm assign it back to HLOS + if (rhvm->vmid) { + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms = {0}; + uint32_t i = 0; + + for (i = 0; i < rhvm->vmcount; i++) { + src_perms |= BIT(rhvm->vmid[i]); + } + + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; + err = 
qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, &dst_perms, 1); + } + if (err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + goto bail; + } + if (!lock) { + spin_lock_irqsave(&me->hlock, irq_flags); + lock = 1; + } + map->in_use = false; + /* + * decrementing refcount for persistent mappings + * as incrementing it in fastrpc_get_persistent_map + */ + map->refs--; + } + if (!match->is_persistent) + hlist_del_init(&map->hn); + } + if (lock) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + lock = 0; } - spin_unlock_irqrestore(&me->hlock, irq_flags); if (match) { - if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - err = fastrpc_munmap_rh(match->phys, - match->size, match->flags); - } else if (match->flags == ADSP_MMAP_HEAP_ADDR) { - if (fl) - err = fastrpc_munmap_on_dsp_rh(fl, match->phys, - match->size, match->flags, 0); - else { - pr_err("Cannot communicate with DSP, ADSP is down\n"); - fastrpc_mmap_add(match); + if (!match->is_persistent) { + if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + err = fastrpc_munmap_rh(match->phys, + match->size, match->flags); + } else if (match->flags == ADSP_MMAP_HEAP_ADDR) { + if (fl) + err = fastrpc_munmap_on_dsp_rh(fl, match->phys, + match->size, match->flags, 0); + else { + pr_err("Cannot communicate with DSP, ADSP is down\n"); + fastrpc_mmap_add(match); + } } } - if (err) - goto bail; memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); ramdump_segments_rh.da = match->phys; ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); @@ -4979,14 +4986,20 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) pr_err("adsprpc: %s: unable to dump heap (err %d)\n", __func__, ret); } - if (!locked) - mutex_lock(&fl->map_mutex); - fastrpc_mmap_free(match, 0); - if (!locked) - mutex_unlock(&fl->map_mutex); + if (!match->is_persistent) { + if (!locked) + mutex_lock(&fl->map_mutex); + 
fastrpc_mmap_free(match, 0); + if (!locked) + mutex_unlock(&fl->map_mutex); + } } - } while (match); + } bail: + if (lock) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + lock = 0; + } if (err && match) { if (!locked) mutex_lock(&fl->map_mutex); From 6ab8ea49ffd42eb5251562b127cb3c10b1b9495e Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Mon, 10 Apr 2023 17:06:50 -0700 Subject: [PATCH 051/146] Fix issue in allocating multiple sessions for ADSP Multiple sessions were being allocated from ADSP using the shared context bank. Remove second for loop which was not checking for the sharedcb variable. Change-Id: Ie5831eb9454b909dfea62cffbdaf66d94b200b3b Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 414c505ca7..02ba2a2bb3 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5389,15 +5389,6 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan, break; } } - if (idx >= chan->sesscount) { - for (idx = 0; idx < chan->sesscount; ++idx) { - if (!chan->session[idx].used && - chan->session[idx].smmu.secure == secure) { - chan->session[idx].used = 1; - break; - } - } - } if (idx >= chan->sesscount) { err = -EUSERS; goto bail; From 4cb568d62baa1dd22114b6df86bd36a8b7bf7fd6 Mon Sep 17 00:00:00 2001 From: Anthony Adamo Date: Mon, 1 May 2023 11:38:08 -0700 Subject: [PATCH 052/146] msm: Add QMAA flags to guard compilation Change-Id: Ia822ad15eff73e0fde6a382e718f80f6f418dbae --- dsp_kernel_board.mk | 2 ++ dsp_kernel_product.mk | 2 ++ 2 files changed, 4 insertions(+) diff --git a/dsp_kernel_board.mk b/dsp_kernel_board.mk index 6c0a48bf85..12a10f0e40 100644 --- a/dsp_kernel_board.mk +++ b/dsp_kernel_board.mk @@ -1,5 +1,7 @@ +ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko #BOARD_VENDOR_KERNEL_MODULES += 
$(KERNEL_MODULES_OUT)/frpc-trusted-adsprpc.ko BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko endif +endif \ No newline at end of file diff --git a/dsp_kernel_product.mk b/dsp_kernel_product.mk index b7c2644e47..06a8832440 100644 --- a/dsp_kernel_product.mk +++ b/dsp_kernel_product.mk @@ -1,3 +1,5 @@ +ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) PRODUCT_PACKAGES += frpc-adsprpc.ko #PRODUCT_PACKAGES += frpc_trusted-adsprpc.ko PRODUCT_PACKAGES += cdsp-loader.ko +endif \ No newline at end of file From 4c5701814d174aef9dfb0c64912c08d1bf10ab54 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Mon, 15 May 2023 08:53:34 -0700 Subject: [PATCH 053/146] msm: adsprpc: Allocate designated context bank session Currently SMMU context banks are chosen dynamically based on available context bank. Few use cases requires context banks to be fixed to retain SMMU mappings even after process exits and resumes again. Few other use cases requires to use multiple context banks of similar remote subsystem process types. Allocate designated context bank session with process type matching with remote subsystem process type. 
Change-Id: Ie8ccad2fde4e2e21aaf8c6ede0ab31645cdf350c Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 111 ++++++++++++++++++++++++++++++++++++++----- dsp/adsprpc_shared.h | 29 +++++++++++ 2 files changed, 129 insertions(+), 11 deletions(-) mode change 100755 => 100644 dsp/adsprpc_shared.h diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 02ba2a2bb3..4e506bdcff 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1083,8 +1083,9 @@ bail: kfree(map); } -static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, - int sharedcb, struct fastrpc_session_ctx **session); +static int fastrpc_session_alloc_secure_memory( + struct fastrpc_channel_ctx *chan, int secure, + int sharedcb, int pd_type, struct fastrpc_session_ctx **session); static inline bool fastrpc_get_persistent_map(size_t len, struct fastrpc_mmap **pers_map) { @@ -1276,11 +1277,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * #endif if (map->secure) { if (!fl->secsctx) - err = fastrpc_session_alloc(chan, 1, me->share_securecb, - &fl->secsctx); + err = fastrpc_session_alloc_secure_memory(chan, 1, + me->share_securecb, fl->pd_type, &fl->secsctx); if (err) { ADSPRPC_ERR( - "fastrpc_session_alloc failed for fd %d ret %d\n", + "fastrpc_session_alloc_secure_memory failed for fd %d ret %d\n", fd, err); err = -ENOSR; goto bail; @@ -3467,6 +3468,54 @@ bail: return err; } +static int fastrpc_set_session_info( + struct fastrpc_proc_sess_info *sess_info, + void *param, struct fastrpc_file *fl) +{ + int err = 0; + struct fastrpc_apps *me = &gfa; + + /* + * Third-party apps don't have permission to open the fastrpc device, so + * it is opened on their behalf by DSP HAL. This is detected by + * comparing current PID with the one stored during device open. 
+ */ + if (current->tgid != fl->tgid_open) + fl->untrusted_process = true; + VERIFY(err, sess_info->pd_type > DEFAULT_UNUSED && + sess_info->pd_type < MAX_PD_TYPE); + if (err) { + ADSPRPC_ERR( + "Session PD type %u is invalid for the process\n", + sess_info->pd_type); + err = -EBADR; + goto bail; + } + if (fl->untrusted_process && sess_info->pd_type != USERPD) { + ADSPRPC_ERR( + "Session PD type %u not allowed for untrusted process\n", + sess_info->pd_type); + err = -EBADR; + goto bail; + } + /* + * If PD type is not configured for context banks, + * ignore PD type passed by the user, leave pd_type set to DEFAULT_UNUSED(0) + */ + if (me->cb_pd_type) + fl->pd_type = sess_info->pd_type; + // Processes attaching to Sensor Static PD, share context bank. + if (sess_info->pd_type == SENSORS_STATICPD) + fl->sharedcb = 1; + VERIFY(err, 0 == (err = fastrpc_get_info(fl, &(sess_info->domain_id)))); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, param, sess_info, + sizeof(struct fastrpc_proc_sess_info)); +bail: + return err; +} + static int fastrpc_create_persistent_headers(struct fastrpc_file *fl, uint32_t user_concurrency) { @@ -3549,6 +3598,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, uint32_t user_concurrency; struct fastrpc_ioctl_notif_rsp notif; struct fastrpc_proc_sharedbuf_info buff_info; + struct fastrpc_proc_sess_info sess_info; } p; struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL; uint32_t size = 0; @@ -3643,6 +3693,20 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, fl->sharedbuf_info.buf_fd = p.buff_info.buf_fd; fl->sharedbuf_info.buf_size = p.buff_info.buf_size; break; + case FASTRPC_INVOKE2_SESS_INFO: + VERIFY(err, + sizeof(struct fastrpc_proc_sess_info) >= inv2->size); + if (err) { + err = -EBADE; + goto bail; + } + K_COPY_FROM_USER(err, fl->is_compat, &p.sess_info, + (void *)inv2->invparam, inv2->size); + if (err) + goto bail; + err = fastrpc_set_session_info(&p.sess_info, + (void *)inv2->invparam, fl); + break; default: err = 
-ENOTTY; break; @@ -5374,17 +5438,23 @@ int fastrpc_internal_mmap(struct fastrpc_file *fl, static void fastrpc_context_list_dtor(struct fastrpc_file *fl); static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan, - int secure, int sharedcb, struct fastrpc_session_ctx **session) + int secure, int sharedcb, int pd_type, struct fastrpc_session_ctx **session) { struct fastrpc_apps *me = &gfa; uint64_t idx = 0; int err = 0; + /* + * PD type can be either unused(DEFAULT_UNUSED) (or) if PD type + * is used, choose the context bank with matching PD type. + */ if (chan->sesscount) { for (idx = 0; idx < chan->sesscount; ++idx) { if (!chan->session[idx].used && chan->session[idx].smmu.secure == secure && - chan->session[idx].smmu.sharedcb == sharedcb) { + chan->session[idx].smmu.sharedcb == sharedcb && + (pd_type == DEFAULT_UNUSED || + chan->session[idx].smmu.pd_type == pd_type)) { chan->session[idx].used = 1; break; } @@ -5566,14 +5636,23 @@ bail: return err; } -static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure, - int sharedcb, struct fastrpc_session_ctx **session) +static int fastrpc_session_alloc_secure_memory( + struct fastrpc_channel_ctx *chan, int secure, + int sharedcb, int pd_type, struct fastrpc_session_ctx **session) { int err = 0; + struct fastrpc_apps *me = &gfa; + + /* + * If PD type is configured for context banks, + * Use CPZ_USERPD, to allocate secure context bank type. 
+ */ + if (pd_type != DEFAULT_UNUSED && me->cb_pd_type) + pd_type = CPZ_USERPD; mutex_lock(&chan->smd_mutex); if (!*session) - err = fastrpc_session_alloc_locked(chan, secure, sharedcb, session); + err = fastrpc_session_alloc_locked(chan, secure, sharedcb, pd_type, session); mutex_unlock(&chan->smd_mutex); if (err == -EUSERS) { ADSPRPC_WARN( @@ -6085,6 +6164,8 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) INIT_HLIST_NODE(&fl->hn); fl->sessionid = 0; fl->tgid_open = current->tgid; + /* PD type is not known, when device is opened */ + fl->pd_type = DEFAULT_UNUSED; fl->apps = me; fl->mode = FASTRPC_MODE_SERIAL; fl->cid = -1; @@ -6251,7 +6332,7 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) fl->ssrcount = fl->apps->channel[cid].ssrcount; mutex_lock(&fl->apps->channel[cid].smd_mutex); err = fastrpc_session_alloc_locked(&fl->apps->channel[cid], - 0, fl->sharedcb, &fl->sctx); + 0, fl->sharedcb, fl->pd_type, &fl->sctx); mutex_unlock(&fl->apps->channel[cid].smd_mutex); if (err == -EUSERS) { ADSPRPC_WARN( @@ -7617,6 +7698,14 @@ static int fastrpc_cb_probe(struct device *dev) me->max_size_limit = (dma_addr_pool[1] == 0 ? 
0x78000000 : dma_addr_pool[1]); + if (of_get_property(dev->of_node, "pd-type", NULL) != NULL) { + err = of_property_read_u32(dev->of_node, "pd-type", + &(sess->smmu.pd_type)); + if (err) + goto bail; + // Set cb_pd_type, if the process type is configured for context banks + me->cb_pd_type = true; + } if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { sess->smmu.sharedcb = 1; // Set share_securecb, if the secure context bank is shared diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h old mode 100755 new mode 100644 index e14a36956f..bb7f160e1d --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -294,6 +294,14 @@ struct fastrpc_ioctl_notif_rsp { uint32_t status; /* Status of the process */ }; +/* FastRPC ioctl structure to set session related info */ +struct fastrpc_proc_sess_info { + uint32_t domain_id; /* Set the remote subsystem, Domain ID of the session */ + uint32_t session_id; /* Unused, Set the Session ID on remote subsystem */ + uint32_t pd_type; /* Set the process type on remote subsystem */ + uint32_t sharedcb; /* Unused, Session can share context bank with other sessions */ +}; + /* INIT a new process or attach to guestos */ enum fastrpc_init_flags { FASTRPC_INIT_NO_CREATE = -1, @@ -309,6 +317,8 @@ enum fastrpc_invoke2_type { FASTRPC_INVOKE2_KERNEL_OPTIMIZATIONS, FASTRPC_INVOKE2_STATUS_NOTIF, FASTRPC_INVOKE2_PROC_SHAREDBUF_INFO, + /* Set session info of remote sub system */ + FASTRPC_INVOKE2_SESS_INFO, }; struct fastrpc_ioctl_invoke2 { @@ -634,6 +644,21 @@ enum fastrpc_process_exit_states { FASTRPC_PROCESS_DSP_EXIT_ERROR = 4, }; +/* + * Process types on remote subsystem + * Always add new PD types at the end, before MAX_PD_TYPE + */ +#define DEFAULT_UNUSED 0 /* pd type not configured for context banks */ +#define ROOT_PD 1 /* Root PD */ +#define AUDIO_STATICPD 2 /* ADSP Audio Static PD */ +#define SENSORS_STATICPD 3 /* ADSP Sensors Static PD */ +#define SECURE_STATICPD 4 /* CDSP Secure Static PD */ +#define OIS_STATICPD 5 /* ADSP 
OIS Static PD */ +#define CPZ_USERPD 6 /* CDSP CPZ USER PD */ +#define USERPD 7 /* DSP User Dynamic PD */ +#define GUEST_OS_SHARED 8 /* Legacy Guest OS Shared */ +#define MAX_PD_TYPE 9 /* Max PD type */ + struct fastrpc_file; int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, int tvm_remote_domain); @@ -869,6 +894,7 @@ struct fastrpc_smmu { int secure; int coherent; int sharedcb; + int pd_type; /* Process type on remote sub system */ /* gen pool for QRTR */ struct gen_pool *frpc_genpool; /* fastrpc gen pool buffer */ @@ -974,6 +1000,8 @@ struct fastrpc_apps { int remote_cdsp_status; /* Indicates secure context bank to be shared */ int share_securecb; + /* Indicates process type is configured for SMMU context bank */ + bool cb_pd_type; }; struct fastrpc_mmap { @@ -1073,6 +1101,7 @@ struct fastrpc_file { int file_close; int dsp_proc_init; int sharedcb; + int pd_type; /* Process type on remote subsystem */ struct fastrpc_apps *apps; struct dentry *debugfs_file; struct dev_pm_qos_request *dev_pm_qos_req; From ac175e18982daf70b28035b4876536dd3b62cc1d Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Wed, 17 May 2023 11:11:28 -0700 Subject: [PATCH 054/146] Move ioctl definitions to new header file Move ioctl definitions and structures to new header file. Kernel team has a new requirement to move userspace related API and structure definitions to a new header file independent of kernel only headers. 
Change-Id: Ic0fa54a2c18036cb6a7fa5f2cd389d9f8d07096f Signed-off-by: Anirudh Raghavendra --- include/uapi/fastrpc_shared.h | 285 ++++++++++++++++++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100644 include/uapi/fastrpc_shared.h diff --git a/include/uapi/fastrpc_shared.h b/include/uapi/fastrpc_shared.h new file mode 100644 index 0000000000..4c0fd38bec --- /dev/null +++ b/include/uapi/fastrpc_shared.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef FASTRPC_IOCTL_H +#define FASTRPC_IOCTL_H + +#include +#include + +#define remote_arg_t union remote_arg +/* Map and unmap IOCTL methods reserved memory size for future extensions */ +#define MAP_RESERVED_NUM (14) +#define UNMAP_RESERVED_NUM (10) + +#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke) +#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap) +#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap) +#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64) +#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64) +#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd) +#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t) +#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init) +#define FASTRPC_IOCTL_INVOKE_ATTRS \ + _IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs) +#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t) +//#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf) +#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs) +#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc) +#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control) +#define FASTRPC_IOCTL_MUNMAP_FD _IOWR('R', 13, struct fastrpc_ioctl_munmap_fd) +#define 
FASTRPC_IOCTL_GET_DSP_INFO \ + _IOWR('R', 17, struct fastrpc_ioctl_capability) +#define FASTRPC_IOCTL_INVOKE2 _IOWR('R', 18, struct fastrpc_ioctl_invoke2) +#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 19, struct fastrpc_ioctl_mem_map) +#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 20, struct fastrpc_ioctl_mem_unmap) +#define FASTRPC_IOCTL_INVOKE_PERF \ + _IOWR('R', 21, struct fastrpc_ioctl_invoke_perf) +#define FASTRPC_IOCTL_NOTIF_RSP \ + _IOWR('R', 22, struct fastrpc_ioctl_notif_rsp) +#define FASTRPC_IOCTL_DSPSIGNAL_CREATE _IOWR('R', 23, struct fastrpc_ioctl_dspsignal_create) +#define FASTRPC_IOCTL_DSPSIGNAL_DESTROY _IOWR('R', 24, struct fastrpc_ioctl_dspsignal_destroy) +#define FASTRPC_IOCTL_DSPSIGNAL_SIGNAL _IOWR('R', 25, struct fastrpc_ioctl_dspsignal_signal) +#define FASTRPC_IOCTL_DSPSIGNAL_WAIT _IOWR('R', 26, struct fastrpc_ioctl_dspsignal_wait) +#define FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT \ + _IOWR('R', 27, struct fastrpc_ioctl_dspsignal_cancel_wait) + +struct fastrpc_mem_map { + int fd; /* ion fd */ + int offset; /* buffer offset */ + uint32_t flags; /* flags defined in enum fastrpc_map_flags */ + int attrs; /* buffer attributes used for SMMU mapping */ + uintptr_t vaddrin; /* buffer virtual address */ + size_t length; /* buffer length */ + uint64_t vaddrout; /* [out] remote virtual address */ +}; + +struct fastrpc_mem_unmap { + int fd; /* ion fd */ + uint64_t vaddr; /* remote process (dsp) virtual address */ + size_t length; /* buffer size */ +}; + +struct fastrpc_ctrl_latency { + uint32_t enable; /* latency control enable */ + uint32_t latency; /* latency request in us */ +}; + +struct fastrpc_ctrl_kalloc { + uint32_t kalloc_support; /* Remote memory allocation from kernel */ +}; + +struct fastrpc_ctrl_wakelock { + uint32_t enable; /* wakelock control enable */ +}; + +struct fastrpc_ctrl_pm { + uint32_t timeout; /* timeout(in ms) for PM to keep system awake */ +}; + +struct fastrpc_ctrl_smmu { + uint32_t sharedcb; /* Set to SMMU share context bank */ +}; + 
+struct fastrpc_ioctl_invoke { + uint32_t handle; /* remote handle */ + uint32_t sc; /* scalars describing the data */ + remote_arg_t *pra; /* remote arguments list */ +}; + +struct fastrpc_ioctl_invoke_fd { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ +}; + +struct fastrpc_ioctl_invoke_attrs { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ +}; + +struct fastrpc_ioctl_invoke_crc { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; +}; + +struct fastrpc_ioctl_invoke_perf { + struct fastrpc_ioctl_invoke inv; + int *fds; + unsigned int *attrs; + unsigned int *crc; + uint64_t *perf_kernel; + uint64_t *perf_dsp; +}; + +struct fastrpc_ioctl_invoke_async { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; + uint64_t *perf_kernel; + uint64_t *perf_dsp; + struct fastrpc_async_job *job; /* async job*/ +}; + +struct fastrpc_ioctl_invoke_async_no_perf { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; + struct fastrpc_async_job *job; /* async job*/ +}; + +struct fastrpc_ioctl_async_response { + uint64_t jobid;/* job id generated by user */ + int result; /* result from DSP */ + uint64_t *perf_kernel; + uint64_t *perf_dsp; + uint32_t handle; + uint32_t sc; +}; + +struct fastrpc_ioctl_notif_rsp { + int domain; /* Domain of User PD */ + int session; /* Session ID of User PD */ + uint32_t status; /* Status of the process */ +}; + +struct fastrpc_ioctl_invoke2 { + uint32_t req; /* type of invocation request */ + uintptr_t invparam; /* invocation request param */ + uint32_t size; /* size of invocation param */ + int err; /* reserved */ +}; + +struct fastrpc_ioctl_init { + uint32_t flags; /* one of FASTRPC_INIT_* macros */ + uintptr_t file; /* pointer to elf file */ + uint32_t filelen; /* 
elf file length */ + int32_t filefd; /* ION fd for the file */ + uintptr_t mem; /* mem for the PD */ + uint32_t memlen; /* mem length */ + int32_t memfd; /* ION fd for the mem */ +}; + +struct fastrpc_ioctl_init_attrs { + struct fastrpc_ioctl_init init; + int attrs; + unsigned int siglen; +}; + +struct fastrpc_ioctl_munmap { + uintptr_t vaddrout; /* address to unmap */ + size_t size; /* size */ +}; + +struct fastrpc_ioctl_munmap_64 { + uint64_t vaddrout; /* address to unmap */ + size_t size; /* size */ +}; + +struct fastrpc_ioctl_mmap { + int fd; /* ion fd */ + uint32_t flags; /* flags for dsp to map with */ + uintptr_t vaddrin; /* optional virtual address */ + size_t size; /* size */ + uintptr_t vaddrout; /* dsps virtual address */ +}; + +struct fastrpc_ioctl_mmap_64 { + int fd; /* ion fd */ + uint32_t flags; /* flags for dsp to map with */ + uint64_t vaddrin; /* optional virtual address */ + size_t size; /* size */ + uint64_t vaddrout; /* dsps virtual address */ +}; + +struct fastrpc_ioctl_munmap_fd { + int fd; /* fd */ + uint32_t flags; /* control flags */ + uintptr_t va; /* va */ + ssize_t len; /* length */ +}; + +struct fastrpc_ioctl_dspsignal_create { + uint32_t signal_id; /* Signal ID */ + uint32_t flags; /* Flags, currently unused */ +}; + +struct fastrpc_ioctl_dspsignal_destroy { + uint32_t signal_id; /* Signal ID */ +}; + +struct fastrpc_ioctl_dspsignal_signal { + uint32_t signal_id; /* Signal ID */ +}; + +struct fastrpc_ioctl_dspsignal_wait { + uint32_t signal_id; /* Signal ID */ + uint32_t timeout_usec; /* Timeout in microseconds. 
UINT32_MAX for an infinite wait */ +}; + +struct fastrpc_ioctl_dspsignal_cancel_wait { + uint32_t signal_id; /* Signal ID */ +}; + +/* map memory to DSP device */ +struct fastrpc_ioctl_mem_map { + int version; /* Initial version 0 */ + union { + struct fastrpc_mem_map m; + int reserved[MAP_RESERVED_NUM]; + }; +}; + +/* unmap memory to DSP device */ +struct fastrpc_ioctl_mem_unmap { + int version; /* Initial version 0 */ + union { + struct fastrpc_mem_unmap um; + int reserved[UNMAP_RESERVED_NUM]; + }; +}; + +struct fastrpc_ioctl_control { + uint32_t req; + union { + struct fastrpc_ctrl_latency lp; + struct fastrpc_ctrl_kalloc kalloc; + struct fastrpc_ctrl_wakelock wp; + struct fastrpc_ctrl_pm pm; + struct fastrpc_ctrl_smmu smmu; + }; +}; + +struct fastrpc_ioctl_capability { + uint32_t domain; + uint32_t attribute_ID; + uint32_t capability; +}; + +union fastrpc_ioctl_param { + struct fastrpc_ioctl_invoke_async inv; + struct fastrpc_ioctl_mem_map mem_map; + struct fastrpc_ioctl_mem_unmap mem_unmap; + struct fastrpc_ioctl_mmap mmap; + struct fastrpc_ioctl_mmap_64 mmap64; + struct fastrpc_ioctl_munmap munmap; + struct fastrpc_ioctl_munmap_64 munmap64; + struct fastrpc_ioctl_munmap_fd munmap_fd; + struct fastrpc_ioctl_init_attrs init; + struct fastrpc_ioctl_control cp; + struct fastrpc_ioctl_capability cap; + struct fastrpc_ioctl_invoke2 inv2; + struct fastrpc_ioctl_dspsignal_signal sig; + struct fastrpc_ioctl_dspsignal_wait wait; + struct fastrpc_ioctl_dspsignal_create cre; + struct fastrpc_ioctl_dspsignal_destroy des; + struct fastrpc_ioctl_dspsignal_cancel_wait canc; +}; + +#endif From 20d4dc00608c4aa809b37fa8dbde06746f65a12b Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Thu, 18 May 2023 14:31:58 +0530 Subject: [PATCH 055/146] Potential use of freed ctx in async invoke After message is sent to DSP, async response thread could immediately get the response and free context, which will result in a use-after-free in invoke send. 
To fix this, add local copy of ctx to trace and gmsg logging. To fix async response and SSR race, we rely on is_job_sent_to_remote_ss of ctx, now check valid ctx from ctxtable to set is_job_sent_to_remote_ss. Change-Id: I1ebbed61443beda7b5ffcbe858481a54cca96acb Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4e506bdcff..7e4daf98bd 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2825,6 +2825,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, { struct smq_msg *msg = &ctx->msg; struct smq_msg msg_temp; + struct smq_invoke_ctx ctx_temp; struct fastrpc_file *fl = ctx->fl; struct fastrpc_channel_ctx *channel_ctx = NULL; int err = 0, cid = -1; @@ -2832,6 +2833,8 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, int64_t ns = 0; uint64_t xo_time_in_us = 0; int isasync = (ctx->asyncjob.isasyncjob ? true : false); + unsigned long irq_flags = 0; + uint32_t index = 0; if (!fl) { err = -EBADF; @@ -2870,16 +2873,27 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, /* * After message is sent to DSP, async response thread could immediately * get the response and free context, which will result in a use-after-free - * in this function. So use a local variable for message. + * in this function. So use a local variable for message and context. */ memcpy(&msg_temp, msg, sizeof(struct smq_msg)); msg = &msg_temp; + memcpy(&ctx_temp, ctx, sizeof(struct smq_invoke_ctx)); + index = (uint32_t)GET_TABLE_IDX_FROM_CTXID(ctx->ctxid); } + err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->tvm_remote_domain); - if (isasync && !err) { - spin_lock(&fl->hlock); - ctx->is_job_sent_to_remote_ss = true; - spin_unlock(&fl->hlock); + if (isasync) { + if (!err) { + /* + * Validate the ctx as this could have been already + * freed by async response. 
+ */ + spin_lock_irqsave(&channel_ctx->ctxlock, irq_flags); + if (index < FASTRPC_CTX_MAX && channel_ctx->ctxtable[index] == ctx) + ctx->is_job_sent_to_remote_ss = true; + spin_unlock_irqrestore(&channel_ctx->ctxlock, irq_flags); + } + ctx = &ctx_temp; } trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx, handle, sc, msg->invoke.page.addr, msg->invoke.page.size); From 64813776c7852e32d3ac6e40b7f11049de252522 Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Sat, 20 May 2023 23:44:27 -0700 Subject: [PATCH 056/146] adsprpc: tvm: wakeup waiting rpc calls when secure PD exits When secure PD exits in case of SSR or another scenario, notify all rpc threads waiting on kernel. This should allow rpc threads to return failure to TVM clients. Change-Id: Ie0e97d2cb0e378b9b1c1e558f8ed642710690d1f Signed-off-by: Edgar Flores --- dsp/adsprpc.c | 8 ++++++++ dsp/adsprpc_shared.h | 1 + dsp/adsprpc_socket.c | 3 +++ 3 files changed, 12 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 7e4daf98bd..0f0f90d924 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7499,6 +7499,14 @@ static void fastrpc_print_debug_data(int cid) kfree(gmsg_log_rx); } +void fastrpc_restart_drivers(int cid) +{ + struct fastrpc_apps *me = &gfa; + + fastrpc_notify_drivers(me, cid); + me->channel[cid].ssrcount++; +} + static int fastrpc_restart_notifier_cb(struct notifier_block *nb, unsigned long code, void *data) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index bb7f160e1d..7fb2be94df 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -670,6 +670,7 @@ void fastrpc_transport_session_init(int cid, char *subsys); void fastrpc_transport_session_deinit(int cid); int fastrpc_wait_for_transport_interrupt(int cid, unsigned int flags); int fastrpc_set_tvm_remote_domain(struct fastrpc_file *fl, struct fastrpc_ioctl_init *init); +void fastrpc_restart_drivers(int cid); static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra, uint32_t sc) diff 
--git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c index 88b245b933..24fcb2a4d4 100644 --- a/dsp/adsprpc_socket.c +++ b/dsp/adsprpc_socket.c @@ -205,6 +205,7 @@ static void fastrpc_recv_del_server(struct frpc_transport_session_control *sessi { uint32_t remote_server_instance = session_control->remote_server_instance; int32_t err = 0; + int32_t cid = 0; /* Ignore EOF marker */ if (!node && !port) { @@ -223,7 +224,9 @@ static void fastrpc_recv_del_server(struct frpc_transport_session_control *sessi session_control->frpc_socket.remote_sock_addr.sq_port = 0; session_control->remote_server_online = false; mutex_unlock(&session_control->frpc_socket.socket_mutex); + cid = GET_CID_FROM_SERVER_INSTANCE(remote_server_instance); ADSPRPC_INFO("Remote server is down: remote ID (0x%x)", remote_server_instance); + fastrpc_restart_drivers(cid); bail: if (err != -EINVAL && err) ADSPRPC_WARN("Ignoring ctrl packet: node %u, port %u, err %d", node, port, err); From e696574fb5308199a28e6a28aab430b6e6ae4104 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Sun, 23 Apr 2023 11:20:31 +0530 Subject: [PATCH 057/146] msm: adsprpc: block smmu unmap of buffer used in pending rpc call A dynamic SMMU mapping created as part of an RPC call can potentially be removed by a parallel munmap ioctl call before the RPC call is complete, leading to SMMU faults. Maintain a ref-count that indicates that the mapping is being used by a pending RPC call and allow the mapping to be removed only if this count is 0. 
Change-Id: Ieb4ff6b298ff9c48953bc5b3539fdfe19a14b442 Acked-by: Santosh Sakore Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 51 +++++++++++++++++++++++++++++++++++++++----- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 0f0f90d924..d92bda4a3f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -957,6 +957,8 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, if ((fd < 0 || map->fd == fd) && map->raddr == va && map->raddr + map->len == va + len && map->refs == 1 && + /* Remove map only if it isn't being used in any pending RPC calls */ + !map->ctx_refs && /* Skip unmap if it is fastrpc shell memory */ !map->is_filemap) { match = map; @@ -997,7 +999,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { spin_lock_irqsave(&me->hlock, irq_flags); map->refs--; - if (!map->refs && !map->is_persistent) + if (!map->refs && !map->is_persistent && !map->ctx_refs) hlist_del_init(&map->hn); spin_unlock_irqrestore(&me->hlock, irq_flags); if (map->refs > 0) { @@ -1012,7 +1014,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) spin_unlock_irqrestore(&me->hlock, irq_flags); } else { map->refs--; - if (!map->refs) + if (!map->refs && !map->ctx_refs) hlist_del_init(&map->hn); if (map->refs > 0 && !flags) return; @@ -1953,8 +1955,15 @@ static void context_free(struct smq_invoke_ctx *ctx) spin_unlock(&ctx->fl->hlock); mutex_lock(&ctx->fl->map_mutex); - for (i = 0; i < nbufs; ++i) + for (i = 0; i < nbufs; ++i) { + /* + * Decrement ctx refs count before mmap free, + * indicate remote call no longer using it + */ + if (ctx->maps[i] && ctx->maps[i]->ctx_refs) + ctx->maps[i]->ctx_refs--; fastrpc_mmap_free(ctx->maps[i], 0); + } mutex_unlock(&ctx->fl->map_mutex); fastrpc_buf_free(ctx->buf, 1); @@ -2342,6 +2351,12 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) err = 
fastrpc_mmap_create(ctx->fl, ctx->fds[i], NULL, ctx->attrs[i], buf, len, mflags, &ctx->maps[i]); + /* + * Increment ctx refs count for in/out buffer if map created, + * indicate map under use in remote call + */ + if (ctx->maps[i]) + ctx->maps[i]->ctx_refs++; mutex_unlock(&ctx->fl->map_mutex); if (err) goto bail; @@ -2369,10 +2384,23 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]); if (err) { - for (j = bufs; j < i; j++) + for (j = bufs; j < i; j++) { + /* + * Due to error decrement ctx refs count before mmap free + * for each in/out handle, if map created + */ + if (ctx->maps[j] && ctx->maps[j]->ctx_refs) + ctx->maps[j]->ctx_refs--; fastrpc_mmap_free(ctx->maps[j], 0); + } mutex_unlock(&ctx->fl->map_mutex); goto bail; + } else { + /* + * Increment ctx refs count for in/out handle if map created + * and no error, indicate map under use in remote call + */ + ctx->maps[i]->ctx_refs++; } ipage += 1; } @@ -2704,6 +2732,12 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, } } else { mutex_lock(&ctx->fl->map_mutex); + /* + * Decrement ctx refs count before mmap free, + * indicate remote call no longer using it + */ + if (ctx->maps[i]->ctx_refs) + ctx->maps[i]->ctx_refs--; fastrpc_mmap_free(ctx->maps[i], 0); mutex_unlock(&ctx->fl->map_mutex); ctx->maps[i] = NULL; @@ -2714,8 +2748,15 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, if (!fdlist[i]) break; if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], NULL, 0, 0, - 0, 0, &mmap)) + 0, 0, &mmap)) { + /* + * Decrement ctx refs count before mmap free, + * indicate remote call no longer using it + */ + if (mmap && mmap->ctx_refs) + mmap->ctx_refs--; fastrpc_mmap_free(mmap, 0); + } } mutex_unlock(&ctx->fl->map_mutex); if (ctx->crc && crclist && rpra) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 7fb2be94df..cea20a7d10 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -1031,6 +1031,8 @@ struct 
fastrpc_mmap { /* Mapping for fastrpc shell */ bool is_filemap; char *servloc_name; /* Indicate which daemon mapped this */ + /* Indicates map is being used by a pending RPC call */ + unsigned int ctx_refs; }; enum fastrpc_perfkeys { From 9fd8a867bf0a1d42443587c86714d643b82353fb Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Tue, 6 Jun 2023 18:48:01 +0530 Subject: [PATCH 058/146] msm: adsprpc: null check for context map Add null check for context map before increamenting reference count. Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index d92bda4a3f..17cf55e4c4 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2395,7 +2395,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) } mutex_unlock(&ctx->fl->map_mutex); goto bail; - } else { + } else if (ctx->maps[i]) { /* * Increment ctx refs count for in/out handle if map created * and no error, indicate map under use in remote call From 9151ca9fdd77e56e4a74ce08384dd4a27ef12fbd Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Tue, 9 May 2023 16:15:49 +0530 Subject: [PATCH 059/146] msm: adsprpc: Latency vote for lowest capacity cores Currently, QoS core count is probed from dtsi property. Instead, update it at run-time by counting number of lowest capacity cores. Probe DT to check if latency voting for only a single-core is enabled, update count then. 
Change-Id: I8eaddc382a4929d28a60db8d351eb8ca9793e82e Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 106 ++++++++++++++++++++++--------------------- dsp/adsprpc_shared.h | 10 ++-- 2 files changed, 59 insertions(+), 57 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 17cf55e4c4..3f2dba1445 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -141,6 +142,10 @@ #define ION_FLAG_CACHED (1) #endif +#ifndef topology_cluster_id +#define topology_cluster_id(cpu) topology_physical_package_id(cpu) +#endif + /* * ctxid of every message is OR-ed with fastrpc_remote_pd_type before * it is sent to DSP. So mask 2 LSBs to retrieve actual context @@ -2945,9 +2950,9 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, } /* - * fastrpc_get_dsp_status - Reads the property string from device node - * and updates the cdsp device avialbility status - * if the node belongs to cdsp device. + * Reads the property string from device node + * and updates the cdsp device avialbility status + * if the node belongs to cdsp device. * @me : pointer to fastrpc_apps. */ @@ -2974,6 +2979,26 @@ static void fastrpc_get_dsp_status(struct fastrpc_apps *me) } while (1); } +/* + * Counts number of cores corresponding + * to cluster id 0. If a core is defective or unavailable, skip counting + * that core. + * @me : pointer to fastrpc_apps. 
+ */ + +static void fastrpc_lowest_capacity_corecount(struct fastrpc_apps *me) +{ + unsigned int cpu = 0; + + cpu = cpumask_first(cpu_possible_mask); + for_each_cpu(cpu, cpu_possible_mask) { + if (topology_cluster_id(cpu) == 0) + me->lowest_capacity_core_count++; + } + ADSPRPC_INFO("lowest capacity core count: %u\n", + me->lowest_capacity_core_count); +} + static void fastrpc_init(struct fastrpc_apps *me) { int i, jj; @@ -5842,13 +5867,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) { struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; struct fastrpc_apps *me = &gfa; - u32 ii; + unsigned int ii; if (!fl) return 0; if (fl->qos_request && fl->dev_pm_qos_req) { - for (ii = 0; ii < me->silvercores.corecount; ii++) { + for (ii = 0; ii < me->lowest_capacity_core_count; ii++) { if (!dev_pm_qos_request_active(&fl->dev_pm_qos_req[ii])) continue; dev_pm_qos_remove_request(&fl->dev_pm_qos_req[ii]); @@ -6245,9 +6270,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) spin_lock_irqsave(&me->hlock, irq_flags); hlist_add_head(&fl->hn, &me->drivers); spin_unlock_irqrestore(&me->hlock, irq_flags); - fl->dev_pm_qos_req = kcalloc(me->silvercores.corecount, - sizeof(struct dev_pm_qos_request), - GFP_KERNEL); + if (me->lowest_capacity_core_count) + fl->dev_pm_qos_req = kzalloc((me->lowest_capacity_core_count) * + sizeof(struct dev_pm_qos_request), + GFP_KERNEL); spin_lock_init(&fl->dspsignals_lock); mutex_init(&fl->signal_create_mutex); init_completion(&fl->shutdown); @@ -6445,7 +6471,7 @@ int fastrpc_internal_control(struct fastrpc_file *fl, unsigned int latency; struct fastrpc_apps *me = &gfa; int sessionid = 0; - u32 silver_core_count = me->silvercores.corecount, ii = 0, cpu; + unsigned int cpu; unsigned long flags = 0; VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps)); @@ -6469,23 +6495,29 @@ int fastrpc_internal_control(struct fastrpc_file *fl, goto bail; } - VERIFY(err, me->silvercores.coreno && 
fl->dev_pm_qos_req); + VERIFY(err, (me->lowest_capacity_core_count && fl->dev_pm_qos_req)); if (err) { + ADSPRPC_INFO("Skipping PM QoS latency voting, core count: %u\n", + me->lowest_capacity_core_count); err = -EINVAL; goto bail; } + /* + * Add voting request for all possible cores corresponding to cluster + * id 0. If DT property 'qcom,single-core-latency-vote' is enabled + * then add voting request for only one core of cluster id 0. + */ + for (cpu = 0; cpu < me->lowest_capacity_core_count; cpu++) { - for (ii = 0; ii < silver_core_count; ii++) { - cpu = me->silvercores.coreno[ii]; if (!fl->qos_request) { err = dev_pm_qos_add_request( get_cpu_device(cpu), - &fl->dev_pm_qos_req[ii], + &fl->dev_pm_qos_req[cpu], DEV_PM_QOS_RESUME_LATENCY, latency); } else { err = dev_pm_qos_update_request( - &fl->dev_pm_qos_req[ii], + &fl->dev_pm_qos_req[cpu], latency); } /* PM QoS request APIs return 0 or 1 on success */ @@ -6499,7 +6531,6 @@ int fastrpc_internal_control(struct fastrpc_file *fl, fl->qos_request = 1; err = 0; } - /* Ensure CPU feature map updated to DSP for early WakeUp */ fastrpc_send_cpuinfo_to_dsp(fl); break; @@ -7931,39 +7962,6 @@ bail: } } -static void init_qos_cores_list(struct device *dev, char *prop_name, - struct qos_cores *silvercores) -{ - int err = 0; - u32 len = 0, i = 0; - u32 *coreslist = NULL; - - if (!of_find_property(dev->of_node, prop_name, &len)) - goto bail; - if (len == 0) - goto bail; - len /= sizeof(u32); - VERIFY(err, NULL != (coreslist = kcalloc(len, sizeof(u32), - GFP_KERNEL))); - if (err) - goto bail; - for (i = 0; i < len; i++) { - err = of_property_read_u32_index(dev->of_node, prop_name, i, - &coreslist[i]); - if (err) { - pr_err("adsprpc: %s: failed to read QOS cores list\n", - __func__); - goto bail; - } - } - silvercores->coreno = coreslist; - silvercores->corecount = len; -bail: - if (err) - kfree(coreslist); - -} - static void fastrpc_init_privileged_gids(struct device *dev, char *prop_name, struct gid_list *gidlist) { @@ -8125,9 
+8123,14 @@ static int fastrpc_probe(struct platform_device *pdev) &gcinfo[0].rhvm); fastrpc_init_privileged_gids(dev, "qcom,fastrpc-gids", &me->gidlist); - init_qos_cores_list(dev, "qcom,qos-cores", - &me->silvercores); - + /* + * Check if latency voting for only one core + * is enabled for the platform + */ + me->single_core_latency_vote = of_property_read_bool(dev->of_node, + "qcom,single-core-latency-vote"); + if (me->single_core_latency_vote) + me->lowest_capacity_core_count = 1; of_property_read_u32(dev->of_node, "qcom,rpc-latency-us", &me->latency); if (of_get_property(dev->of_node, @@ -8677,6 +8680,7 @@ static int __init fastrpc_device_init(void) goto bus_device_register_bail; } me->fastrpc_bus_register = true; + fastrpc_lowest_capacity_corecount(me); VERIFY(err, 0 == platform_driver_register(&fastrpc_driver)); if (err) goto register_bail; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index cea20a7d10..7493fc521a 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -745,11 +745,6 @@ struct gid_list { unsigned int gidcount; }; -struct qos_cores { - int *coreno; - int corecount; -}; - struct fastrpc_buf { struct hlist_node hn; struct hlist_node hn_rem; @@ -992,7 +987,6 @@ struct fastrpc_apps { /* Non-secure subsystem like CDSP will use regular client */ struct wakeup_source *wake_source; uint32_t duplicate_rsp_err_cnt; - struct qos_cores silvercores; uint32_t max_size_limit; struct hlist_head frpc_devices; struct hlist_head frpc_drivers; @@ -1003,6 +997,10 @@ struct fastrpc_apps { int share_securecb; /* Indicates process type is configured for SMMU context bank */ bool cb_pd_type; + /* Number of lowest capacity cores for given platform */ + unsigned int lowest_capacity_core_count; + /* Flag to check if PM QoS vote needs to be done for only one core */ + bool single_core_latency_vote; }; struct fastrpc_mmap { From ae5e634211e89e4ad4e48b7d00329084cfb28b0f Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Mon, 1 May 2023 12:10:14 
-0700 Subject: [PATCH 060/146] Enable ddk compilation for pineapple Setting LOCAL_MODULE_DDK_BUILD to true to enable ddk compilation for pineapple. Change-Id: I36d0c55c1a3f53ff727e74a90f731d1447c79b6b Signed-off-by: Anirudh Raghavendra --- Android.mk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Android.mk b/Android.mk index 38483fc8fe..c3109f4a45 100644 --- a/Android.mk +++ b/Android.mk @@ -13,6 +13,8 @@ LOCAL_PATH := $(call my-dir) DSP_BLD_DIR := $(abspath .)/vendor/qcom/opensource/dsp-kernel +LOCAL_MODULE_DDK_BUILD := true + include $(CLEAR_VARS) $(info DLKM_DIR = $(DLKM_DIR)) LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) From 7da4c92f797f721884f44afadc3cc4127b013e0d Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Wed, 24 May 2023 16:24:16 +0530 Subject: [PATCH 061/146] Avoid waiting for pending dma invoke in spinlock Currently, spinlock is acquired and wait state is entered for dma_invoke to complete. This scenario leads to watchdog bark for threads waiting to acquire spinlock. This change is to avoid waiting for dma_invoke completion after acquiring spinlock. 
Change-Id: I9443fd8bfda77194103a871e4ad0295f79cf3034 Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 3f2dba1445..f18a42f724 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5782,8 +5782,17 @@ static int fastrpc_file_free(struct fastrpc_file *fl) spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; - if (fl->is_dma_invoke_pend) - wait_for_completion(&fl->dma_invoke); + if (!fl->is_dma_invoke_pend) + goto skip_dmainvoke_wait; + is_locked = false; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + wait_for_completion(&fl->dma_invoke); + +skip_dmainvoke_wait: + if (!is_locked) { + spin_lock_irqsave(&fl->apps->hlock, irq_flags); + is_locked = true; + } if (!fl->is_ramdump_pend) goto skip_dump_wait; is_locked = false; From ba3198ddb1ee36ff56bcd334bb5bf6f5783ad0cb Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Tue, 23 May 2023 14:35:05 -0700 Subject: [PATCH 062/146] msm: adsprpc: Fail bus match when device is closing Currently probe is failing if device is closed. Driver registration with device might already be finished, if match is successful, even though probe fails. Fail the bus match when device is closed, so driver does not gets registered with device. 
Change-Id: I0511c7b3a27ddd4c2cd30d4aea9f961d1f4355d9 Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index f18a42f724..4158910824 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8508,32 +8508,33 @@ static struct device fastrpc_bus = { static int fastrpc_bus_match(struct device *dev, struct device_driver *driver) { + struct fastrpc_apps *me = &gfa; struct fastrpc_driver *frpc_driver = to_fastrpc_driver(driver); struct fastrpc_device *frpc_device = to_fastrpc_device(dev); + unsigned long irq_flags = 0; - if (frpc_device->handle == frpc_driver->handle) + if (frpc_device->handle == frpc_driver->handle) { + spin_lock_irqsave(&me->hlock, irq_flags); + /* If device is being closed, fail the match */ + if (frpc_device->dev_close) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + return 0; + } + frpc_device->refs++; + frpc_driver->device = dev; + spin_unlock_irqrestore(&me->hlock, irq_flags); return 1; + } return 0; } static int fastrpc_bus_probe(struct device *dev) { - struct fastrpc_apps *me = &gfa; struct fastrpc_device *frpc_dev = to_fastrpc_device(dev); struct fastrpc_driver *frpc_drv = to_fastrpc_driver(dev->driver); - unsigned long irq_flags = 0; - if (frpc_drv && frpc_drv->probe) { - spin_lock_irqsave(&me->hlock, irq_flags); - if (frpc_dev->dev_close) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - return 0; - } - frpc_dev->refs++; - frpc_drv->device = dev; - spin_unlock_irqrestore(&me->hlock, irq_flags); + if (frpc_drv && frpc_drv->probe) return frpc_drv->probe(frpc_dev); - } return 0; } From f1752b7f5bc2b89c6831ff4e7d9dbafb396edf4c Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Wed, 24 May 2023 10:06:19 -0700 Subject: [PATCH 063/146] Include new header fastrpc_shared.h Include new header fastrpc_shared.h in existing header (adsprpc_shared.h) and remove all ioctl definitions from the existing header. 
Change-Id: I5c5a08de0a077ec2717683134a0ec31466c34047 Signed-off-by: Anirudh Raghavendra --- Kbuild | 1 + define_modules.bzl | 10 +- dsp/adsprpc_shared.h | 275 +--------------------------------- include/uapi/fastrpc_shared.h | 1 - 4 files changed, 10 insertions(+), 277 deletions(-) diff --git a/Kbuild b/Kbuild index 62fa146890..dd2bdac0da 100644 --- a/Kbuild +++ b/Kbuild @@ -20,6 +20,7 @@ ccflags-y += -DCONFIG_DSP_PINEAPPLE=1 endif LINUXINCLUDE += -I$(DSP_ROOT)/include/linux +LINUXINCLUDE += -I$(DSP_ROOT)/include/uapi frpc-adsprpc-y := dsp/adsprpc.o \ dsp/adsprpc_rpmsg.o \ diff --git a/define_modules.bzl b/define_modules.bzl index 9650e7b173..e6b2eb7be7 100644 --- a/define_modules.bzl +++ b/define_modules.bzl @@ -30,8 +30,14 @@ def define_modules(target, variant): ], local_defines = ["DSP_TRACE_INCLUDE_PATH={}".format(trace_include_path)], out = "frpc-adsprpc.ko", - hdrs = ["include/linux/fastrpc.h"], - includes = ["include/linux"], + hdrs = [ + "include/linux/fastrpc.h", + "include/uapi/fastrpc_shared.h", + ], + includes = [ + "include/linux", + "include/uapi", + ], ) ddk_module( diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 7493fc521a..ae5d4f670b 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -8,38 +8,7 @@ #include #include - -#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke) -#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap) -#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap) -#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64) -#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64) -#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd) -#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t) -#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init) -#define FASTRPC_IOCTL_INVOKE_ATTRS \ - _IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs) -#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t) 
-//#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf) -#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs) -#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc) -#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control) -#define FASTRPC_IOCTL_MUNMAP_FD _IOWR('R', 13, struct fastrpc_ioctl_munmap_fd) -#define FASTRPC_IOCTL_GET_DSP_INFO \ - _IOWR('R', 17, struct fastrpc_ioctl_capability) -#define FASTRPC_IOCTL_INVOKE2 _IOWR('R', 18, struct fastrpc_ioctl_invoke2) -#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 19, struct fastrpc_ioctl_mem_map) -#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 20, struct fastrpc_ioctl_mem_unmap) -#define FASTRPC_IOCTL_INVOKE_PERF \ - _IOWR('R', 21, struct fastrpc_ioctl_invoke_perf) -#define FASTRPC_IOCTL_NOTIF_RSP \ - _IOWR('R', 22, struct fastrpc_ioctl_notif_rsp) -#define FASTRPC_IOCTL_DSPSIGNAL_CREATE _IOWR('R', 23, struct fastrpc_ioctl_dspsignal_create) -#define FASTRPC_IOCTL_DSPSIGNAL_DESTROY _IOWR('R', 24, struct fastrpc_ioctl_dspsignal_destroy) -#define FASTRPC_IOCTL_DSPSIGNAL_SIGNAL _IOWR('R', 25, struct fastrpc_ioctl_dspsignal_signal) -#define FASTRPC_IOCTL_DSPSIGNAL_WAIT _IOWR('R', 26, struct fastrpc_ioctl_dspsignal_wait) -#define FASTRPC_IOCTL_DSPSIGNAL_CANCEL_WAIT \ - _IOWR('R', 27, struct fastrpc_ioctl_dspsignal_cancel_wait) +#include "fastrpc_shared.h" #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" @@ -195,8 +164,6 @@ union remote_arg64 { uint32_t h; }; -#define remote_arg_t union remote_arg - struct remote_buf { void *pv; /* buffer pointer */ size_t len; /* length of buffer */ @@ -222,78 +189,12 @@ union remote_arg { uint32_t h; /* remote handle */ }; -struct fastrpc_ioctl_invoke { - uint32_t handle; /* remote handle */ - uint32_t sc; /* scalars describing the data */ - remote_arg_t *pra; /* remote arguments list */ -}; - -struct fastrpc_ioctl_invoke_fd { - struct fastrpc_ioctl_invoke inv; - 
int *fds; /* fd list */ -}; - -struct fastrpc_ioctl_invoke_attrs { - struct fastrpc_ioctl_invoke inv; - int *fds; /* fd list */ - unsigned int *attrs; /* attribute list */ -}; - -struct fastrpc_ioctl_invoke_crc { - struct fastrpc_ioctl_invoke inv; - int *fds; /* fd list */ - unsigned int *attrs; /* attribute list */ - unsigned int *crc; -}; - -struct fastrpc_ioctl_invoke_perf { - struct fastrpc_ioctl_invoke inv; - int *fds; - unsigned int *attrs; - unsigned int *crc; - uint64_t *perf_kernel; - uint64_t *perf_dsp; -}; - struct fastrpc_async_job { uint32_t isasyncjob; /* flag to distinguish async job */ uint64_t jobid; /* job id generated by user */ uint32_t reserved; /* reserved */ }; -struct fastrpc_ioctl_invoke_async { - struct fastrpc_ioctl_invoke inv; - int *fds; /* fd list */ - unsigned int *attrs; /* attribute list */ - unsigned int *crc; - uint64_t *perf_kernel; - uint64_t *perf_dsp; - struct fastrpc_async_job *job; /* async job*/ -}; - -struct fastrpc_ioctl_invoke_async_no_perf { - struct fastrpc_ioctl_invoke inv; - int *fds; /* fd list */ - unsigned int *attrs; /* attribute list */ - unsigned int *crc; - struct fastrpc_async_job *job; /* async job*/ -}; - -struct fastrpc_ioctl_async_response { - uint64_t jobid;/* job id generated by user */ - int result; /* result from DSP */ - uint64_t *perf_kernel; - uint64_t *perf_dsp; - uint32_t handle; - uint32_t sc; -}; - -struct fastrpc_ioctl_notif_rsp { - int domain; /* Domain of User PD */ - int session; /* Session ID of User PD */ - uint32_t status; /* Status of the process */ -}; - /* FastRPC ioctl structure to set session related info */ struct fastrpc_proc_sess_info { uint32_t domain_id; /* Set the remote subsystem, Domain ID of the session */ @@ -321,84 +222,6 @@ enum fastrpc_invoke2_type { FASTRPC_INVOKE2_SESS_INFO, }; -struct fastrpc_ioctl_invoke2 { - uint32_t req; /* type of invocation request */ - uintptr_t invparam; /* invocation request param */ - uint32_t size; /* size of invocation param */ - int err; 
/* reserved */ -}; - -struct fastrpc_ioctl_init { - uint32_t flags; /* one of FASTRPC_INIT_* macros */ - uintptr_t file; /* pointer to elf file */ - uint32_t filelen; /* elf file length */ - int32_t filefd; /* ION fd for the file */ - uintptr_t mem; /* mem for the PD */ - uint32_t memlen; /* mem length */ - int32_t memfd; /* ION fd for the mem */ -}; - -struct fastrpc_ioctl_init_attrs { - struct fastrpc_ioctl_init init; - int attrs; - unsigned int siglen; -}; - -struct fastrpc_ioctl_munmap { - uintptr_t vaddrout; /* address to unmap */ - size_t size; /* size */ -}; - -struct fastrpc_ioctl_munmap_64 { - uint64_t vaddrout; /* address to unmap */ - size_t size; /* size */ -}; - -struct fastrpc_ioctl_mmap { - int fd; /* ion fd */ - uint32_t flags; /* flags for dsp to map with */ - uintptr_t vaddrin; /* optional virtual address */ - size_t size; /* size */ - uintptr_t vaddrout; /* dsps virtual address */ -}; - -struct fastrpc_ioctl_mmap_64 { - int fd; /* ion fd */ - uint32_t flags; /* flags for dsp to map with */ - uint64_t vaddrin; /* optional virtual address */ - size_t size; /* size */ - uint64_t vaddrout; /* dsps virtual address */ -}; - -struct fastrpc_ioctl_munmap_fd { - int fd; /* fd */ - uint32_t flags; /* control flags */ - uintptr_t va; /* va */ - ssize_t len; /* length */ -}; - -struct fastrpc_ioctl_dspsignal_create { - uint32_t signal_id; /* Signal ID */ - uint32_t flags; /* Flags, currently unused */ -}; - -struct fastrpc_ioctl_dspsignal_destroy { - uint32_t signal_id; /* Signal ID */ -}; - -struct fastrpc_ioctl_dspsignal_signal { - uint32_t signal_id; /* Signal ID */ -}; - -struct fastrpc_ioctl_dspsignal_wait { - uint32_t signal_id; /* Signal ID */ - uint32_t timeout_usec; /* Timeout in microseconds. 
UINT32_MAX for an infinite wait */ -}; - -struct fastrpc_ioctl_dspsignal_cancel_wait { - uint32_t signal_id; /* Signal ID */ -}; - /** * Control flags for mapping memory on DSP user process */ @@ -439,44 +262,6 @@ enum fastrpc_map_flags { FASTRPC_MAP_MAX, }; -struct fastrpc_mem_map { - int fd; /* ion fd */ - int offset; /* buffer offset */ - uint32_t flags; /* flags defined in enum fastrpc_map_flags */ - int attrs; /* buffer attributes used for SMMU mapping */ - uintptr_t vaddrin; /* buffer virtual address */ - size_t length; /* buffer length */ - uint64_t vaddrout; /* [out] remote virtual address */ -}; - -/* Map and unmap IOCTL methods reserved memory size for future extensions */ -#define MAP_RESERVED_NUM (14) -#define UNMAP_RESERVED_NUM (10) - -/* map memory to DSP device */ -struct fastrpc_ioctl_mem_map { - int version; /* Initial version 0 */ - union { - struct fastrpc_mem_map m; - int reserved[MAP_RESERVED_NUM]; - }; -}; - -struct fastrpc_mem_unmap { - int fd; /* ion fd */ - uint64_t vaddr; /* remote process (dsp) virtual address */ - size_t length; /* buffer size */ -}; - -/* unmap memory to DSP device */ -struct fastrpc_ioctl_mem_unmap { - int version; /* Initial version 0 */ - union { - struct fastrpc_mem_unmap um; - int reserved[UNMAP_RESERVED_NUM]; - }; -}; - /* * This enum is shared with DSP. So, existing values should NOT * be modified. Only new members can be added. 
@@ -515,38 +300,6 @@ enum fastrpc_control_type { FASTRPC_CONTROL_NOTIF_WAKE = 9, }; -struct fastrpc_ctrl_latency { - uint32_t enable; /* latency control enable */ - uint32_t latency; /* latency request in us */ -}; - -struct fastrpc_ctrl_kalloc { - uint32_t kalloc_support; /* Remote memory allocation from kernel */ -}; - -struct fastrpc_ctrl_wakelock { - uint32_t enable; /* wakelock control enable */ -}; - -struct fastrpc_ctrl_pm { - uint32_t timeout; /* timeout(in ms) for PM to keep system awake */ -}; - -struct fastrpc_ctrl_smmu { - uint32_t sharedcb; /* Set to SMMU share context bank */ -}; - -struct fastrpc_ioctl_control { - uint32_t req; - union { - struct fastrpc_ctrl_latency lp; - struct fastrpc_ctrl_kalloc kalloc; - struct fastrpc_ctrl_wakelock wp; - struct fastrpc_ctrl_pm pm; - struct fastrpc_ctrl_smmu smmu; - }; -}; - #define FASTRPC_MAX_DSP_ATTRIBUTES (256) #define FASTRPC_MAX_ATTRIBUTES (260) @@ -555,12 +308,6 @@ enum fastrpc_dsp_capability { DMA_HANDLE_REVERSE_RPC_CAP = 129, }; -struct fastrpc_ioctl_capability { - uint32_t domain; - uint32_t attribute_ID; - uint32_t capability; -}; - struct smq_null_invoke { uint64_t ctx; /* invoke caller context */ uint32_t handle; /* handle to invoke */ @@ -1164,26 +911,6 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; }; -union fastrpc_ioctl_param { - struct fastrpc_ioctl_invoke_async inv; - struct fastrpc_ioctl_mem_map mem_map; - struct fastrpc_ioctl_mem_unmap mem_unmap; - struct fastrpc_ioctl_mmap mmap; - struct fastrpc_ioctl_mmap_64 mmap64; - struct fastrpc_ioctl_munmap munmap; - struct fastrpc_ioctl_munmap_64 munmap64; - struct fastrpc_ioctl_munmap_fd munmap_fd; - struct fastrpc_ioctl_init_attrs init; - struct fastrpc_ioctl_control cp; - struct fastrpc_ioctl_capability cap; - struct fastrpc_ioctl_invoke2 inv2; - struct fastrpc_ioctl_dspsignal_signal sig; - struct fastrpc_ioctl_dspsignal_wait wait; - struct fastrpc_ioctl_dspsignal_create cre; - struct fastrpc_ioctl_dspsignal_destroy 
des; - struct fastrpc_ioctl_dspsignal_cancel_wait canc; -}; - int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, uint32_t kernel, struct fastrpc_ioctl_invoke_async *inv); diff --git a/include/uapi/fastrpc_shared.h b/include/uapi/fastrpc_shared.h index 4c0fd38bec..e8f9a0da80 100644 --- a/include/uapi/fastrpc_shared.h +++ b/include/uapi/fastrpc_shared.h @@ -7,7 +7,6 @@ #define FASTRPC_IOCTL_H #include -#include #define remote_arg_t union remote_arg /* Map and unmap IOCTL methods reserved memory size for future extensions */ From dabc96ecf3afbf9ae9267e364756168e7a5fc381 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Thu, 1 Jun 2023 10:52:04 +0530 Subject: [PATCH 064/146] Expose NSP status via sys fs node NSP device status is exposed via SOC API. On cat to this sys fs node, NSP status will be returned. NSP status flag renamed to maintain backward compatibility. Change-Id: I67ae19e51fd58e02d78a40b30563f147bef5b20e Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 55 ++++++++++++++++++-------------------------- dsp/adsprpc_shared.h | 4 ++-- 2 files changed, 24 insertions(+), 35 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4158910824..3f8603c4f0 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -55,6 +55,7 @@ #include #include #include +#include #ifdef CONFIG_HIBERNATION #include @@ -2949,34 +2950,22 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, return err; } -/* - * Reads the property string from device node - * and updates the cdsp device avialbility status - * if the node belongs to cdsp device. +/* fastrpc_get_nsp_status() - Reads the property string from soc_info + * denoted for nsp part, and updates the nsp device avialbility status + * if the nsp is not defective. * @me : pointer to fastrpc_apps. 
*/ -static void fastrpc_get_dsp_status(struct fastrpc_apps *me) +static void fastrpc_get_nsp_status(struct fastrpc_apps *me) { - int ret = -1; - struct device_node *node = NULL; - const char *name = NULL; - - do { - node = of_find_compatible_node(node, NULL, "qcom,pil-tz-generic"); - if (node) { - ret = of_property_read_string(node, "qcom,firmware-name", &name); - if (!strcmp(name, "cdsp")) { - ret = of_device_is_available(node); - me->remote_cdsp_status = ret; - ADSPRPC_INFO("cdsp node found with ret:%x\n", ret); - break; - } - } else { - ADSPRPC_ERR("cdsp node not found\n"); - break; - } - } while (1); + if (socinfo_get_part_info(PART_NSP)) { + me->fastrpc_nsp_status = 0; + ADSPRPC_ERR( + "nsp part defective with status:%x\n", me->fastrpc_nsp_status); + } else { + me->fastrpc_nsp_status = 1; + ADSPRPC_INFO("nsp available with status: %x\n", me->fastrpc_nsp_status); + } } /* @@ -8069,15 +8058,15 @@ bail: } /* - * remote_cdsp_status_show - Updates the buffer with remote cdsp status - * by reading the fastrpc node. + * fastrpc_nsp_status_show() - Updates the buffer with remote nsp status + * by reading the fastrpc node. * @dev : pointer to device node. * @attr: pointer to device attribute. - * @buf : Output parameter to be updated with remote cdsp status. + * @buf : Output parameter to be updated with remote nsp status. * Return : bytes written to buffer. 
*/ -static ssize_t remote_cdsp_status_show(struct device *dev, +static ssize_t fastrpc_nsp_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fastrpc_apps *me = &gfa; @@ -8094,15 +8083,15 @@ static ssize_t remote_cdsp_status_show(struct device *dev, } return scnprintf(buf, PAGE_SIZE, "%d", - me->remote_cdsp_status); + me->fastrpc_nsp_status); } -/* Remote cdsp status attribute declaration as read only */ -static DEVICE_ATTR_RO(remote_cdsp_status); +/* Remote nsp status attribute declaration as read only */ +static DEVICE_ATTR_RO(fastrpc_nsp_status); /* Declaring attribute for remote dsp */ static struct attribute *msm_remote_dsp_attrs[] = { - &dev_attr_remote_cdsp_status.attr, + &dev_attr_fastrpc_nsp_status.attr, NULL }; @@ -8674,7 +8663,7 @@ static int __init fastrpc_device_init(void) } memset(me, 0, sizeof(*me)); fastrpc_init(me); - fastrpc_get_dsp_status(me); + fastrpc_get_nsp_status(me); me->dev = NULL; me->legacy_remote_heap = false; err = bus_register(&fastrpc_bus_type); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index ae5d4f670b..5ac5ceed11 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -738,8 +738,8 @@ struct fastrpc_apps { struct hlist_head frpc_devices; struct hlist_head frpc_drivers; struct mutex mut_uid; - /* Indicates cdsp device status */ - int remote_cdsp_status; + /* Indicates nsp status */ + int fastrpc_nsp_status; /* Indicates secure context bank to be shared */ int share_securecb; /* Indicates process type is configured for SMMU context bank */ From 00d699d6cb41cb5e175d7ba3c0e3b19cce128aef Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Thu, 15 Jun 2023 22:45:55 -0700 Subject: [PATCH 065/146] Add LE compilation fix for fastrpc_shared.h Compilation was failing with new header on LE HY11 build. Adding compile time flags to fix the issue. 
Change-Id: Ia51e54f6b18e4d7195f47294b88f39ed2d52c4e8 Signed-off-by: Anirudh Raghavendra --- dsp/adsprpc_shared.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 5ac5ceed11..cb48e2e87b 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -8,7 +8,12 @@ #include #include + +#ifdef CONFIG_MSM_ADSPRPC_TRUSTED +#include "../include/uapi/fastrpc_shared.h" +#else #include "fastrpc_shared.h" +#endif #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" From 6e70c954d5a10b425e7eb1e58c539a06e614e228 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Fri, 16 Jun 2023 12:09:50 -0700 Subject: [PATCH 066/146] msm: adsprpc: Support multiple sessions per process Currently a process is limited to create only 2 sessions, by toggling the 30th bit of tgid of the process, to create different process IDs on DSP remote sybsystem. This approach is not scalable to create unique process IDs to DSP, by using bits within the tgid of the process. Add support to allow a process to create multiple sessions by choosing and sending unique dsp process IDs on DSP remote sub system, instead of tgid of HLOS process. Change-Id: I33f52c68453301bdbb83dfb9a10df16143098a49 Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 151 ++++++++++++++++++++++++++++++------------- dsp/adsprpc_shared.h | 9 +++ 2 files changed, 115 insertions(+), 45 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 3f8603c4f0..199c41731d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -106,9 +106,6 @@ #define M_KERNEL_PERF_LIST (PERF_KEY_MAX) #define M_DSP_PERF_LIST (12) -#define SESSION_ID_INDEX (30) -#define SESSION_ID_MASK (1 << SESSION_ID_INDEX) -#define PROCESS_ID_MASK ((2^SESSION_ID_INDEX) - 1) #define FASTRPC_CTX_MAGIC (0xbeeddeed) /* Process status notifications from DSP will be sent with this unique context */ @@ -203,6 +200,9 @@ /* Max no. 
of persistent headers pre-allocated per process */ #define MAX_PERSISTENT_HEADERS (25) +/* Max value of unique fastrpc tgid */ +#define MAX_FRPC_TGID 65 + #define PERF_CAPABILITY_SUPPORT (1 << 1) #define KERNEL_ERROR_CODE_V1_SUPPORT 1 #define USERSPACE_ALLOCATION_SUPPORT 1 @@ -229,6 +229,9 @@ /* Unique index flag used for mini dump */ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; +/* Array to keep track unique tgid_frpc usage */ +static bool frpc_tgid_usage_array[NUM_CHANNELS][MAX_FRPC_TGID] = {0}; + /* Fastrpc remote process attributes */ enum fastrpc_proc_attr { /* Macro for Debug attr */ @@ -1836,6 +1839,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, } ctx->retval = -1; ctx->pid = current->pid; + /* Store HLOS PID in context, it is not being sent to DSP */ ctx->tgid = fl->tgid; init_completion(&ctx->work); ctx->magic = FASTRPC_CTX_MAGIC; @@ -1854,6 +1858,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, goto bail; } memset(ctx->perf, 0, sizeof(*(ctx->perf))); + /* Use HLOS PID, as perf tid is not being sent to DSP and is used to log in traces */ ctx->perf->tid = fl->tgid; } if (invokefd->job) { @@ -2033,13 +2038,11 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif struct fastrpc_file *fl = NULL; struct hlist_node *n; bool is_process_found = false; - int sessionid = 0; unsigned long irq_flags = 0; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { - if (fl->tgid == notif->pid || - (fl->tgid == (notif->pid & PROCESS_ID_MASK))) { + if (fl->tgid_frpc == notif->pid) { is_process_found = true; break; } @@ -2048,9 +2051,7 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif if (!is_process_found) return; - if (notif->pid & SESSION_ID_MASK) - sessionid = 1; - fastrpc_queue_pd_status(fl, domain, notif->status, sessionid); + fastrpc_queue_pd_status(fl, domain, notif->status, fl->sessionid); } 
static void context_notify_user(struct smq_invoke_ctx *ctx, @@ -2896,10 +2897,9 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, channel_ctx = &fl->apps->channel[cid]; mutex_lock(&channel_ctx->smd_mutex); - msg->pid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + msg->pid = fl->tgid_frpc; msg->tid = current->pid; - if (fl->sessionid) - msg->tid |= SESSION_ID_MASK; if (kernel == KERNEL_MSG_WITH_ZERO_PID) msg->pid = 0; msg->invoke.header.ctx = ctx->ctxid | fl->pd; @@ -2997,6 +2997,7 @@ static void fastrpc_init(struct fastrpc_apps *me) spin_lock_init(&me->hlock); me->channel = &gcinfo[0]; mutex_init(&me->mut_uid); + me->max_sess_per_proc = DEFAULT_MAX_SESS_PER_PROC; for (i = 0; i < NUM_CHANNELS; i++) { init_completion(&me->channel[i].work); init_completion(&me->channel[i].workport); @@ -3576,6 +3577,16 @@ static int fastrpc_set_session_info( // Processes attaching to Sensor Static PD, share context bank. if (sess_info->pd_type == SENSORS_STATICPD) fl->sharedcb = 1; + if (sess_info->session_id >= me->max_sess_per_proc) { + ADSPRPC_ERR( + "Session ID %u cannot be beyond %u\n", + sess_info->session_id, me->max_sess_per_proc); + err = -EBADR; + goto bail; + } + fl->sessionid = sess_info->session_id; + // Set multi_session_support, to disable old way of setting session_id + fl->multi_session_support = true; VERIFY(err, 0 == (err = fastrpc_get_info(fl, &(sess_info->domain_id)))); if (err) goto bail; @@ -3826,7 +3837,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked); static int fastrpc_init_attach_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init *init) { - int err = 0, tgid = fl->tgid; + int err = 0, tgid = fl->tgid_frpc; remote_arg_t ra[1]; struct fastrpc_ioctl_invoke_async ioctl; @@ -3839,6 +3850,7 @@ static int fastrpc_init_attach_process(struct fastrpc_file *fl, /* * Prepare remote arguments for creating thread group * in guestOS/staticPD on the remote subsystem. 
+ * Send unique fastrpc id to dsp */ ra[0].buf.pv = (void *)&tgid; ra[0].buf.len = sizeof(tgid); @@ -3922,7 +3934,7 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, spin_unlock(&fl->hlock); - inbuf.pgid = fl->tgid; + inbuf.pgid = fl->tgid_frpc; inbuf.namelen = strlen(current->comm) + 1; inbuf.filelen = init->filelen; fl->pd = FASTRPC_USER_PD; @@ -4183,7 +4195,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, } fl->pd = FASTRPC_USER_PD; - inbuf.pgid = fl->tgid; + inbuf.pgid = fl->tgid_frpc; inbuf.namelen = init->filelen; inbuf.pageslen = 0; @@ -4623,7 +4635,8 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) err = -ECONNRESET; goto bail; } - tgid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + tgid = fl->tgid_frpc; ra[0].buf.pv = (void *)&tgid; ra[0].buf.len = sizeof(tgid); ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP; @@ -4682,7 +4695,8 @@ static int fastrpc_mem_map_to_dsp(struct fastrpc_file *fl, int fd, int offset, uint64_t vaddrout; } routargs; - inargs.pid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + inargs.pid = fl->tgid_frpc; inargs.fd = fd; inargs.offset = offset; inargs.vaddrin = (uintptr_t)va; @@ -4733,7 +4747,8 @@ static int fastrpc_mem_unmap_to_dsp(struct fastrpc_file *fl, int fd, uint64_t len; } inargs; - inargs.pid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + inargs.pid = fl->tgid_frpc; inargs.fd = fd; inargs.vaddrin = (uint64_t)va; inargs.len = (uint64_t)size; @@ -4769,7 +4784,8 @@ static int fastrpc_unmap_on_dsp(struct fastrpc_file *fl, size_t size; } inargs; - inargs.pid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + inargs.pid = fl->tgid_frpc; inargs.size = size; inargs.vaddrout = raddr; ra[0].buf.pv = (void *)&inargs; @@ -4821,7 +4837,8 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, goto bail; } cid = fl->cid; - inargs.pid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ 
+ inargs.pid = fl->tgid_frpc; inargs.vaddrin = (uintptr_t)va; inargs.flags = flags; inargs.num = fl->apps->compat ? num * sizeof(page) : num; @@ -4926,7 +4943,8 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, goto bail; } - tgid = fl->tgid; + /* Send unique fastrpc process ID to dsp */ + tgid = fl->tgid_frpc; ra[0].buf.pv = (void *)&tgid; ra[0].buf.len = sizeof(tgid); ra[1].buf.pv = (void *)&routargs; @@ -5566,7 +5584,8 @@ static void handle_remote_signal(uint64_t msg, int cid) spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { - if ((fl->tgid == pid) && (fl->cid == cid)) { + /* Response from DSP contains unique fastrpc process id, use unique fastrpc process ID to compare */ + if ((fl->tgid_frpc == pid) && (fl->cid == cid)) { unsigned long fflags = 0; spin_lock_irqsave(&fl->dspsignals_lock, fflags); @@ -5797,6 +5816,10 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; + VERIFY(err, VALID_FASTRPC_CID(cid)); + /* Reset the tgid usage to false */ + if (!err) + frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6247,6 +6270,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->apps = me; fl->mode = FASTRPC_MODE_SERIAL; fl->cid = -1; + fl->tgid_frpc = -1; fl->tvm_remote_domain = -1; fl->dev_minor = dev_minor; fl->init_mem = NULL; @@ -6258,6 +6282,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->is_compat = false; fl->exit_notif = false; fl->exit_async = false; + fl->multi_session_support = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6306,6 +6331,25 @@ bail: return err; } +// Generate a unique process ID to DSP process +static int get_unique_hlos_process_id(uint32_t cid) +{ + int tgid_frpc = -1, tgid_index = 1; + 
struct fastrpc_apps *me = &gfa; + + spin_lock(&me->hlock); + for (tgid_index = 1; tgid_index < MAX_FRPC_TGID; tgid_index++) { + if (!frpc_tgid_usage_array[cid][tgid_index]) { + tgid_frpc = tgid_index; + /* Set the tgid usage to false */ + frpc_tgid_usage_array[cid][tgid_index] = true; + break; + } + } + spin_unlock(&me->hlock); + return tgid_frpc; +} + static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) { int err = 0, buf_size = 0; @@ -6315,6 +6359,13 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) memcpy(cur_comm, current->comm, TASK_COMM_LEN); cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; + fl->tgid_frpc = get_unique_hlos_process_id(cid); + VERIFY(err, fl->tgid_frpc != -1); + if (err) { + ADSPRPC_ERR("too many fastrpc clients, max %u allowed\n", MAX_FRPC_TGID); + err = -EUSERS; + goto bail; + } /* * Third-party apps don't have permission to open the fastrpc device, so @@ -6347,8 +6398,11 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) err = -ENOMEM; return err; } - snprintf(fl->debug_buf, buf_size, "%.10s%s%d%s%d", - cur_comm, "_", current->pid, "_", cid); + /* Use HLOS PID, unique fastrpc PID, CID in debugfs filename, + * for better ability to debug + */ + snprintf(fl->debug_buf, buf_size, "%.10s%s%d%s%d%s%d", + cur_comm, "_", current->pid, "_", fl->tgid_frpc, "_", cid); fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644, debugfs_root, fl, &debugfs_fops); if (IS_ERR_OR_NULL(fl->debugfs_file)) { @@ -6374,6 +6428,11 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } fastrpc_get_process_gids(&fl->gidlist); err = fastrpc_set_process_info(fl, cid); @@ -6383,11 +6442,6 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) if (fl->cid == -1) { struct fastrpc_channel_ctx *chan = NULL; - VERIFY(err, cid < NUM_CHANNELS); - if (err) { 
- err = -ECHRNG; - goto bail; - } chan = &me->channel[cid]; /* Check to see if the device node is non-secure */ if (fl->dev_minor == MINOR_NUM_DEV) { @@ -6468,7 +6522,6 @@ int fastrpc_internal_control(struct fastrpc_file *fl, int err = 0; unsigned int latency; struct fastrpc_apps *me = &gfa; - int sessionid = 0; unsigned int cpu; unsigned long flags = 0; @@ -6564,9 +6617,7 @@ int fastrpc_internal_control(struct fastrpc_file *fl, break; case FASTRPC_CONTROL_DSPPROCESS_CLEAN: (void)fastrpc_release_current_dsp_process(fl); - if (fl->tgid & SESSION_ID_MASK) - sessionid = 1; - fastrpc_queue_pd_status(fl, fl->cid, FASTRPC_USER_PD_FORCE_KILL, sessionid); + fastrpc_queue_pd_status(fl, fl->cid, FASTRPC_USER_PD_FORCE_KILL, fl->sessionid); break; case FASTRPC_CONTROL_RPC_POLL: err = fastrpc_manage_poll_mode(fl, cp->lp.enable, cp->lp.latency); @@ -6648,8 +6699,8 @@ int fastrpc_setmode(unsigned long ioctl_param, "multiple sessions not allowed for untrusted apps\n"); goto bail; } - fl->sessionid = 1; - fl->tgid |= SESSION_ID_MASK; + if (!fl->multi_session_support) + fl->sessionid = 1; break; default: err = -ENOTTY; @@ -6720,8 +6771,9 @@ int fastrpc_dspsignal_signal(struct fastrpc_file *fl, // track outgoing signals in the driver. The userspace library does a // basic sanity check and any security validation needs to be done by // the recipient. 
- DSPSIGNAL_VERBOSE("Send signal PID %u, signal %u\n", - (unsigned int)fl->tgid, (unsigned int)sig->signal_id); + DSPSIGNAL_VERBOSE("Send signal PID %u, unique fastrpc pid %u signal %u\n", + (unsigned int)fl->tgid, (unsigned int)fl->tgid_frpc, + (unsigned int)sig->signal_id); VERIFY(err, sig->signal_id < DSPSIGNAL_NUM_SIGNALS); if (err) { ADSPRPC_ERR("Sending bad signal %u for PID %u", @@ -6745,7 +6797,8 @@ int fastrpc_dspsignal_signal(struct fastrpc_file *fl, goto bail; } - msg = (((uint64_t)fl->tgid) << 32) | ((uint64_t)sig->signal_id); + /* Use unique fastrpc pid, to signal DSP process */ + msg = (((uint64_t)fl->tgid_frpc) << 32) | ((uint64_t)sig->signal_id); err = fastrpc_transport_send(cid, (void *)&msg, sizeof(msg), fl->tvm_remote_domain); mutex_unlock(&channel_ctx->smd_mutex); @@ -7793,10 +7846,10 @@ static int fastrpc_cb_probe(struct device *dev) if (of_get_property(dev->of_node, "pd-type", NULL) != NULL) { err = of_property_read_u32(dev->of_node, "pd-type", &(sess->smmu.pd_type)); + /* Set cb_pd_type, if the process type is set for context banks */ + me->cb_pd_type = true; if (err) goto bail; - // Set cb_pd_type, if the process type is configured for context banks - me->cb_pd_type = true; } if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { sess->smmu.sharedcb = 1; @@ -8131,6 +8184,8 @@ static int fastrpc_probe(struct platform_device *pdev) me->lowest_capacity_core_count = 1; of_property_read_u32(dev->of_node, "qcom,rpc-latency-us", &me->latency); + of_property_read_u32(dev->of_node, "qcom,max-sessions", + &me->max_sess_per_proc); if (of_get_property(dev->of_node, "qcom,secure-domains", NULL) != NULL) { VERIFY(err, !of_property_read_u32(dev->of_node, @@ -8564,17 +8619,23 @@ static int fastrpc_device_create(struct fastrpc_file *fl) frpc_dev->dev.parent = &fastrpc_bus; frpc_dev->dev.bus = &fastrpc_bus_type; - dev_set_name(&frpc_dev->dev, "%s-%d-%d", - dev_name(frpc_dev->dev.parent), fl->tgid, fl->cid); + /* Use HLOS PID, unique fastrpc process 
ID and CID to create device file, + * Else names would conflict for multiple sessions + * And also for better ability to debug + */ + dev_set_name(&frpc_dev->dev, "%s-%d-%d-%d", + dev_name(frpc_dev->dev.parent), fl->tgid, fl->tgid_frpc, fl->cid); frpc_dev->dev.release = fastrpc_dev_release; frpc_dev->fl = fl; - frpc_dev->handle = fl->tgid; + /* Use unique fastrpc tgid as handle */ + frpc_dev->handle = fl->tgid_frpc; err = device_register(&frpc_dev->dev); if (err) { put_device(&frpc_dev->dev); - ADSPRPC_ERR("fastrpc device register failed for process %d with error %d\n", - fl->tgid, err); + ADSPRPC_ERR( + "fastrpc device register failed for process %d unique fastrpc tgid %d session %d with error %d\n", + fl->tgid, fl->tgid_frpc, fl->sessionid, err); goto bail; } fl->device = frpc_dev; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index cb48e2e87b..3ee534c460 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -88,6 +88,9 @@ #define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ #define NUM_SESSIONS 14 /* max 11 compute, 3 cpz */ +/* Default maximum sessions allowed per process */ +#define DEFAULT_MAX_SESS_PER_PROC 4 + #define VALID_FASTRPC_CID(cid) \ (cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS) @@ -753,6 +756,8 @@ struct fastrpc_apps { unsigned int lowest_capacity_core_count; /* Flag to check if PM QoS vote needs to be done for only one core */ bool single_core_latency_vote; + /* Maximum sessions allowed to be created per process */ + uint32_t max_sess_per_proc; }; struct fastrpc_mmap { @@ -846,6 +851,8 @@ struct fastrpc_file { int sessionid; int tgid_open; /* Process ID during device open */ int tgid; /* Process ID that uses device for RPC calls */ + /* Unique HLOS process ID created by fastrpc for each client */ + int tgid_frpc; int cid; int tvm_remote_domain; uint64_t ssrcount; @@ -914,6 +921,8 @@ struct fastrpc_file { * config paramters. 
*/ struct fastrpc_proc_sharedbuf_info sharedbuf_info; + /* Flag to indicate 4 session support available */ + bool multi_session_support; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From bed0667a56fafecc73cd89b19f6b9151a9bdd8c2 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Thu, 15 Jun 2023 11:45:09 +0530 Subject: [PATCH 067/146] msm: adsprpc: Handle SSR error code after invoke send Currently, on SSR notification connection reset error is returned even if invoke send completes successfully. So, if there is invoke call for PD spawn, init memory gets freed after receiving the error. Currently, if init memory is accessed on DSP, SMMU fault occurs. If internal invoke is successfully completed, return the connection reset error only for non-kernel handles. Change-Id: I2f35a22562fac14a410815df6121cb1df80982a4 Acked-by: Abhishek Singh Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 199c41731d..8b19427c88 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3364,9 +3364,11 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, context_free(ctx); trace_fastrpc_msg("context_free: end"); } - if (VALID_FASTRPC_CID(cid) - && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) - err = -ECONNRESET; + if (!kernel) { + if (VALID_FASTRPC_CID(cid) + && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) + err = -ECONNRESET; + } invoke_end: if (fl->profile && !interrupted && isasyncinvoke) From f3bb535dca1bb40cf7bfac594979952bee9e5c0d Mon Sep 17 00:00:00 2001 From: Santosh Date: Tue, 4 Jul 2023 15:49:05 +0530 Subject: [PATCH 068/146] msm: Add blair module support Add blair module to support blair target change-Id: I7d8e02539913116843e50a32a97dbb436bba1937 --- BUILD.bazel | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index bce67067be..aab077fba2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -13,3 +13,7 @@ 
load(":define_modules.bzl", "define_modules") define_modules("pineapple", "consolidate") define_modules("pineapple", "gki") + +define_modules("blair", "consolidate") + +define_modules("blair", "gki") \ No newline at end of file From c16622ebe5fc5dc8095b62a71023a9c6464be817 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Thu, 6 Jul 2023 14:28:49 -0700 Subject: [PATCH 069/146] msm: adsprpc: Fail set session info on subsequent calls Currently there is no check if set session info is invoked multiple times. Multiple calls to session info leaks memory and process identifiers. Fail set session info on subsequent calls. Change-Id: I2072add44ce2a9c4c4ff544f8e568eeccc1220cf Signed-off-by: Himateja Reddy (cherry picked from commit 1a5889127e7b0ff6974e50d762708bc2ef2d3a6c) --- dsp/adsprpc.c | 19 +++++++++++++++++-- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 8b19427c88..306e589f0d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3547,6 +3547,11 @@ static int fastrpc_set_session_info( int err = 0; struct fastrpc_apps *me = &gfa; + if (fl->set_session_info) { + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } /* * Third-party apps don't have permission to open the fastrpc device, so * it is opened on their behalf by DSP HAL. 
This is detected by @@ -5818,9 +5823,8 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; - VERIFY(err, VALID_FASTRPC_CID(cid)); /* Reset the tgid usage to false */ - if (!err) + if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6285,6 +6289,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; + fl->set_session_info = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6430,6 +6435,16 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } + spin_lock(&fl->hlock); + if (fl->set_session_info) { + spin_unlock(&fl->hlock); + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } + // Set set_session_info to true + fl->set_session_info = true; + spin_unlock(&fl->hlock); VERIFY(err, VALID_FASTRPC_CID(cid)); if (err) { err = -ECHRNG; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 3ee534c460..b682318966 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -923,6 +923,8 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; /* Flag to indicate 4 session support available */ bool multi_session_support; + /* Flag to indicate session info is set */ + bool set_session_info; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From b7614deec2e5ffb0eaa0bfcacbeaf36582dbfca2 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Thu, 6 Jul 2023 14:28:49 -0700 Subject: [PATCH 070/146] msm: adsprpc: Fail set session info on subsequent calls Currently there is no check if set session info is invoked multiple times. 
Multiple calls to session info leaks memory and process identifiers. Fail set session info on subsequent calls. Signed-off-by: Himateja Reddy (cherry picked from commit 1a5889127e7b0ff6974e50d762708bc2ef2d3a6c) Signed-off-by: Linux Image Build Automation Change-Id: I3b281a6892d8ab1cc1adbb5b9296485ed2738050 --- dsp/adsprpc.c | 19 +++++++++++++++++-- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 8b19427c88..306e589f0d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3547,6 +3547,11 @@ static int fastrpc_set_session_info( int err = 0; struct fastrpc_apps *me = &gfa; + if (fl->set_session_info) { + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } /* * Third-party apps don't have permission to open the fastrpc device, so * it is opened on their behalf by DSP HAL. This is detected by @@ -5818,9 +5823,8 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; - VERIFY(err, VALID_FASTRPC_CID(cid)); /* Reset the tgid usage to false */ - if (!err) + if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6285,6 +6289,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; + fl->set_session_info = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6430,6 +6435,16 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } + spin_lock(&fl->hlock); + if (fl->set_session_info) { + spin_unlock(&fl->hlock); + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } + // Set set_session_info to true + fl->set_session_info = true; + 
spin_unlock(&fl->hlock); VERIFY(err, VALID_FASTRPC_CID(cid)); if (err) { err = -ECHRNG; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 3ee534c460..b682318966 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -923,6 +923,8 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; /* Flag to indicate 4 session support available */ bool multi_session_support; + /* Flag to indicate session info is set */ + bool set_session_info; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From ac57e6d9390e20d00e98511f9917934a92f98027 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Fri, 7 Jul 2023 11:35:29 +0530 Subject: [PATCH 071/146] msm: adsprpc: Modify global variable spinlocks to avoid spinlock recursion Spinlock in current scenario can be interrupted thus during ongoing ISR. If callback received from dsp, attempt to acquire same lock again will result into recursive spinlock with wait on queue to acquire lock again. Modify spinlocks with global variable gfa to non interruptible spinlocks in order to avoid this scenario. 
Change-Id: I5ae4864370d94ae0e0e19d3d4939ada41d609234 Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 306e589f0d..1536cf263c 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6343,8 +6343,9 @@ static int get_unique_hlos_process_id(uint32_t cid) { int tgid_frpc = -1, tgid_index = 1; struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; - spin_lock(&me->hlock); + spin_lock_irqsave(&me->hlock, irq_flags); for (tgid_index = 1; tgid_index < MAX_FRPC_TGID; tgid_index++) { if (!frpc_tgid_usage_array[cid][tgid_index]) { tgid_frpc = tgid_index; @@ -6353,7 +6354,7 @@ static int get_unique_hlos_process_id(uint32_t cid) break; } } - spin_unlock(&me->hlock); + spin_unlock_irqrestore(&me->hlock, irq_flags); return tgid_frpc; } @@ -7657,6 +7658,7 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, struct hlist_node *n; int cid = -1; struct timespec64 startT = {0}; + unsigned long irq_flags = 0; ctx = container_of(nb, struct fastrpc_channel_ctx, nb); cid = ctx - &me->channel[0]; @@ -7676,13 +7678,13 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, case QCOM_SSR_AFTER_SHUTDOWN: fastrpc_rproc_trace_events(gcinfo[cid].subsys, "QCOM_SSR_AFTER_SHUTDOWN", "fastrpc_restart_notifier-enter"); - spin_lock(&me->hlock); + spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { if (fl->cid != cid) continue; complete(&fl->shutdown); } - spin_unlock(&me->hlock); + spin_unlock_irqrestore(&me->hlock, irq_flags); ctx->subsystemstate = SUBSYSTEM_DOWN; pr_info("adsprpc: %s: received RAMDUMP notification for %s\n", __func__, gcinfo[cid].subsys); From 12a8dd0f1fd1ae34673a9b52beb8fb1670d0c3f2 Mon Sep 17 00:00:00 2001 From: Linux Image Build Automation Date: Tue, 18 Jul 2023 15:53:12 -0700 Subject: [PATCH 072/146] Revert "msm: adsprpc: Fail set session info on subsequent calls" This reverts commit 
b7614deec2e5ffb0eaa0bfcacbeaf36582dbfca2. Change-Id: I83716eea97831e88af67290342a2214ee53fbe06 Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 19 ++----------------- dsp/adsprpc_shared.h | 2 -- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 306e589f0d..8b19427c88 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3547,11 +3547,6 @@ static int fastrpc_set_session_info( int err = 0; struct fastrpc_apps *me = &gfa; - if (fl->set_session_info) { - ADSPRPC_ERR("Set session info invoked multiple times\n"); - err = -EBADR; - goto bail; - } /* * Third-party apps don't have permission to open the fastrpc device, so * it is opened on their behalf by DSP HAL. This is detected by @@ -5823,8 +5818,9 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; + VERIFY(err, VALID_FASTRPC_CID(cid)); /* Reset the tgid usage to false */ - if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) + if (!err) frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6289,7 +6285,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; - fl->set_session_info = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6435,16 +6430,6 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } - spin_lock(&fl->hlock); - if (fl->set_session_info) { - spin_unlock(&fl->hlock); - ADSPRPC_ERR("Set session info invoked multiple times\n"); - err = -EBADR; - goto bail; - } - // Set set_session_info to true - fl->set_session_info = true; - spin_unlock(&fl->hlock); VERIFY(err, VALID_FASTRPC_CID(cid)); if (err) { err = -ECHRNG; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 
b682318966..3ee534c460 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -923,8 +923,6 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; /* Flag to indicate 4 session support available */ bool multi_session_support; - /* Flag to indicate session info is set */ - bool set_session_info; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From be29afe6b2a23a4a15e2f4c0c58036463a31e998 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Mon, 17 Jul 2023 17:01:05 -0700 Subject: [PATCH 073/146] msm: adsprpc: Unique handles for different remote sub systems Currently handles are unique only for a particular sub systems, but they are not unique across all remote sub systems. Assign unique handle to each session of the remote sub system. Change-Id: I5cf0e82d87283006e719a3b24ae01a1fcb97c392 Signed-off-by: Himateja Reddy --- dsp/adsprpc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 1536cf263c..50fe7c7c8b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -201,7 +201,7 @@ #define MAX_PERSISTENT_HEADERS (25) /* Max value of unique fastrpc tgid */ -#define MAX_FRPC_TGID 65 +#define MAX_FRPC_TGID 256 #define PERF_CAPABILITY_SUPPORT (1 << 1) #define KERNEL_ERROR_CODE_V1_SUPPORT 1 @@ -230,7 +230,7 @@ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; /* Array to keep track unique tgid_frpc usage */ -static bool frpc_tgid_usage_array[NUM_CHANNELS][MAX_FRPC_TGID] = {0}; +static bool frpc_tgid_usage_array[MAX_FRPC_TGID] = {0}; /* Fastrpc remote process attributes */ enum fastrpc_proc_attr { @@ -5824,8 +5824,8 @@ skip_dump_wait: fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; /* Reset the tgid usage to false */ - if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) - frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; + if (fl->tgid_frpc != -1) + frpc_tgid_usage_array[fl->tgid_frpc] = false; is_locked = false; 
spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6339,7 +6339,7 @@ bail: } // Generate a unique process ID to DSP process -static int get_unique_hlos_process_id(uint32_t cid) +static int get_unique_hlos_process_id(void) { int tgid_frpc = -1, tgid_index = 1; struct fastrpc_apps *me = &gfa; @@ -6347,10 +6347,10 @@ static int get_unique_hlos_process_id(uint32_t cid) spin_lock_irqsave(&me->hlock, irq_flags); for (tgid_index = 1; tgid_index < MAX_FRPC_TGID; tgid_index++) { - if (!frpc_tgid_usage_array[cid][tgid_index]) { + if (!frpc_tgid_usage_array[tgid_index]) { tgid_frpc = tgid_index; /* Set the tgid usage to false */ - frpc_tgid_usage_array[cid][tgid_index] = true; + frpc_tgid_usage_array[tgid_index] = true; break; } } @@ -6367,7 +6367,7 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) memcpy(cur_comm, current->comm, TASK_COMM_LEN); cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; - fl->tgid_frpc = get_unique_hlos_process_id(cid); + fl->tgid_frpc = get_unique_hlos_process_id(); VERIFY(err, fl->tgid_frpc != -1); if (err) { ADSPRPC_ERR("too many fastrpc clients, max %u allowed\n", MAX_FRPC_TGID); From 9e2564e979e8d4009ccea244a780434bc3605627 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Thu, 20 Jul 2023 12:11:48 +0530 Subject: [PATCH 074/146] msm: adsprpc: out of bound access for glist_session_ctrl Check proper index before access glist_session_ctrl to fix out of bound access. Change-Id: Id942b0add1bcc99f0c2f8b22ce631a11685ab340 Signed-off-by: nishant chaubey --- dsp/adsprpc_socket.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c index 24fcb2a4d4..02c3551591 100644 --- a/dsp/adsprpc_socket.c +++ b/dsp/adsprpc_socket.c @@ -83,7 +83,7 @@ struct remote_domain_configuration { * glist_session_ctrl * Static list containing socket session information for all remote domains. 
*/ -static struct frpc_transport_session_control *glist_session_ctrl[MAX_DOMAIN_ID][MAX_REMOTE_ID]; +static struct frpc_transport_session_control *glist_session_ctrl[NUM_CHANNELS][MAX_REMOTE_ID]; static const struct remote_domain_configuration configurations[] = { @@ -555,6 +555,16 @@ int fastrpc_transport_init(void) } cid = configurations[ii].channel_id; remote_domain = configurations[ii].remote_domain; + VERIFY(err, remote_domain < MAX_REMOTE_ID); + if (err) { + err = -ECHRNG; + goto bail; + } + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } session_control->remote_server_online = false; frpc_socket = &session_control->frpc_socket; @@ -610,13 +620,23 @@ bail: */ void fastrpc_transport_deinit(void) { - int ii = 0, cid = -1, remote_domain = -1; + int ii = 0, cid = -1, remote_domain = -1, err = 0; struct fastrpc_socket *frpc_socket = NULL; struct frpc_transport_session_control *session_control = NULL; for (ii = 0; ii < ARRAY_SIZE(configurations); ii++) { cid = configurations[ii].channel_id; remote_domain = configurations[ii].remote_domain; + VERIFY(err, remote_domain < MAX_REMOTE_ID); + if (err) { + err = -ECHRNG; + goto bail; + } + VERIFY(err, VALID_FASTRPC_CID(cid)); + if (err) { + err = -ECHRNG; + goto bail; + } session_control = glist_session_ctrl[cid][remote_domain]; if (!session_control) @@ -637,4 +657,7 @@ void fastrpc_transport_deinit(void) kfree(session_control); glist_session_ctrl[cid][remote_domain] = NULL; } +bail: + if (err) + ADSPRPC_ERR("fastrpc_socket_deinit failed with err %d\n", err); } From 9e7b2c72205c8950ade3af78c35efc0b0a896d71 Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Sat, 22 Jul 2023 12:08:43 +0530 Subject: [PATCH 075/146] msm: adsprpc: correct the invocation of dma_buf_begin/end_cpu_access In case of IO Coherence disabled, to simulate cache clean and invalidate for output buffers, used dma_buf_end_cpu_access(DMA_TO_DEVICE) and dma_buf_begin_cpu_access(DMA_FROM_DEVICE). 
Change-Id: Id176a26cb740d168a1a28240874434c626e48d75 Signed-off-by: DEEPAK SANNAPAREDDY --- dsp/adsprpc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 50fe7c7c8b..4d0500942c 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2812,9 +2812,9 @@ static void inv_args(struct smq_invoke_ctx *ctx) ctx->overps[i]->mstart)) == map->size) || ctx->overps[i]->do_cmo) { dma_buf_begin_cpu_access(map->buf, - DMA_TO_DEVICE); - dma_buf_end_cpu_access(map->buf, DMA_FROM_DEVICE); + dma_buf_end_cpu_access(map->buf, + DMA_TO_DEVICE); ADSPRPC_DEBUG( "sc 0x%x pv 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n", sc, rpra[over].buf.pv, @@ -2848,10 +2848,10 @@ static void inv_args(struct smq_invoke_ctx *ctx) } up_read(¤t->mm->mmap_lock); dma_buf_begin_cpu_access_partial( - map->buf, DMA_TO_DEVICE, offset, + map->buf, DMA_FROM_DEVICE, offset, inv_len); dma_buf_end_cpu_access_partial(map->buf, - DMA_FROM_DEVICE, offset, + DMA_TO_DEVICE, offset, inv_len); ADSPRPC_DEBUG( "sc 0x%x vm_start 0x%llx pv 0x%llx, offset 0x%llx, mend 0x%llx mstart 0x%llx, len %zu size %zu\n", From 15b180fee9d4c1814f55331617d493f469a257b4 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Tue, 20 Jun 2023 11:29:58 +0530 Subject: [PATCH 076/146] msm: adsprpc: Verbose logging in case of dma attachment failures When dma attachment fails during mmap_create, status of HLOS memory is logged with sizes occupied by heap and non heap buffers mapped in fl maps. The purpose of this data is to get a snapshot of memory usage. 
Change-Id: Ie913702a743a8572d9f68c9b58233d28541167b9 Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 26 ++++++++++++++++++++++++++ dsp/adsprpc_shared.h | 9 +++++++++ 2 files changed, 35 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4d0500942c..af940a59cd 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1089,6 +1089,14 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (!IS_ERR_OR_NULL(map->buf)) dma_buf_put(map->buf); } + if (fl) { + spin_lock(&fl->hlock); + if ((map->flags == ADSP_MMAP_ADD_PAGES) || (map->flags == ADSP_MMAP_ADD_PAGES_LLC)) + fl->mem_snap.heap_bufs_size -= map->size; + else + fl->mem_snap.nonheap_bufs_size -= map->size; + spin_unlock(&fl->hlock); + } bail: if (!map->is_persistent) kfree(map); @@ -1163,6 +1171,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * struct fastrpc_mmap *map = NULL; int err = 0, vmid, sgl_index = 0; struct scatterlist *sgl = NULL; + bool dma_attach_fail = false; + size_t tot_bufs_size = 0; if (!fl) { err = -EBADF; @@ -1227,6 +1237,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * ADSPRPC_ERR( "dma_buf_attach for fd %d for len 0x%zx failed to map buffer on SMMU device %s ret %ld\n", fd, len, dev_name(me->dev), PTR_ERR(map->attach)); + dma_attach_fail = true; err = -EFAULT; goto bail; } @@ -1319,6 +1330,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * "dma_buf_attach for fd %d failed for len 0x%zx to map buffer on SMMU device %s ret %ld\n", fd, len, dev_name(sess->smmu.dev), PTR_ERR(map->attach)); + dma_attach_fail = true; err = -EFAULT; goto bail; } @@ -1398,10 +1410,24 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * } map->len = len; + spin_lock(&fl->hlock); + if ((mflags == ADSP_MMAP_ADD_PAGES) || (mflags == ADSP_MMAP_ADD_PAGES_LLC)) + fl->mem_snap.heap_bufs_size += map->size; + else + fl->mem_snap.nonheap_bufs_size += map->size; + 
spin_unlock(&fl->hlock); + fastrpc_mmap_add(map); *ppmap = map; bail: + if (dma_attach_fail && fl) { + tot_bufs_size = fl->mem_snap.heap_bufs_size + + fl->mem_snap.nonheap_bufs_size; + ADSPRPC_INFO("Heapbufs size: %zu, non-heapbufs size: %zu, total size: %zu\n", + fl->mem_snap.heap_bufs_size, fl->mem_snap.nonheap_bufs_size, + tot_bufs_size); + } if (map) ktime_get_real_ts64(&map->map_end_time); if (err && map) diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index b682318966..e343680990 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -827,6 +827,13 @@ struct fastrpc_dspsignal { int state; }; +struct memory_snapshot { + /* Total size of heap buffers allocated in userspace */ + size_t heap_bufs_size; + /* Total size of non-heap buffers allocated in userspace */ + size_t nonheap_bufs_size; +}; + struct fastrpc_file { struct hlist_node hn; spinlock_t hlock; @@ -844,6 +851,8 @@ struct fastrpc_file { struct fastrpc_buf *pers_hdr_buf; /* Pre-allocated buffer divided into N chunks */ struct fastrpc_buf *hdr_bufs; + /* Store snapshot of memory occupied by different buffers */ + struct memory_snapshot mem_snap; struct fastrpc_session_ctx *secsctx; uint32_t mode; From e72afbb1efa6d538181fcb5ef5283bea6d466b46 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Thu, 6 Jul 2023 14:28:49 -0700 Subject: [PATCH 077/146] msm: adsprpc: Fail set session info on subsequent calls Currently there is no check if set session info is invoked multiple times. Multiple calls to session info leaks memory and process identifiers. Fail set session info on subsequent calls. 
Signed-off-by: Himateja Reddy (cherry picked from commit 1a5889127e7b0ff6974e50d762708bc2ef2d3a6c) Signed-off-by: Linux Image Build Automation Change-Id: Id3512263274b86f0534fc5fd45bdf62783859ad5 --- dsp/adsprpc.c | 19 +++++++++++++++++-- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 8b19427c88..306e589f0d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3547,6 +3547,11 @@ static int fastrpc_set_session_info( int err = 0; struct fastrpc_apps *me = &gfa; + if (fl->set_session_info) { + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } /* * Third-party apps don't have permission to open the fastrpc device, so * it is opened on their behalf by DSP HAL. This is detected by @@ -5818,9 +5823,8 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; - VERIFY(err, VALID_FASTRPC_CID(cid)); /* Reset the tgid usage to false */ - if (!err) + if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6285,6 +6289,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; + fl->set_session_info = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6430,6 +6435,16 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } + spin_lock(&fl->hlock); + if (fl->set_session_info) { + spin_unlock(&fl->hlock); + ADSPRPC_ERR("Set session info invoked multiple times\n"); + err = -EBADR; + goto bail; + } + // Set set_session_info to true + fl->set_session_info = true; + spin_unlock(&fl->hlock); VERIFY(err, VALID_FASTRPC_CID(cid)); if (err) { err = -ECHRNG; diff --git 
a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 3ee534c460..b682318966 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -923,6 +923,8 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; /* Flag to indicate 4 session support available */ bool multi_session_support; + /* Flag to indicate session info is set */ + bool set_session_info; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From c982dc242fb5c2bd51653a513481d662b2818382 Mon Sep 17 00:00:00 2001 From: Himateja Reddy Date: Mon, 17 Jul 2023 17:01:05 -0700 Subject: [PATCH 078/146] msm: adsprpc: Unique handles for different remote sub systems Currently handles are unique only for a particular sub systems, but they are not unique across all remote sub systems. Assign unique handle to each session of the remote sub system. Change-Id: Ie246f80c440d684c8fcb30ad0103da069c82ab6e Signed-off-by: Himateja Reddy Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 306e589f0d..63bc80b40b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -201,7 +201,7 @@ #define MAX_PERSISTENT_HEADERS (25) /* Max value of unique fastrpc tgid */ -#define MAX_FRPC_TGID 65 +#define MAX_FRPC_TGID 256 #define PERF_CAPABILITY_SUPPORT (1 << 1) #define KERNEL_ERROR_CODE_V1_SUPPORT 1 @@ -230,7 +230,7 @@ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; /* Array to keep track unique tgid_frpc usage */ -static bool frpc_tgid_usage_array[NUM_CHANNELS][MAX_FRPC_TGID] = {0}; +static bool frpc_tgid_usage_array[MAX_FRPC_TGID] = {0}; /* Fastrpc remote process attributes */ enum fastrpc_proc_attr { @@ -5824,8 +5824,8 @@ skip_dump_wait: fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; /* Reset the tgid usage to false */ - if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) - frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; 
+ if (fl->tgid_frpc != -1) + frpc_tgid_usage_array[fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6339,17 +6339,17 @@ bail: } // Generate a unique process ID to DSP process -static int get_unique_hlos_process_id(uint32_t cid) +static int get_unique_hlos_process_id(void) { int tgid_frpc = -1, tgid_index = 1; struct fastrpc_apps *me = &gfa; spin_lock(&me->hlock); for (tgid_index = 1; tgid_index < MAX_FRPC_TGID; tgid_index++) { - if (!frpc_tgid_usage_array[cid][tgid_index]) { + if (!frpc_tgid_usage_array[tgid_index]) { tgid_frpc = tgid_index; /* Set the tgid usage to false */ - frpc_tgid_usage_array[cid][tgid_index] = true; + frpc_tgid_usage_array[tgid_index] = true; break; } } @@ -6366,7 +6366,7 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) memcpy(cur_comm, current->comm, TASK_COMM_LEN); cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; - fl->tgid_frpc = get_unique_hlos_process_id(cid); + fl->tgid_frpc = get_unique_hlos_process_id(); VERIFY(err, fl->tgid_frpc != -1); if (err) { ADSPRPC_ERR("too many fastrpc clients, max %u allowed\n", MAX_FRPC_TGID); From cccaae9d74b546591e67d175b12a135a685e3de9 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Fri, 28 Jul 2023 18:15:38 +0530 Subject: [PATCH 079/146] msm: adsprpc: Return fail when hyp assign failed. Currently the error code from hype assign failure is over writing by fastrpc_unmap_on_dsp success and returning the false success. So added separate variable to capture the error from fastrpc_unmap_on_dsp. 
Change-Id: I6444635925416d8ef96800a02e8a1e3e550fa011 Acked-by: Ramesh Nallagopu Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index af940a59cd..61c5676e04 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4931,16 +4931,18 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, &src_perms, dst_perms, rhvm->vmcount); kfree(dst_perms); if (err) { + int unmap_err = 0; + ADSPRPC_ERR( "rh hyp assign failed with %d for phys 0x%llx, size %zu\n", err, phys, size); err = -EADDRNOTAVAIL; - err = fastrpc_unmap_on_dsp(fl, + unmap_err = fastrpc_unmap_on_dsp(fl, *raddr, phys, size, flags); - if (err) { + if (unmap_err) { ADSPRPC_ERR( "failed to unmap %d for phys 0x%llx, size %zd\n", - err, phys, size); + unmap_err, phys, size); } goto bail; } From 079b05c23e2ccb6157a2dff7607514a562373cda Mon Sep 17 00:00:00 2001 From: Linux Image Build Automation Date: Mon, 31 Jul 2023 13:01:59 -0700 Subject: [PATCH 080/146] Revert "msm: adsprpc: Unique handles for different remote sub systems" This reverts commit c982dc242fb5c2bd51653a513481d662b2818382. 
Change-Id: I453a3f993b31699cf906d400fdb70590fa6b8367 Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 63bc80b40b..306e589f0d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -201,7 +201,7 @@ #define MAX_PERSISTENT_HEADERS (25) /* Max value of unique fastrpc tgid */ -#define MAX_FRPC_TGID 256 +#define MAX_FRPC_TGID 65 #define PERF_CAPABILITY_SUPPORT (1 << 1) #define KERNEL_ERROR_CODE_V1_SUPPORT 1 @@ -230,7 +230,7 @@ static int md_unique_index_flag[MAX_UNIQUE_ID] = { 0, 0, 0, 0, 0 }; /* Array to keep track unique tgid_frpc usage */ -static bool frpc_tgid_usage_array[MAX_FRPC_TGID] = {0}; +static bool frpc_tgid_usage_array[NUM_CHANNELS][MAX_FRPC_TGID] = {0}; /* Fastrpc remote process attributes */ enum fastrpc_proc_attr { @@ -5824,8 +5824,8 @@ skip_dump_wait: fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; /* Reset the tgid usage to false */ - if (fl->tgid_frpc != -1) - frpc_tgid_usage_array[fl->tgid_frpc] = false; + if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) + frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6339,17 +6339,17 @@ bail: } // Generate a unique process ID to DSP process -static int get_unique_hlos_process_id(void) +static int get_unique_hlos_process_id(uint32_t cid) { int tgid_frpc = -1, tgid_index = 1; struct fastrpc_apps *me = &gfa; spin_lock(&me->hlock); for (tgid_index = 1; tgid_index < MAX_FRPC_TGID; tgid_index++) { - if (!frpc_tgid_usage_array[tgid_index]) { + if (!frpc_tgid_usage_array[cid][tgid_index]) { tgid_frpc = tgid_index; /* Set the tgid usage to false */ - frpc_tgid_usage_array[tgid_index] = true; + frpc_tgid_usage_array[cid][tgid_index] = true; break; } } @@ -6366,7 +6366,7 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) memcpy(cur_comm, current->comm, 
TASK_COMM_LEN); cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; - fl->tgid_frpc = get_unique_hlos_process_id(); + fl->tgid_frpc = get_unique_hlos_process_id(cid); VERIFY(err, fl->tgid_frpc != -1); if (err) { ADSPRPC_ERR("too many fastrpc clients, max %u allowed\n", MAX_FRPC_TGID); From 32d85c1e8314ec292554d18f78d64085b5e8539b Mon Sep 17 00:00:00 2001 From: Linux Image Build Automation Date: Mon, 31 Jul 2023 13:05:30 -0700 Subject: [PATCH 081/146] Revert "msm: adsprpc: Fail set session info on subsequent calls" This reverts commit e72afbb1efa6d538181fcb5ef5283bea6d466b46. Change-Id: Ie0b1e84d2306456ae66c41a811f3a43b1ff2d4de Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 19 ++----------------- dsp/adsprpc_shared.h | 2 -- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 306e589f0d..8b19427c88 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3547,11 +3547,6 @@ static int fastrpc_set_session_info( int err = 0; struct fastrpc_apps *me = &gfa; - if (fl->set_session_info) { - ADSPRPC_ERR("Set session info invoked multiple times\n"); - err = -EBADR; - goto bail; - } /* * Third-party apps don't have permission to open the fastrpc device, so * it is opened on their behalf by DSP HAL. 
This is detected by @@ -5823,8 +5818,9 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; + VERIFY(err, VALID_FASTRPC_CID(cid)); /* Reset the tgid usage to false */ - if (VALID_FASTRPC_CID(cid) && fl->tgid_frpc != -1) + if (!err) frpc_tgid_usage_array[cid][fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); @@ -6289,7 +6285,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; - fl->set_session_info = false; init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; @@ -6435,16 +6430,6 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) err = -EBADF; goto bail; } - spin_lock(&fl->hlock); - if (fl->set_session_info) { - spin_unlock(&fl->hlock); - ADSPRPC_ERR("Set session info invoked multiple times\n"); - err = -EBADR; - goto bail; - } - // Set set_session_info to true - fl->set_session_info = true; - spin_unlock(&fl->hlock); VERIFY(err, VALID_FASTRPC_CID(cid)); if (err) { err = -ECHRNG; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index b682318966..3ee534c460 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -923,8 +923,6 @@ struct fastrpc_file { struct fastrpc_proc_sharedbuf_info sharedbuf_info; /* Flag to indicate 4 session support available */ bool multi_session_support; - /* Flag to indicate session info is set */ - bool set_session_info; }; int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, From e986aa6a2f168b28a08e03ad8e59d566cb4d8116 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Fri, 28 Jul 2023 13:00:23 +0530 Subject: [PATCH 082/146] msm: adsprpc: add tgid to frpc_tgid conversion debug logs Currently we send custom tgid instead of original tgid to DSP. It is difficult to debug issues only with DSP logs, dumps and logcat. 
Add original tgid to custom tgid conversion log to help in debug. Change-Id: If05bae05bce69cf513cef0bd1672f78856c11ea8 Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 61c5676e04..6e19f6e473 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6038,6 +6038,8 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, "\n%s %13s %d\n", "cid", ":", fl->cid); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s %12s %d\n", "tgid", ":", fl->tgid); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %14s %d\n", "tgid_frpc", ":", fl->tgid_frpc); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s %7s %d\n", "sessionid", ":", fl->sessionid); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -6402,6 +6404,8 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl, uint32_t cid) err = -EUSERS; goto bail; } + ADSPRPC_INFO("HLOS pid %d, cid %d is mapped to unique sessions pid %d", + fl->tgid, cid, fl->tgid_frpc); /* * Third-party apps don't have permission to open the fastrpc device, so From ee0b4fc8a6f7f92deac499da2fef7ba8f5fe6d49 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Thu, 27 Jul 2023 14:33:35 +0530 Subject: [PATCH 083/146] dsp-kernel: make pm relax vote when process is exiting If process is exiting and pm wakelock is not released, cpu can't go to sleep. Relax wakeup source during file_free to allow cpu to go to sleep. 
Change-Id: Ie6161edbd43f1fb11f36fbb8f913ceaf92e89736 Signed-off-by: nishant chaubey --- dsp/adsprpc.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6e19f6e473..570c5bf2f5 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3064,6 +3064,24 @@ static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type) pm_wakeup_ws_event(wake_source, fl->ws_timeout, true); } +static inline void fastrpc_pm_relax(struct fastrpc_file *fl, int channel_type) +{ + struct fastrpc_apps *me = &gfa; + struct wakeup_source *wake_source = NULL; + + if (!fl->wake_enable) + return; + + if (channel_type == SECURE_CHANNEL) + wake_source = me->wake_source_secure; + else if (channel_type == NON_SECURE_CHANNEL) + wake_source = me->wake_source; + + ADSPRPC_INFO("done for tgid %d\n", fl->tgid); + if (wake_source) + __pm_relax(wake_source); +} + static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx, uint32_t kernel) { @@ -5895,6 +5913,7 @@ skip_dump_wait: } while (lmap); mutex_unlock(&fl->map_mutex); mutex_unlock(&fl->internal_map_mutex); + fastrpc_pm_relax(fl, gcinfo[fl->cid].secure); if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); From 322b6a259689c8d325c0b3ea9fe570be2d4c3129 Mon Sep 17 00:00:00 2001 From: quic_anane Date: Tue, 1 Aug 2023 10:54:45 +0530 Subject: [PATCH 084/146] msm: Add cliffs module support Add cliffs module to support cliffs target Signed-off-by: quic_anane --- BUILD.bazel | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/BUILD.bazel b/BUILD.bazel index aab077fba2..e7bf22fd24 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -16,4 +16,8 @@ define_modules("pineapple", "gki") define_modules("blair", "consolidate") -define_modules("blair", "gki") \ No newline at end of file +define_modules("blair", "gki") + +define_modules("cliffs", "consolidate") + +define_modules("cliffs", "gki") \ No newline at end of file From 
6dd07d65780e0012074573d871b574de85c7de72 Mon Sep 17 00:00:00 2001 From: nishant chaubey Date: Thu, 27 Jul 2023 14:33:35 +0530 Subject: [PATCH 085/146] dsp-kernel: make pm relax vote when process is exiting If process is exiting and pm wakelock is not released, cpu can't go to sleep. Relax wakeup source during file_free to allow cpu to go to sleep. Change-Id: I1be9d6b295c123e657dac90ba7fa013cd2f42bae Signed-off-by: nishant chaubey Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6e19f6e473..570c5bf2f5 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3064,6 +3064,24 @@ static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type) pm_wakeup_ws_event(wake_source, fl->ws_timeout, true); } +static inline void fastrpc_pm_relax(struct fastrpc_file *fl, int channel_type) +{ + struct fastrpc_apps *me = &gfa; + struct wakeup_source *wake_source = NULL; + + if (!fl->wake_enable) + return; + + if (channel_type == SECURE_CHANNEL) + wake_source = me->wake_source_secure; + else if (channel_type == NON_SECURE_CHANNEL) + wake_source = me->wake_source; + + ADSPRPC_INFO("done for tgid %d\n", fl->tgid); + if (wake_source) + __pm_relax(wake_source); +} + static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx, uint32_t kernel) { @@ -5895,6 +5913,7 @@ skip_dump_wait: } while (lmap); mutex_unlock(&fl->map_mutex); mutex_unlock(&fl->internal_map_mutex); + fastrpc_pm_relax(fl, gcinfo[fl->cid].secure); if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); From 02257e15e3d44597f616807eb0dcfb2ed2363c43 Mon Sep 17 00:00:00 2001 From: Thyagarajan Venkatanarayanan Date: Thu, 10 Aug 2023 16:30:27 -0700 Subject: [PATCH 086/146] msm: adsprpc: add ftrace for dspsignal events for perf debugging Define new ftrace to log dspsignal events like signalling, waiting, waking up, completing and cancelling wait. 
These ftraces can be used in performance debugging of dspqueue overheads. Change-Id: Iaf5f3df0f7ba3bd3da94f7614724b8f63ca09ed6 Signed-off-by: Thyagarajan Venkatanarayanan --- dsp/adsprpc.c | 12 +++++++++--- dsp/fastrpc_trace.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 570c5bf2f5..9ade55bb0c 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5652,6 +5652,7 @@ static void handle_remote_signal(uint64_t msg, int cid) (sig->state == DSPSIGNAL_STATE_SIGNALED)) { DSPSIGNAL_VERBOSE("Signaling signal %u for PID %u\n", signal_id, pid); + trace_fastrpc_dspsignal("complete", signal_id, sig->state, 0); complete(&sig->comp); sig->state = DSPSIGNAL_STATE_SIGNALED; } else if (sig->state == DSPSIGNAL_STATE_UNUSED) { @@ -5682,7 +5683,7 @@ int fastrpc_handle_rpc_response(void *data, int len, int cid) struct fastrpc_channel_ctx *chan = NULL; unsigned long irq_flags = 0; int64_t ns = 0; - uint64_t xo_time_in_us = 0; + uint64_t xo_time_in_us = 0, dspsig_msg = 0; xo_time_in_us = CONVERT_CNT_TO_US(__arch_counter_get_cntvct()); @@ -5690,7 +5691,9 @@ int fastrpc_handle_rpc_response(void *data, int len, int cid) /* * dspsignal message from the DSP */ - handle_remote_signal(*((uint64_t *)data), cid); + dspsig_msg = *((uint64_t *)data); + trace_fastrpc_transport_response(cid, dspsig_msg, 0, 0, 0); + handle_remote_signal(dspsig_msg, cid); goto bail; } @@ -6870,7 +6873,7 @@ int fastrpc_dspsignal_signal(struct fastrpc_file *fl, msg = (((uint64_t)fl->tgid_frpc) << 32) | ((uint64_t)sig->signal_id); err = fastrpc_transport_send(cid, (void *)&msg, sizeof(msg), fl->tvm_remote_domain); mutex_unlock(&channel_ctx->smd_mutex); - + trace_fastrpc_dspsignal("signal", sig->signal_id, 0, 0); bail: return err; } @@ -6923,10 +6926,12 @@ int fastrpc_dspsignal_wait(struct fastrpc_file *fl, } spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); + trace_fastrpc_dspsignal("wait", signal_id, s->state, wait->timeout_usec); 
if (timeout != 0xffffffff) ret = wait_for_completion_interruptible_timeout(&s->comp, timeout); else ret = wait_for_completion_interruptible(&s->comp); + trace_fastrpc_dspsignal("wakeup", signal_id, s->state, wait->timeout_usec); if (ret == 0) { DSPSIGNAL_VERBOSE("Wait for signal %u timed out\n", signal_id); @@ -7114,6 +7119,7 @@ int fastrpc_dspsignal_cancel_wait(struct fastrpc_file *fl, if (s->state != DSPSIGNAL_STATE_CANCELED) { s->state = DSPSIGNAL_STATE_CANCELED; + trace_fastrpc_dspsignal("cancel", signal_id, s->state, 0); complete_all(&s->comp); } diff --git a/dsp/fastrpc_trace.h b/dsp/fastrpc_trace.h index 67fba9019d..f89c69df1b 100644 --- a/dsp/fastrpc_trace.h +++ b/dsp/fastrpc_trace.h @@ -391,6 +391,36 @@ TRACE_EVENT(fastrpc_msg, TP_printk(" %s", __get_str(buf)) ); +TRACE_EVENT(fastrpc_dspsignal, + + TP_PROTO(const char *event, uint32_t signal_id, + int state, uint32_t timeout), + + TP_ARGS(event, signal_id, state, timeout), + + TP_STRUCT__entry( + __string(buf, event) + __field(u32, signal_id) + __field(int, state) + __field(u32, timeout) + ), + + TP_fast_assign( +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + memcpy(__get_str(buf), (event), (sizeof(event) - 1)); + __get_str(buf)[sizeof(event) - 1] = '\0'; +#else + __assign_str(buf, event); +#endif + __entry->signal_id = signal_id; + __entry->state = state; + __entry->timeout = timeout; + ), + + TP_printk("%s for sig id %u, state %d, timeout %u", + __get_str(buf), __entry->signal_id, __entry->state, __entry->timeout) +); + #endif /* This part must be outside protection */ From b0308ca14e40f4aeb25c855741740a52ede5786e Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Thu, 10 Aug 2023 16:44:12 -0700 Subject: [PATCH 087/146] adsprpc: tvm: Forcing compatibility flag to true for TVM Setting flag to true in TVM to force TVM driver to use APIs adsp_process_group_mmap64 and adsp_process_group_munmap64 instead of adsp_process_group_mmap and adsp_process_group_munmap. 
Change-Id: Ibbeb7f4177f11e75b1150e011090347219f04806 --- dsp/adsprpc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 9ade55bb0c..1e93100f40 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8834,7 +8834,12 @@ static int __init fastrpc_device_init(void) VERIFY(err, !IS_ERR(me->class)); if (err) goto class_create_bail; + +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + me->compat = 1; +#else me->compat = (fops.compat_ioctl == NULL) ? 0 : 1; +#endif /* * Create devices and register with sysfs From 49d8960d0c08c89f26e267fcca8ac6a07e9d60c1 Mon Sep 17 00:00:00 2001 From: Krishna Dogney Date: Mon, 5 Jun 2023 15:53:29 -0700 Subject: [PATCH 088/146] msm: adsprpc: Validate sessions of process are of same pd type on same dsp. Currently, a single process can create multiple sessions of different pd types. Now, force all sessions of a process to be of same pd type on same dsp. Also, allow untrusted apps to create multiple sessions on dsp. Signed-off-by: Krishna Dogney Change-Id:I98c97c1ceeefa303cee4909ccca280a2430da908 --- dsp/adsprpc.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 1e93100f40..c009b2c95e 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3943,9 +3943,12 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, struct smq_phy_page pages[PAGESLEN_WITH_SHAREDBUF]; struct fastrpc_mmap *file = NULL; struct fastrpc_buf *imem = NULL; - unsigned long imem_dma_attr = 0; + unsigned long imem_dma_attr = 0, irq_flags = 0; remote_arg_t ra[6]; int fds[6]; + struct fastrpc_apps *me = &gfa; + struct hlist_node *n = NULL; + struct fastrpc_file *fl_curr = NULL; unsigned int gid = 0, one_mb = 1024*1024; unsigned int dsp_userpd_memlen = 0; struct fastrpc_buf *init_mem; @@ -3993,6 +3996,20 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, if (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE) fl->is_unsigned_pd = true; + 
/* Validate that any existing sessions of process are of same pd type */ + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(fl_curr, n, &me->drivers, hn) { + if ((fl != fl_curr) && (fl->tgid == fl_curr->tgid) && (fl->cid == fl_curr->cid)) { + err = (fl->is_unsigned_pd != fl_curr->is_unsigned_pd) ? -ECONNREFUSED : 0; + break; + } + } + spin_unlock_irqrestore(&me->hlock, irq_flags); + if (err) { + ADSPRPC_ERR("existing session pd type %u not same as requested pd type %u \n", + fl_curr->is_unsigned_pd, fl->is_unsigned_pd); + goto bail; + } /* Check if file memory passed by userspace is valid */ VERIFY(err, access_ok((void __user *)init->file, init->filelen)); if (err) @@ -6765,12 +6782,6 @@ int fastrpc_setmode(unsigned long ioctl_param, fl->profile = (uint32_t)ioctl_param; break; case FASTRPC_MODE_SESSION: - if (fl->untrusted_process) { - err = -EPERM; - ADSPRPC_ERR( - "multiple sessions not allowed for untrusted apps\n"); - goto bail; - } if (!fl->multi_session_support) fl->sessionid = 1; break; @@ -6778,7 +6789,6 @@ int fastrpc_setmode(unsigned long ioctl_param, err = -ENOTTY; break; } -bail: return err; } From e32585b2b4604e1426349e24f86c50f9a9fdc5bb Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Tue, 15 Aug 2023 10:21:06 -0700 Subject: [PATCH 089/146] adsprpc: tvm: Obtain buffer attributes to determine ownership and access Query buffer's attributes to determine if buffer should be mapped to secure context bank. Query buffer's HLOS access and return error from TVM driver if HLOS has access to buffer. 
Change-Id: Ia6d02b28929e1126a01c69a8425b6797fbee3506 --- dsp/adsprpc.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 78 insertions(+), 6 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index c009b2c95e..a48ed99b3a 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -43,6 +43,7 @@ #include "adsprpc_shared.h" #include #include +#include #include #include #include @@ -1159,6 +1160,77 @@ bail: return err; } +static int get_buffer_attr(struct dma_buf *buf, bool *exclusive_access, bool *hlos_access) +{ + const int *vmids_list = NULL, *perms = NULL; + int err = 0, vmids_list_len = 0; + + *exclusive_access = false; + *hlos_access = false; + err = mem_buf_dma_buf_get_vmperm(buf, &vmids_list, &perms, &vmids_list_len); + if (err) + goto bail; + + /* + * If one VM has access to buffer and is the current VM, + * then VM has exclusive access to buffer + */ + if (vmids_list_len == 1 && vmids_list[0] == mem_buf_current_vmid()) + *exclusive_access = true; + +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + /* + * PVM (HLOS) can share buffers with TVM. In that case, + * it is expected to relinquish its ownership to those buffers + * before sharing. But if the PVM still retains access, then + * these buffers cannot be used by TVM. 
+ */ + + for (int ii = 0; ii < vmids_list_len; ii++) { + if (vmids_list[ii] == VMID_HLOS) { + *hlos_access = true; + break; + } + } +#endif + +bail: + return err; +} + +static int set_buffer_secure_type(struct fastrpc_mmap *map) +{ + int err = 0; + bool hlos_access = false, exclusive_access = false; + + VERIFY(err, 0 == (err = get_buffer_attr(map->buf, &exclusive_access, &hlos_access))); + if (err) { + ADSPRPC_ERR("failed to obtain buffer attributes for fd %d ret %d\n", map->fd, err); + err = -EBADFD; + goto bail; + } +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + if (hlos_access) { + ADSPRPC_ERR("Sharing HLOS buffer (fd %d) not allowed on TVM\n", map->fd); + err = -EACCES; + goto bail; + } +#endif + /* + * Secure buffers would always be owned by multiple VMs. + * If current VM is the exclusive owner of a buffer, it is considered non-secure. + * In PVM: + * - CPZ buffers are secure + * - All other buffers are non-secure + * In TVM: + * - Since it is a secure environment by default, there are no explicit "secure" buffers + * - All buffers are marked "non-secure" + */ + map->secure = (exclusive_access) ? 0 : 1; +bail: + return err; +} + static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf *buf, unsigned int attr, uintptr_t va, size_t len, int mflags, struct fastrpc_mmap **ppmap) @@ -1224,10 +1296,10 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * err = -EBADFD; goto bail; } + err = set_buffer_secure_type(map); + if (err) + goto bail; -#if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) - map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 0 : 1; -#endif map->va = 0; map->phys = 0; @@ -1293,10 +1365,10 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * goto bail; } } + err = set_buffer_secure_type(map); + if (err) + goto bail; -#if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) - map->secure = (mem_buf_dma_buf_exclusive_owner(map->buf)) ? 
0 : 1; -#endif if (map->secure) { if (!fl->secsctx) err = fastrpc_session_alloc_secure_memory(chan, 1, From b92e5483d4b3258754235a334b475a3886c38c16 Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Mon, 21 Aug 2023 14:14:38 +0530 Subject: [PATCH 090/146] msm: adsprpc: add warning log for non-ion buffer Print non-ion buffer details as warning to help in size issues debugging. Change-Id: Ib96af6d202620e06cd9ed15f2698f6eac5c3a444 Signed-off-by: Ramesh Nallagopu --- dsp/adsprpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index a48ed99b3a..623b3d4874 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2560,6 +2560,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) err = -EFAULT; goto bail; } + if (templen > DEBUG_PRINT_SIZE_LIMIT) + ADSPRPC_WARN( + "user passed non ion buffer size %zu, mend 0x%llx mstart 0x%llx, sc 0x%x handle 0x%x\n", + templen, mend, mstart, sc, ctx->handle); copylen += templen; } totallen = ALIGN(totallen, BALIGN) + copylen; From fb1fbff95a55188c6fe9bdf9a7697bef55e96108 Mon Sep 17 00:00:00 2001 From: Linux Image Build Automation Date: Wed, 23 Aug 2023 16:19:28 -0700 Subject: [PATCH 091/146] Revert "dsp-kernel: make pm relax vote when process is exiting" This reverts commit 6dd07d65780e0012074573d871b574de85c7de72. 
Change-Id: I698b0f770e4aff4c99bfd2635d4a236023b7a5dd Signed-off-by: Linux Image Build Automation --- dsp/adsprpc.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 570c5bf2f5..6e19f6e473 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3064,24 +3064,6 @@ static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type) pm_wakeup_ws_event(wake_source, fl->ws_timeout, true); } -static inline void fastrpc_pm_relax(struct fastrpc_file *fl, int channel_type) -{ - struct fastrpc_apps *me = &gfa; - struct wakeup_source *wake_source = NULL; - - if (!fl->wake_enable) - return; - - if (channel_type == SECURE_CHANNEL) - wake_source = me->wake_source_secure; - else if (channel_type == NON_SECURE_CHANNEL) - wake_source = me->wake_source; - - ADSPRPC_INFO("done for tgid %d\n", fl->tgid); - if (wake_source) - __pm_relax(wake_source); -} - static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx, uint32_t kernel) { @@ -5913,7 +5895,6 @@ skip_dump_wait: } while (lmap); mutex_unlock(&fl->map_mutex); mutex_unlock(&fl->internal_map_mutex); - fastrpc_pm_relax(fl, gcinfo[fl->cid].secure); if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); From 7fd02f3859012cd24f4a3bbce12055566a599189 Mon Sep 17 00:00:00 2001 From: quic_anane Date: Thu, 3 Aug 2023 17:36:49 +0530 Subject: [PATCH 092/146] msm: adsprpc: handle failures during fastrpc device init Currenty in case where CMA alloc fails in fastrpc_device_init, a warning is issued. But the error code is not reset. With this change, above issue is handled properly and if CMA allocation is successful, then only add the information to the channel structure. 
Change-Id: I15aa32e82cecedaf4e2da7275cef13369b3429bc Signed-off-by: quic_anane --- dsp/adsprpc.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 623b3d4874..7cc3943e76 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8963,29 +8963,33 @@ static int __init fastrpc_device_init(void) if (i == CDSP_DOMAIN_ID) { me->channel[i].dev = me->non_secure_dev; #if !IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + /* + * Allocate CMA memory for mini dump. + * Ignore error as CMA node may not be available on all targets. + */ err = fastrpc_alloc_cma_memory(®ion_phys, ®ion_vaddr, MINI_DUMP_DBG_SIZE, (unsigned long)attr); #endif - if (err) - ADSPRPC_WARN("%s: CMA alloc failed err 0x%x\n", - __func__, err); + if (err) { + ADSPRPC_WARN("CMA alloc failed err 0x%x\n", err); + err = 0; + } VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL))); if (err) { err = -ENOMEM; - ADSPRPC_ERR("%s: CMA alloc failed err 0x%x\n", - __func__, err); - goto device_create_bail; + ADSPRPC_WARN("kzalloc failed err 0x%x\n", err); + err = 0; + } else { + INIT_HLIST_NODE(&buf->hn); + buf->virt = region_vaddr; + buf->phys = (uintptr_t)region_phys; + buf->size = MINI_DUMP_DBG_SIZE; + buf->dma_attr = attr; + buf->raddr = 0; + me->channel[i].buf = buf; } - INIT_HLIST_NODE(&buf->hn); - buf->virt = region_vaddr; - buf->phys = (uintptr_t)region_phys; - buf->size = MINI_DUMP_DBG_SIZE; - buf->dma_attr = attr; - buf->raddr = 0; - ktime_get_real_ts64(&buf->buf_start_time); - me->channel[i].buf = buf; } if (IS_ERR_OR_NULL(me->channel[i].handle)) pr_warn("adsprpc: %s: SSR notifier register failed for %s with err %d\n", From 7b85330500edfabe94544bc6dfafd98a613dabde Mon Sep 17 00:00:00 2001 From: "V S Ganga VaraPrasad (VARA) Adabala" Date: Sun, 27 Aug 2023 16:07:36 +0530 Subject: [PATCH 093/146] Revert "Revert "dsp-kernel: make pm relax vote when process is exiting"" This reverts commit fb1fbff95a55188c6fe9bdf9a7697bef55e96108. 
Signed-off-by: V S Ganga VaraPrasad (VARA) Adabala --- dsp/adsprpc.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6e19f6e473..570c5bf2f5 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3064,6 +3064,24 @@ static inline void fastrpc_pm_awake(struct fastrpc_file *fl, int channel_type) pm_wakeup_ws_event(wake_source, fl->ws_timeout, true); } +static inline void fastrpc_pm_relax(struct fastrpc_file *fl, int channel_type) +{ + struct fastrpc_apps *me = &gfa; + struct wakeup_source *wake_source = NULL; + + if (!fl->wake_enable) + return; + + if (channel_type == SECURE_CHANNEL) + wake_source = me->wake_source_secure; + else if (channel_type == NON_SECURE_CHANNEL) + wake_source = me->wake_source; + + ADSPRPC_INFO("done for tgid %d\n", fl->tgid); + if (wake_source) + __pm_relax(wake_source); +} + static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx, uint32_t kernel) { @@ -5895,6 +5913,7 @@ skip_dump_wait: } while (lmap); mutex_unlock(&fl->map_mutex); mutex_unlock(&fl->internal_map_mutex); + fastrpc_pm_relax(fl, gcinfo[fl->cid].secure); if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); From 6087dc5827d2013a89303c237a1019282049ebaf Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Wed, 2 Aug 2023 02:18:11 +0530 Subject: [PATCH 094/146] msm: adsprpc: Usage of HLIST for async queue instead of LIST To avoid queueing of a duplicate job that may belong to a freed ctx, update async queue type from LIST to HLIST to avoid appending unhashed nodes back to queue. Thread race can occur between thread undergoing SSR routine and invoke response thread for FASTRPC_INVOKE2_ASYNC_RESPONSE to queue job to the async queue. 
Change-Id: Iebcd0e82f22ceb64d0f89e8458d6329c08c62bdc Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 22 +++++++++++++--------- dsp/adsprpc_shared.h | 4 ++-- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 7cc3943e76..560e45a594 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1876,8 +1876,9 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, } INIT_HLIST_NODE(&ctx->hn); - INIT_LIST_HEAD(&ctx->asyncn); + INIT_HLIST_NODE(&ctx->asyncn); hlist_add_fake(&ctx->hn); + hlist_add_fake(&ctx->asyncn); ctx->fl = fl; ctx->maps = (struct fastrpc_mmap **)(&ctx[1]); ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]); @@ -2098,10 +2099,12 @@ static void fastrpc_queue_completed_async_job(struct smq_invoke_ctx *ctx) spin_lock_irqsave(&fl->aqlock, flags); if (ctx->is_early_wakeup) goto bail; - list_add_tail(&ctx->asyncn, &fl->clst.async_queue); - atomic_add(1, &fl->async_queue_job_count); - ctx->is_early_wakeup = true; - wake_up_interruptible(&fl->async_wait_queue); + if (!hlist_unhashed(&ctx->asyncn)) { + hlist_add_head(&ctx->asyncn, &fl->clst.async_queue); + atomic_add(1, &fl->async_queue_job_count); + ctx->is_early_wakeup = true; + wake_up_interruptible(&fl->async_wait_queue); + } bail: spin_unlock_irqrestore(&fl->aqlock, flags); } @@ -2355,7 +2358,7 @@ static void context_list_ctor(struct fastrpc_ctx_lst *me) INIT_HLIST_HEAD(&me->interrupted); INIT_HLIST_HEAD(&me->pending); me->num_active_ctxs = 0; - INIT_LIST_HEAD(&me->async_queue); + INIT_HLIST_HEAD(&me->async_queue); INIT_LIST_HEAD(&me->notif_queue); } @@ -3502,10 +3505,11 @@ static int fastrpc_wait_on_async_queue( struct fastrpc_file *fl) { int err = 0, ierr = 0, interrupted = 0, perfErr = 0; - struct smq_invoke_ctx *ctx = NULL, *ictx = NULL, *n = NULL; + struct smq_invoke_ctx *ctx = NULL, *ictx = NULL; unsigned long flags; uint64_t *perf_counter = NULL; bool isworkdone = false; + struct hlist_node *n; read_async_job: interrupted = 
wait_event_interruptible(fl->async_wait_queue, @@ -3523,8 +3527,8 @@ read_async_job: goto bail; spin_lock_irqsave(&fl->aqlock, flags); - list_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { - list_del_init(&ictx->asyncn); + hlist_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { + hlist_del_init(&ictx->asyncn); atomic_sub(1, &fl->async_queue_job_count); ctx = ictx; break; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index e343680990..627678b7dd 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -579,7 +579,7 @@ struct smq_notif_rsp { struct smq_invoke_ctx { struct hlist_node hn; /* Async node to add to async job ctx list */ - struct list_head asyncn; + struct hlist_node asyncn; struct completion work; int retval; int pid; @@ -631,7 +631,7 @@ struct fastrpc_ctx_lst { /* Number of active contexts queued to DSP */ uint32_t num_active_ctxs; /* Queue which holds all async job contexts of process */ - struct list_head async_queue; + struct hlist_head async_queue; /* Queue which holds all status notifications of process */ struct list_head notif_queue; }; From 955d53fcac4f614d35f1168ba1f89195dc124c76 Mon Sep 17 00:00:00 2001 From: Krishna Dogney Date: Tue, 29 Aug 2023 14:17:56 -0700 Subject: [PATCH 095/146] Revert "msm: adsprpc: Validate sessions of process are of same pd type on same dsp." This reverts commit 49d8960d0c08c89f26e267fcca8ac6a07e9d60c1. Reason for revert: This change will block applications which will create multiple sessions with different pd type. Hence need to revert the change. Keeping 3rd party app to create multiple session intact. 
Signed-off-by: Krishna Dogney Change-Id: I1bef85d37bd003b752db05d42530d3ddfad0f726 --- dsp/adsprpc.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 560e45a594..2c289693a9 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4023,12 +4023,9 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, struct smq_phy_page pages[PAGESLEN_WITH_SHAREDBUF]; struct fastrpc_mmap *file = NULL; struct fastrpc_buf *imem = NULL; - unsigned long imem_dma_attr = 0, irq_flags = 0; + unsigned long imem_dma_attr = 0; remote_arg_t ra[6]; int fds[6]; - struct fastrpc_apps *me = &gfa; - struct hlist_node *n = NULL; - struct fastrpc_file *fl_curr = NULL; unsigned int gid = 0, one_mb = 1024*1024; unsigned int dsp_userpd_memlen = 0; struct fastrpc_buf *init_mem; @@ -4076,20 +4073,6 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, if (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE) fl->is_unsigned_pd = true; - /* Validate that any existing sessions of process are of same pd type */ - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_for_each_entry_safe(fl_curr, n, &me->drivers, hn) { - if ((fl != fl_curr) && (fl->tgid == fl_curr->tgid) && (fl->cid == fl_curr->cid)) { - err = (fl->is_unsigned_pd != fl_curr->is_unsigned_pd) ? 
-ECONNREFUSED : 0; - break; - } - } - spin_unlock_irqrestore(&me->hlock, irq_flags); - if (err) { - ADSPRPC_ERR("existing session pd type %u not same as requested pd type %u \n", - fl_curr->is_unsigned_pd, fl->is_unsigned_pd); - goto bail; - } /* Check if file memory passed by userspace is valid */ VERIFY(err, access_ok((void __user *)init->file, init->filelen)); if (err) From 4198880546f1c7f670c822936a49d05da3927c58 Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Tue, 12 Sep 2023 23:38:07 +0530 Subject: [PATCH 096/146] msm: Add holi module support Add holi module to support holi target Change-Id: Ie7557e5d7dd725576286f08b4050b53d70cf8f27 --- BUILD.bazel | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index e7bf22fd24..4ebd7d5600 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -18,6 +18,10 @@ define_modules("blair", "consolidate") define_modules("blair", "gki") +define_modules("holi", "consolidate") + +define_modules("holi", "gki") + define_modules("cliffs", "consolidate") define_modules("cliffs", "gki") \ No newline at end of file From 3a1e7d811168a32b10171905503d724605064238 Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Fri, 22 Sep 2023 16:32:06 +0530 Subject: [PATCH 097/146] msm: adsprpc: Handle UAF in process shell memory Added flag to indicate memory used in process initialization. And, this memory would not removed in internal unmap to avoid UAF or double free. 
Change-Id: Ie470fe58ac334421d186feb41fa67bd24bb5efea Signed-off-by: DEEPAK SANNAPAREDDY --- dsp/adsprpc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2c289693a9..43648e97bd 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4351,6 +4351,8 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, mutex_lock(&fl->map_mutex); err = fastrpc_mmap_create(fl, -1, NULL, 0, init->mem, init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem); + if (mem) + mem->is_filemap = true; mutex_unlock(&fl->map_mutex); if (err || (!mem)) goto bail; From 98ca55499e8c3daac02d3f094fede3c88e0b996c Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Mon, 9 Oct 2023 11:48:56 +0530 Subject: [PATCH 098/146] msm: adsprpc : Fix use after free in fastrpc_internal_mem_unmap Thread 1 can make a to call fastrpc_mmap_create under internal mem map and release fl->map_mutex. Thread 2 can make call to internal mem unmap, acquire fl->map_mutex and get same map though fastrpc_mmap_remove. Thread 1 fail in fastrpc_mem_map_to_dsp jumps to bail and do map free. Thread 2 still holds same map which can lead use after free. Serialize fastrpc internal mem map and unmap. 
Change-Id: I54a3602914b43fc67635c0de193bd21aa13daaa3 Signed-off-by: DEEPAK SANNAPAREDDY --- dsp/adsprpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 43648e97bd..9903d4f48a 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5478,6 +5478,7 @@ int fastrpc_internal_mem_map(struct fastrpc_file *fl, int err = 0; struct fastrpc_mmap *map = NULL; + mutex_lock(&fl->internal_map_mutex); VERIFY(err, fl->dsp_proc_init == 1); if (err) { pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n", @@ -5516,6 +5517,7 @@ bail: mutex_unlock(&fl->map_mutex); } } + mutex_unlock(&fl->internal_map_mutex); return err; } @@ -5526,6 +5528,7 @@ int fastrpc_internal_mem_unmap(struct fastrpc_file *fl, struct fastrpc_mmap *map = NULL; size_t map_size = 0; + mutex_lock(&fl->internal_map_mutex); VERIFY(err, fl->dsp_proc_init == 1); if (err) { pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n", @@ -5572,6 +5575,7 @@ bail: mutex_unlock(&fl->map_mutex); } } + mutex_unlock(&fl->internal_map_mutex); return err; } From c37154e2e4b52d781ff432b46501742a6bcdc8e8 Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Mon, 9 Oct 2023 11:48:56 +0530 Subject: [PATCH 099/146] msm: adsprpc : Fix use after free in fastrpc_internal_mem_unmap Thread 1 can make a to call fastrpc_mmap_create under internal mem map and release fl->map_mutex. Thread 2 can make call to internal mem unmap, acquire fl->map_mutex and get same map though fastrpc_mmap_remove. Thread 1 fail in fastrpc_mem_map_to_dsp jumps to bail and do map free. Thread 2 still holds same map which can lead use after free. Serialize fastrpc internal mem map and unmap. 
Change-Id: I54a3602914b43fc67635c0de193bd21aa13daaa3 Signed-off-by: DEEPAK SANNAPAREDDY --- dsp/adsprpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2c289693a9..ac0c08618f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5476,6 +5476,7 @@ int fastrpc_internal_mem_map(struct fastrpc_file *fl, int err = 0; struct fastrpc_mmap *map = NULL; + mutex_lock(&fl->internal_map_mutex); VERIFY(err, fl->dsp_proc_init == 1); if (err) { pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n", @@ -5514,6 +5515,7 @@ bail: mutex_unlock(&fl->map_mutex); } } + mutex_unlock(&fl->internal_map_mutex); return err; } @@ -5524,6 +5526,7 @@ int fastrpc_internal_mem_unmap(struct fastrpc_file *fl, struct fastrpc_mmap *map = NULL; size_t map_size = 0; + mutex_lock(&fl->internal_map_mutex); VERIFY(err, fl->dsp_proc_init == 1); if (err) { pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n", @@ -5570,6 +5573,7 @@ bail: mutex_unlock(&fl->map_mutex); } } + mutex_unlock(&fl->internal_map_mutex); return err; } From f633ed34d135d24d8c415a89822b52b941f5170c Mon Sep 17 00:00:00 2001 From: Anvesh Jain P Date: Tue, 10 Oct 2023 12:42:50 +0530 Subject: [PATCH 100/146] Check product type before enabling driver Add condition to enable driver for non GVM target. Virtual fastrpc driver is used for target based on hypervisor, skipping driver compilation. 
Signed-off-by: Anvesh Jain P Change-Id: I1ac5c0e29f259cbd05f426ca51cd945b695078c9 --- dsp_kernel_board.mk | 4 +++- dsp_kernel_product.mk | 4 +++- product.mk | 2 ++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/dsp_kernel_board.mk b/dsp_kernel_board.mk index 12a10f0e40..fb39d962f5 100644 --- a/dsp_kernel_board.mk +++ b/dsp_kernel_board.mk @@ -1,7 +1,9 @@ ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) +ifneq ($(ENABLE_HYP), true) ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-adsprpc.ko #BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/frpc-trusted-adsprpc.ko BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko endif -endif \ No newline at end of file +endif +endif diff --git a/dsp_kernel_product.mk b/dsp_kernel_product.mk index 06a8832440..fb61d5653d 100644 --- a/dsp_kernel_product.mk +++ b/dsp_kernel_product.mk @@ -1,5 +1,7 @@ ifneq ($(TARGET_KERNEL_DLKM_DISABLE), true) +ifneq ($(ENABLE_HYP), true) PRODUCT_PACKAGES += frpc-adsprpc.ko #PRODUCT_PACKAGES += frpc_trusted-adsprpc.ko PRODUCT_PACKAGES += cdsp-loader.ko -endif \ No newline at end of file +endif +endif diff --git a/product.mk b/product.mk index 34d12e010c..24c9e25931 100644 --- a/product.mk +++ b/product.mk @@ -1,2 +1,4 @@ +ifneq ($(ENABLE_HYP), true) PRODUCT_PACKAGES += frpc-adsprpc.ko #PRODUCT_PACKAGES += cdsp-loader.ko +endif From 5065a3b8797add3ba3ae9389639f6a58375aa653 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Wed, 20 Sep 2023 18:10:20 +0530 Subject: [PATCH 101/146] msm: adsprpc : Mark tgid_frpc unuse after device unregister In fastrpc_file_free tgid_frpc is marked as unused before device unregister. And current tgid_frpc can be used by to other sessions from same process, which will lead to device register failures. To avoid this scenario, mark tgid_frpc available after device unregister. 
Change-Id: I6ba77af3a2b6d0d9aa961459dfe2bf163d5aede2 Signed-off-by: Santosh Sakore --- dsp/adsprpc.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2c289693a9..4c18ec3085 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5952,13 +5952,15 @@ skip_dump_wait: fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; - /* Reset the tgid usage to false */ - if (fl->tgid_frpc != -1) - frpc_tgid_usage_array[fl->tgid_frpc] = false; is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); if (!fl->sctx) { + spin_lock_irqsave(&me->hlock, irq_flags); + /* Reset the tgid usage to false */ + if (fl->tgid_frpc != -1) + frpc_tgid_usage_array[fl->tgid_frpc] = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); kfree(fl); return 0; } @@ -6001,6 +6003,12 @@ skip_dump_wait: if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); + spin_lock_irqsave(&me->hlock, irq_flags); + /* Reset the tgid usage to false */ + if (fl->tgid_frpc != -1) + frpc_tgid_usage_array[fl->tgid_frpc] = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); + VERIFY(err, VALID_FASTRPC_CID(cid)); if (!err && fl->sctx) fastrpc_session_free(&fl->apps->channel[cid], fl->sctx); From 5c26a308b1c8ca31f10a91bf091b282cb469528c Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Wed, 25 Oct 2023 18:27:15 +0530 Subject: [PATCH 102/146] msm: adsprpc: fix memory leak scenario in print debug data Add proper return path to ensure that allocated memory for gmsglog variables is freed before exiting. In error cases when returning from the function without proper exit handling, not freeing allocated memory leads to memory leak. 
Change-Id: I718a6a3d1fef8598cb67e7d627bde00a8b009324 Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 9903d4f48a..f2b2a2e29b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3512,9 +3512,13 @@ static int fastrpc_wait_on_async_queue( struct hlist_node *n; read_async_job: + if (!fl) { + err = -EBADF; + goto bail; + } interrupted = wait_event_interruptible(fl->async_wait_queue, atomic_read(&fl->async_queue_job_count)); - if (!fl || fl->file_close >= FASTRPC_PROCESS_EXIT_START) { + if (fl->file_close >= FASTRPC_PROCESS_EXIT_START) { err = -EBADF; goto bail; } @@ -3598,12 +3602,12 @@ static int fastrpc_wait_on_notif_queue( struct smq_notif_rsp *notif = NULL, *inotif = NULL, *n = NULL; read_notif_status: + if (!fl) { + err = -EBADF; + goto bail; + } interrupted = wait_event_interruptible(fl->proc_state_notif.notif_wait_queue, atomic_read(&fl->proc_state_notif.notif_queue_count)); - if (!fl) { - err = -EBADF; - goto bail; - } if (fl->exit_notif) { err = -EFAULT; goto bail; @@ -7613,20 +7617,20 @@ static void fastrpc_print_debug_data(int cid) VERIFY(err, NULL != (gmsg_log_tx = kzalloc(MD_GMSG_BUFFER, GFP_KERNEL))); if (err) { err = -ENOMEM; - return; + goto free_buf; } VERIFY(err, NULL != (gmsg_log_rx = kzalloc(MD_GMSG_BUFFER, GFP_KERNEL))); if (err) { err = -ENOMEM; - return; + goto free_buf; } chan = &me->channel[cid]; if ((!chan) || (!chan->buf)) - return; + goto free_buf; mini_dump_buff = chan->buf->virt; if (!mini_dump_buff) - return; + goto free_buf; if (chan) { tx_index = chan->gmsg_log.tx_index; @@ -7772,6 +7776,7 @@ static void fastrpc_print_debug_data(int cid) "gmsg_log_rx:\n %s\n", gmsg_log_rx); if (chan && chan->buf) chan->buf->size = strlen(mini_dump_buff); +free_buf: kfree(gmsg_log_tx); kfree(gmsg_log_rx); } From b18ae2cb6bc244ad2fe7ec7a9c1f2569dcc15b09 Mon Sep 17 00:00:00 2001 From: quic_anane Date: Wed, 8 Nov 2023 12:12:11 +0530 
Subject: [PATCH 103/146] msm: adsprpc: Fix UAF in fastrpc_print_debug_data In fastrpc_print_debug_data accessing fl will cause UAF condition if is_ramdump_enable is not set. In this case, there won't be any wait condition in fastrpc_file_free so fl will be freed in between accessing data. To fix this, check is_ramdump_enable before accessing fl data. Signed-off-by: quic_anane Change-Id: Ia4670a73f887e17afae3cfeb7e6c6457b3337ae9 --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2d931b6388..66d741f430 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7646,7 +7646,7 @@ static void fastrpc_print_debug_data(int cid) } spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { - if (fl->cid == cid) { + if (fl->cid == cid && fl->is_ramdump_pend) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - From 47e1fdc035252531a9df50d0a218bbb4b01eb7cb Mon Sep 17 00:00:00 2001 From: DEEPAK SANNAPAREDDY Date: Mon, 6 Nov 2023 18:14:28 +0530 Subject: [PATCH 104/146] msm: Add pitti module support Add pitti module to support pitti target. Change-Id: I89984961218279dd0f510bca18c6a1899d5ac59a Signed-off-by: DEEPAK SANNAPAREDDY --- BUILD.bazel | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/BUILD.bazel b/BUILD.bazel index 4ebd7d5600..c25515f62b 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -24,4 +24,9 @@ define_modules("holi", "gki") define_modules("cliffs", "consolidate") -define_modules("cliffs", "gki") \ No newline at end of file +define_modules("cliffs", "gki") + +define_modules("pitti", "consolidate") + +define_modules("pitti", "gki") + From c46358d49d1a1b800ec9c25231fb6e9554e0ef58 Mon Sep 17 00:00:00 2001 From: ntarte Date: Mon, 27 Nov 2023 16:02:15 +0530 Subject: [PATCH 105/146] msm: adsprpc: Fix UAF in fastrpc_print_debug_data In fastrpc_print_debug_data accessing fl will cause UAF condition if is_ramdump_enable is not set. 
In this case, there won't be any wait condition in fastrpc_file_free so fl will be freed in between accessing data. To fix this, check is_ramdump_enable before accessing fl data. Signed-off-by: quic_anane Change-Id: Ia4670a73f887e17afae3cfeb7e6c6457b3337ae9 (cherry picked from commit b18ae2cb6bc244ad2fe7ec7a9c1f2569dcc15b09) --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 2d931b6388..66d741f430 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7646,7 +7646,7 @@ static void fastrpc_print_debug_data(int cid) } spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { - if (fl->cid == cid) { + if (fl->cid == cid && fl->is_ramdump_pend) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - From c5d0af46d762d851b6a1557987de62eb71f6fc0b Mon Sep 17 00:00:00 2001 From: quic_anane Date: Mon, 11 Dec 2023 15:15:03 +0530 Subject: [PATCH 106/146] dsp-kernel: add session id for SSR notifications Currently driver is passing session id as zero when SSR notification is queued. This will cause issues in case of multisession, only default session will get the notification. Add a change to pass proper session ID to make sure all the sessions are getting notified. 
Change-Id: I1f3bb7169ff9c7b725e3a69dc098c56197e4cbaf Signed-off-by: ANANDU KRISHNAN E --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 66d741f430..4f14ec6ac5 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2331,7 +2331,7 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid) spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { if (fl->cid == cid) { - fastrpc_queue_pd_status(fl, cid, FASTRPC_DSP_SSR, 0); + fastrpc_queue_pd_status(fl, cid, FASTRPC_DSP_SSR, fl->sessionid); fastrpc_notify_users(fl); } } From a976ff28ccb56d1b227a857a7cf5f92cd60985f2 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Mon, 18 Dec 2023 12:32:29 +0530 Subject: [PATCH 107/146] msm: adsprpc: Avoid double free on map Decrement and check the ref count of map inside the lock. Otherwise, two threads may free the same map. Change-Id: Iae758752c0d3c296f155f3200adb783c92100a70 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4f14ec6ac5..4867a6bad8 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1011,14 +1011,13 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) map->refs--; if (!map->refs && !map->is_persistent && !map->ctx_refs) hlist_del_init(&map->hn); - spin_unlock_irqrestore(&me->hlock, irq_flags); if (map->refs > 0) { ADSPRPC_WARN( "multiple references for remote heap size %zu va 0x%lx ref count is %d\n", map->size, map->va, map->refs); + spin_unlock_irqrestore(&me->hlock, irq_flags); return; } - spin_lock_irqsave(&me->hlock, irq_flags); if (map->is_persistent && map->in_use) map->in_use = false; spin_unlock_irqrestore(&me->hlock, irq_flags); From a7c28cef64f5979281ea381ddee9374b36cc2391 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Tue, 19 Dec 2023 11:43:28 +0530 Subject: [PATCH 108/146] dsp-kernel: Check pdrcount count along 
with pd status Currently, only pd status is checked before sending any request to DSP. On pd down notification all the pending contexts are completed with connection reset error. But, if context gets created after the pd down callback, it is not returned with connection reset error. If the context is regarding pd attach, daemon will get attached to DSP pd. And in this scenario, if daemon gets killed and reconnection happens, ownership of init memory will be assigned back to HLOS, which will cause SMMU fault. Check pdr count for audioPD before sending any request to DSP. Change-Id: Iadf1c9ca718659086fcd6dc8db105f48337933f6 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 4f14ec6ac5..b2c9a0927d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6830,6 +6830,13 @@ static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) err = fastrpc_get_spd_session(sloc_name, &session, &cid); if (err || cid != fl->cid) goto bail; + if ((!strcmp(fl->servloc_name, + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME)) && + (me->channel[cid].spd[session].pdrcount != + me->channel[cid].spd[session].prevpdrcount)) { + err = -ECONNRESET; + goto bail; + } #if IS_ENABLED(CONFIG_QCOM_PDR_HELPERS) if (!strcmp(fl->servloc_name, AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME) || !strcmp(fl->servloc_name, From 698dfba6080d00a954785538f154cf501b31e9c7 Mon Sep 17 00:00:00 2001 From: Edgar Flores Date: Tue, 16 Jan 2024 14:14:27 -0800 Subject: [PATCH 109/146] msm: adsprpc: Set buffer type in TVM to non-secure Customer is seeing issue when sharing buffer to secure PD. Buffer is being set to 'secure buffer type' by trusted driver which is invalid in TVM. There are no 'secure' buffers on TVM. All buffers in TVM need to be marked as 'non-secure'. Fix is to explicitly mark buffers as 'non-secure' for TVM only. 
Change-Id: I80c70bc59dcbd78be4119c1855fd4e5fa2e7d5cb --- dsp/adsprpc.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 7607b5551b..24b6d57cdb 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1178,13 +1178,6 @@ static int get_buffer_attr(struct dma_buf *buf, bool *exclusive_access, bool *hl *exclusive_access = true; #if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) - /* - * PVM (HLOS) can share buffers with TVM. In that case, - * it is expected to relinquish its ownership to those buffers - * before sharing. But if the PVM still retains access, then - * these buffers cannot be used by TVM. - */ - for (int ii = 0; ii < vmids_list_len; ii++) { if (vmids_list[ii] == VMID_HLOS) { *hlos_access = true; @@ -1209,8 +1202,13 @@ static int set_buffer_secure_type(struct fastrpc_mmap *map) goto bail; } #if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + /* + * PVM (HLOS) can share buffers with TVM, in case buffers are to be shared to secure PD, + * PVM is expected to relinquish its ownership to those buffers before sharing. + * If PVM still retains access, then those buffers cannot be shared to secure PD. + */ if (hlos_access) { - ADSPRPC_ERR("Sharing HLOS buffer (fd %d) not allowed on TVM\n", map->fd); + ADSPRPC_ERR("Buffers with HLOS access (fd %d) are not allowed on TVM\n", map->fd); err = -EACCES; goto bail; } @@ -1225,7 +1223,12 @@ static int set_buffer_secure_type(struct fastrpc_mmap *map) * - Since it is a secure environment by default, there are no explicit "secure" buffers * - All buffers are marked "non-secure" */ +#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED) + map->secure = 0; +#else map->secure = (exclusive_access) ? 0 : 1; +#endif + bail: return err; } From 23611a16262d7f3b384d542941c5e1018eca9ddd Mon Sep 17 00:00:00 2001 From: quic_anane Date: Thu, 25 Jan 2024 16:43:12 +0530 Subject: [PATCH 110/146] dsp-kernel: Check for user input buffer Add check for user input buffer to fix improper access. 
Signed-off-by: quic_anane --- dsp/adsprpc.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 24b6d57cdb..e93e460c1a 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2612,6 +2612,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) struct fastrpc_mmap *map = ctx->maps[i]; uint64_t buf = ptr_to_uint64(lpra[i].buf.pv); size_t len = lpra[i].buf.len; + uint64_t buf_start = 0; rpra[i].buf.pv = 0; rpra[i].buf.len = len; @@ -2633,7 +2634,17 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) up_read(¤t->mm->mmap_lock); goto bail; } - offset = buf_page_start(buf) - vma->vm_start; + buf_start = buf_page_start(buf); + VERIFY(err, vma->vm_start <= buf_start); + if (err) { + up_read(¤t->mm->mmap_lock); + ADSPRPC_ERR( + "buffer VA invalid for fd %d, IPA 0x%llx, VA 0x%llx, vma start 0x%llx\n", + map->fd, map->phys, map->va, vma->vm_start); + err = -EFAULT; + goto bail; + } + offset = buf_start - vma->vm_start; up_read(¤t->mm->mmap_lock); VERIFY(err, offset + len <= (uintptr_t)map->size); if (err) { From 8b49e1eaa0de88bb5542f1a80ab622268ac712f0 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Fri, 12 Jan 2024 10:48:23 +0530 Subject: [PATCH 111/146] msm: Add volcano module support Add volcano module to support volcano target Signed-off-by: Abhishek Singh Change-Id: I02b1c9a14370ceb5bf2ae495e15be7c54f1a3bb6 --- BUILD.bazel | 3 +++ 1 file changed, 3 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index c25515f62b..e18d78aeb2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -30,3 +30,6 @@ define_modules("pitti", "consolidate") define_modules("pitti", "gki") +define_modules("volcano", "consolidate") + +define_modules("volcano", "gki") From 941b3f835f9f4e97c97a2944dde7be83bc83cc5d Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Fri, 16 Feb 2024 11:17:03 +0530 Subject: [PATCH 112/146] msm: adsprpc: Free the memory allocated for status notification Currently, memory allocated for 
status notification is only freed by the notif thread. If notif thread exits, notif entries will not be freed. Free the notif entries while closing the fastrpc file. Change-Id: I8e715a4c449a595ce492379bfc50eaf456bbccf6 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index e93e460c1a..e63218c60e 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2369,6 +2369,8 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl) struct fastrpc_ctx_lst *clst = &fl->clst; struct smq_invoke_ctx *ictx = NULL, *ctxfree; struct hlist_node *n; + unsigned long irq_flags = 0; + struct smq_notif_rsp *inotif = NULL, *n1 = NULL; do { ctxfree = NULL; @@ -2396,6 +2398,14 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl) if (ctxfree) context_free(ctxfree); } while (ctxfree); + + spin_lock_irqsave(&fl->proc_state_notif.nqlock, irq_flags); + list_for_each_entry_safe(inotif, n1, &clst->notif_queue, notifn) { + list_del_init(&inotif->notifn); + atomic_sub(1, &fl->proc_state_notif.notif_queue_count); + kfree(inotif); + } + spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, irq_flags); } static int fastrpc_file_free(struct fastrpc_file *fl); From 207899cb03a4402168cd658dcf38e9f4686fdbf9 Mon Sep 17 00:00:00 2001 From: quic_anane Date: Thu, 25 Jan 2024 16:43:12 +0530 Subject: [PATCH 113/146] dsp-kernel: Check for user input buffer Add check for user input buffer to fix improper access. 
Signed-off-by: quic_anane (cherry picked from commit 23611a16262d7f3b384d542941c5e1018eca9ddd) Change-Id: I888ba99d81ca4659858193abfdb16706c989d1c3 --- dsp/adsprpc.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 24b6d57cdb..e93e460c1a 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2612,6 +2612,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) struct fastrpc_mmap *map = ctx->maps[i]; uint64_t buf = ptr_to_uint64(lpra[i].buf.pv); size_t len = lpra[i].buf.len; + uint64_t buf_start = 0; rpra[i].buf.pv = 0; rpra[i].buf.len = len; @@ -2633,7 +2634,17 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) up_read(¤t->mm->mmap_lock); goto bail; } - offset = buf_page_start(buf) - vma->vm_start; + buf_start = buf_page_start(buf); + VERIFY(err, vma->vm_start <= buf_start); + if (err) { + up_read(¤t->mm->mmap_lock); + ADSPRPC_ERR( + "buffer VA invalid for fd %d, IPA 0x%llx, VA 0x%llx, vma start 0x%llx\n", + map->fd, map->phys, map->va, vma->vm_start); + err = -EFAULT; + goto bail; + } + offset = buf_start - vma->vm_start; up_read(¤t->mm->mmap_lock); VERIFY(err, offset + len <= (uintptr_t)map->size); if (err) { From a4befa3f75b74ef6978a09b735e91a3954ae85d8 Mon Sep 17 00:00:00 2001 From: rnallago Date: Mon, 4 Mar 2024 18:09:24 +0530 Subject: [PATCH 114/146] adsprpc: Add check to prevent out of bound access Add -ve value check for index to prevent the array out of bound access. 
Change-Id: I0d23e2cb258227ef76779d82ec2c8f6b9cf7f95f Signed-off-by: rnallago --- dsp/adsprpc_socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc_socket.c b/dsp/adsprpc_socket.c index 02c3551591..bd4ed51cdd 100644 --- a/dsp/adsprpc_socket.c +++ b/dsp/adsprpc_socket.c @@ -139,7 +139,7 @@ inline int verify_transport_device(int cid, int tvm_remote_domain) struct frpc_transport_session_control *session_control = NULL; remote_domain = tvm_remote_domain; - VERIFY(err, remote_domain < MAX_REMOTE_ID); + VERIFY(err, remote_domain >= 0 && remote_domain < MAX_REMOTE_ID); if (err) { err = -ECHRNG; goto bail; @@ -393,7 +393,7 @@ int fastrpc_transport_send(int cid, void *rpc_msg, uint32_t rpc_msg_size, int tv struct kvec msg = {0}; remote_domain = tvm_remote_domain; - VERIFY(err, remote_domain < MAX_REMOTE_ID); + VERIFY(err, remote_domain >= 0 && remote_domain < MAX_REMOTE_ID); if (err) { err = -ECHRNG; goto bail; From 4e20907ac2f0d67d31bf403233727932ad915b5e Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Wed, 28 Feb 2024 13:53:51 +0530 Subject: [PATCH 115/146] msm: adsprpc: Handle UAF in fastrpc_mmap_remove_ssr Currently unlocking the spinlock during maps list iteration can lead to use after free. Fix is to lock, read one map from list, stop iteration and unlock, repeate same for all the maps complete in the list. 
Acked-by: Ramesh Nallagopu Change-Id: I834bdcb9dd55a33f6308188ec1f844b7d81cb30e Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 212 ++++++++++++++++++++++--------------------- dsp/adsprpc_shared.h | 1 + 2 files changed, 112 insertions(+), 101 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index e63218c60e..fab0c7e5ce 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5193,13 +5193,95 @@ bail: return err; } +static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, int locked) +{ + struct fastrpc_mmap *match = map; + int err = 0, ret = 0; + struct fastrpc_apps *me = &gfa; + struct qcom_dump_segment ramdump_segments_rh; + struct list_head head; + unsigned long irq_flags = 0; + + if (map->is_persistent && map->in_use) { + struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; + uint64_t phys = map->phys; + size_t size = map->size; + + //scm assign it back to HLOS + if (rhvm->vmid) { + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms = {0}; + uint32_t i = 0; + + for (i = 0; i < rhvm->vmcount; i++) { + src_perms |= BIT(rhvm->vmid[i]); + } + + dst_perms.vmid = QCOM_SCM_VMID_HLOS; + dst_perms.perm = QCOM_SCM_PERM_RWX; + err = qcom_scm_assign_mem(phys, (uint64_t)size, + &src_perms, &dst_perms, 1); + } + if (err) { + ADSPRPC_ERR( + "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", + err, phys, size); + err = -EADDRNOTAVAIL; + return err; + } + spin_lock_irqsave(&me->hlock, irq_flags); + map->in_use = false; + /* + * decrementing refcount for persistent mappings + * as incrementing it in fastrpc_get_persistent_map + */ + map->refs--; + spin_unlock_irqrestore(&me->hlock, irq_flags); + } + if (!match->is_persistent) { + if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { + err = fastrpc_munmap_rh(match->phys, + match->size, match->flags); + } else if (match->flags == ADSP_MMAP_HEAP_ADDR) { + if (fl) + err = fastrpc_munmap_on_dsp_rh(fl, match->phys, + match->size, match->flags, 0); + else { + pr_err("Cannot communicate with DSP, ADSP is 
down\n"); + fastrpc_mmap_add(match); + } + } + if (err) + return err; + } + memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); + ramdump_segments_rh.da = match->phys; + ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); + ramdump_segments_rh.size = match->size; + INIT_LIST_HEAD(&head); + list_add(&ramdump_segments_rh.node, &head); + if (me->dev && dump_enabled()) { + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); + if (ret < 0) + pr_err("adsprpc: %s: unable to dump heap (err %d)\n", + __func__, ret); + } + if (!match->is_persistent) { + if (!locked && fl) + mutex_lock(&fl->map_mutex); + fastrpc_mmap_free(match, 0); + if (!locked && fl) + mutex_unlock(&fl->map_mutex); + } + return 0; +} + static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) { struct fastrpc_mmap *match = NULL, *map = NULL; struct hlist_node *n = NULL; - int err = 0, ret = 0, lock = 0; + int err = 0; struct fastrpc_apps *me = &gfa; - struct qcom_dump_segment ramdump_segments_rh; struct list_head head; unsigned long irq_flags = 0; @@ -5211,115 +5293,43 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) goto bail; } } - spin_lock_irqsave(&me->hlock, irq_flags); - lock = 1; - hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (!lock) { - spin_lock_irqsave(&me->hlock, irq_flags); - lock = 1; - } - /* In hibernation suspend case fl is NULL, check !fl to cleanup */ - if (!fl || (fl && map->servloc_name && fl->servloc_name - && !strcmp(map->servloc_name, fl->servloc_name))) { - match = map; - if (map->is_persistent && map->in_use) { - struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; - uint64_t phys = map->phys; - size_t size = map->size; - if (lock) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - lock = 0; - } - //scm assign it back to HLOS - if (rhvm->vmid) { - u64 src_perms = 0; - struct qcom_scm_vmperm dst_perms = {0}; - uint32_t i = 0; + do { + match = NULL; + spin_lock_irqsave(&me->hlock, irq_flags); - for (i = 0; 
i < rhvm->vmcount; i++) { - src_perms |= BIT(rhvm->vmid[i]); - } - - dst_perms.vmid = QCOM_SCM_VMID_HLOS; - dst_perms.perm = QCOM_SCM_PERM_RWX; - err = qcom_scm_assign_mem(phys, (uint64_t)size, - &src_perms, &dst_perms, 1); - } - if (err) { - ADSPRPC_ERR( - "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", - err, phys, size); - err = -EADDRNOTAVAIL; - goto bail; - } - if (!lock) { - spin_lock_irqsave(&me->hlock, irq_flags); - lock = 1; - } - map->in_use = false; - /* - * decrementing refcount for persistent mappings - * as incrementing it in fastrpc_get_persistent_map - */ - map->refs--; - } - if (!match->is_persistent) - hlist_del_init(&map->hn); - } - if (lock) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - lock = 0; - } - - if (match) { - if (!match->is_persistent) { - if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - err = fastrpc_munmap_rh(match->phys, - match->size, match->flags); - } else if (match->flags == ADSP_MMAP_HEAP_ADDR) { - if (fl) - err = fastrpc_munmap_on_dsp_rh(fl, match->phys, - match->size, match->flags, 0); - else { - pr_err("Cannot communicate with DSP, ADSP is down\n"); - fastrpc_mmap_add(match); - } - } - } - memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); - ramdump_segments_rh.da = match->phys; - ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); - ramdump_segments_rh.size = match->size; - INIT_LIST_HEAD(&head); - list_add(&ramdump_segments_rh.node, &head); - if (me->dev && dump_enabled()) { - ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); - if (ret < 0) - pr_err("adsprpc: %s: unable to dump heap (err %d)\n", - __func__, ret); - } - if (!match->is_persistent) { - if (!locked) - mutex_lock(&fl->map_mutex); - fastrpc_mmap_free(match, 0); - if (!locked) - mutex_unlock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + if (!map->is_dumped && (!fl || + (fl && map->servloc_name && fl->servloc_name && + !strcmp(map->servloc_name, fl->servloc_name)))) { + map->is_dumped 
= true; + match = map; + if (!match->is_persistent) + hlist_del_init(&map->hn); + break; } } - } -bail: - if (lock) { spin_unlock_irqrestore(&me->hlock, irq_flags); - lock = 0; - } + if (match) + err = fastrpc_mmap_dump(match, fl, locked); + } while (match && !err); + +bail: if (err && match) { - if (!locked) + if (!locked && fl) mutex_lock(&fl->map_mutex); fastrpc_mmap_add(match); - if (!locked) + if (!locked && fl) mutex_unlock(&fl->map_mutex); } + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + if (map->is_dumped && ((!fl && map->servloc_name) || + (fl && map->servloc_name && fl->servloc_name && + !strcmp(map->servloc_name, fl->servloc_name)))) + map->is_dumped = false; + } + spin_unlock_irqrestore(&me->hlock, irq_flags); return err; } diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 627678b7dd..6414992946 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -785,6 +785,7 @@ struct fastrpc_mmap { struct timespec64 map_end_time; /* Mapping for fastrpc shell */ bool is_filemap; + bool is_dumped; /* flag to indicate map is dumped during SSR */ char *servloc_name; /* Indicate which daemon mapped this */ /* Indicates map is being used by a pending RPC call */ unsigned int ctx_refs; From cc9738786adc5e8e78d0db1f68e299275b301c5a Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Tue, 27 Feb 2024 15:54:03 +0530 Subject: [PATCH 116/146] msm: adsprpc : Fix use after free in fastrpc_update_ramdump_status Thread1 can free up the fl->init memory in fastrpc_init_create_dynamic_process with fl spin lock, same time thread2 adding fl->init_mem to chan->initmems list with global spin lock in fastrpc_update_ramdump_status can lead to use after free in fastrpc_ramdump_collection. Fix is to use global spin lock while handling fl->init_mem. 
Change-Id: I7a497dc962b6967a4d594a3acce55f8ce0eb3a55 Signed-off-by: rnallago --- dsp/adsprpc.c | 49 ++++++++++++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index e63218c60e..34d4eba43b 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -4043,7 +4043,7 @@ bail: static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init_attrs *uproc) { - int err = 0, memlen = 0, mflags = 0, locked = 0; + int err = 0, memlen = 0, mflags = 0, locked = 0, glocked = 0; struct fastrpc_ioctl_invoke_async ioctl; struct fastrpc_ioctl_init *init = &uproc->init; /* First page for init-mem and second page for proc-attrs */ @@ -4057,6 +4057,8 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, unsigned int dsp_userpd_memlen = 0; struct fastrpc_buf *init_mem; struct fastrpc_mmap *sharedbuf_map = NULL; + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; struct { int pgid; @@ -4161,20 +4163,6 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl, ADSPRPC_ERR("donated memory allocated in userspace\n"); goto bail; } - /* Free any previous donated memory */ - spin_lock(&fl->hlock); - locked = 1; - if (fl->init_mem) { - init_mem = fl->init_mem; - fl->init_mem = NULL; - spin_unlock(&fl->hlock); - locked = 0; - fastrpc_buf_free(init_mem, 0); - } - if (locked) { - spin_unlock(&fl->hlock); - locked = 0; - } /* Allocate DMA buffer in kernel for donating to remote process * Unsigned PD requires additional memory because of the @@ -4278,13 +4266,21 @@ bail: if (err) { ADSPRPC_ERR("failed with err %d\n", err); fl->dsp_process_state = PROCESS_CREATE_DEFAULT; + spin_unlock(&fl->hlock); + locked = 0; + spin_lock_irqsave(&me->hlock, irq_flags); + glocked = 1; if (!IS_ERR_OR_NULL(fl->init_mem)) { init_mem = fl->init_mem; fl->init_mem = NULL; - spin_unlock(&fl->hlock); - locked = 0; + spin_unlock_irqrestore(&me->hlock, irq_flags); + 
glocked = 0; fastrpc_buf_free(init_mem, 0); } + if (glocked) { + spin_unlock_irqrestore(&me->hlock, irq_flags); + glocked = 0; + } } else { fl->dsp_process_state = PROCESS_CREATE_SUCCESS; } @@ -5939,6 +5935,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl) unsigned long irq_flags = 0; bool is_locked = false; int i; + struct fastrpc_buf *init_mem = NULL; if (!fl) return 0; @@ -6010,8 +6007,22 @@ skip_dump_wait: wake_up_interruptible(&fl->proc_state_notif.notif_wait_queue); spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags); - if (!IS_ERR_OR_NULL(fl->init_mem)) - fastrpc_buf_free(fl->init_mem, 0); + if (!is_locked) { + spin_lock_irqsave(&fl->apps->hlock, irq_flags); + is_locked = true; + } + if (!IS_ERR_OR_NULL(fl->init_mem)) { + init_mem = fl->init_mem; + fl->init_mem = NULL; + is_locked = false; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + fastrpc_buf_free(init_mem, 0); + } + if (is_locked) { + is_locked = false; + spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); + } + fastrpc_context_list_dtor(fl); fastrpc_cached_buf_list_free(fl); if (!IS_ERR_OR_NULL(fl->hdr_bufs)) From 1d05790e68d8d0a3bbc686ca3232f6e54dfc000e Mon Sep 17 00:00:00 2001 From: quic_anane Date: Thu, 29 Feb 2024 11:52:55 +0530 Subject: [PATCH 117/146] dsp-kernel: Handle race-condition in dsp signal The `fastrpc_dspsignal_wait` function currently checks the signal state before waiting for a signal from the DSP. However, if the signal is already received before the check, it results in an infinite loop, causing excessive resource usage. This change addresses the race condition by checking both the pending and signaled states. If the signal is not in the pending state, it directly checks for the signaled state, resets the states, and returns to avoid looping. 
Change-Id: I00f80780cccf5a7b0e95f961607042efe62d9d30 Signed-off-by: quic_anane --- dsp/adsprpc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index e63218c60e..94751cb8a7 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7040,6 +7040,11 @@ int fastrpc_dspsignal_wait(struct fastrpc_file *fl, if (s->state != DSPSIGNAL_STATE_PENDING) { if ((s->state == DSPSIGNAL_STATE_CANCELED) || (s->state == DSPSIGNAL_STATE_UNUSED)) err = -EINTR; + if (s->state == DSPSIGNAL_STATE_SIGNALED) { + /* Signal already received from DSP. Reset signal state and return */ + s->state = DSPSIGNAL_STATE_PENDING; + reinit_completion(&s->comp); + } spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); DSPSIGNAL_VERBOSE("Signal %u in state %u, complete wait immediately", signal_id, s->state); From 26432925463ba78fc27090f4ba8416854927f4bb Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Thu, 7 Mar 2024 17:27:24 +0530 Subject: [PATCH 118/146] adsprpc: Skip ramdump during PDR The current code collects RAM dumps for both DSP SSR and PDR, but not required during PDR. Fix is to collect it for SSR and skip it for PDR. 
Change-Id: Ibcc9c7291488b67fa0570e86eef5867ba7fcb2ed Signed-off-by: rnallago --- dsp/adsprpc.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index fab0c7e5ce..f31ecfa1c9 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3983,7 +3983,7 @@ bail: static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl); static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags); -static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked); +static int fastrpc_dsp_restart_handler(struct fastrpc_file *fl, int locked, bool dump_req); /* * This function makes a call to create a thread group in the root @@ -5193,7 +5193,7 @@ bail: return err; } -static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, int locked) +static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, int locked, bool dump_req) { struct fastrpc_mmap *match = map; int err = 0, ret = 0; @@ -5254,17 +5254,19 @@ static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, if (err) return err; } - memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); - ramdump_segments_rh.da = match->phys; - ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); - ramdump_segments_rh.size = match->size; - INIT_LIST_HEAD(&head); - list_add(&ramdump_segments_rh.node, &head); - if (me->dev && dump_enabled()) { - ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); - if (ret < 0) - pr_err("adsprpc: %s: unable to dump heap (err %d)\n", - __func__, ret); + if (dump_req) { + memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); + ramdump_segments_rh.da = match->phys; + ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); + ramdump_segments_rh.size = match->size; + INIT_LIST_HEAD(&head); + list_add(&ramdump_segments_rh.node, &head); + if (me->dev && dump_enabled()) { + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); + 
if (ret < 0) + pr_err("adsprpc: %s: unable to dump heap (err %d)\n", + __func__, ret); + } } if (!match->is_persistent) { if (!locked && fl) @@ -5276,7 +5278,7 @@ static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, return 0; } -static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) +static int fastrpc_dsp_restart_handler(struct fastrpc_file *fl, int locked, bool dump_req) { struct fastrpc_mmap *match = NULL, *map = NULL; struct hlist_node *n = NULL; @@ -5311,7 +5313,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) } spin_unlock_irqrestore(&me->hlock, irq_flags); if (match) - err = fastrpc_mmap_dump(match, fl, locked); + err = fastrpc_mmap_dump(match, fl, locked, dump_req); } while (match && !err); bail: @@ -5357,7 +5359,7 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl) } if (me->channel[cid].spd[session].pdrcount != me->channel[cid].spd[session].prevpdrcount) { - err = fastrpc_mmap_remove_ssr(fl, 0); + err = fastrpc_dsp_restart_handler(fl, 0, false); if (err) ADSPRPC_WARN("failed to unmap remote heap (err %d)\n", err); @@ -6382,7 +6384,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) me->channel[cid].prevssrcount) { mutex_unlock(&me->channel[cid].smd_mutex); mutex_lock(&fl->map_mutex); - err = fastrpc_mmap_remove_ssr(fl, 1); + err = fastrpc_dsp_restart_handler(fl, 1, true); mutex_unlock(&fl->map_mutex); if (err) ADSPRPC_WARN( @@ -8528,7 +8530,7 @@ static int fastrpc_hibernation_suspend(struct device *dev) if (of_device_is_compatible(dev->of_node, "qcom,msm-fastrpc-compute")) { - err = fastrpc_mmap_remove_ssr(NULL, 0); + err = fastrpc_dsp_restart_handler(NULL, 0, true); if (err) ADSPRPC_WARN("failed to unmap remote heap (err %d)\n", err); From 8f36f9526b384247f8592207c123fafec4fce120 Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Tue, 26 Mar 2024 16:34:01 +0530 Subject: [PATCH 119/146] dsp-kernel: Allow audio PD kill call after audio PDR 
Currently, after audio PDR, all invoke calls are discarded in pd status check, due to this kill does not reach to DSP to clean up the ftq group in guestOS. Fix is to discard only audio pd attachment and allow kill message to clean DSP GuestOS resources. Change-Id: Ica8bff6ed6e81eab4119c59c46fb6be9c0b79704 Signed-off-by: rnallago --- dsp/adsprpc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index b8bfabcf11..08b51bd95d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6876,10 +6876,15 @@ static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name) err = fastrpc_get_spd_session(sloc_name, &session, &cid); if (err || cid != fl->cid) goto bail; + /* + * Audio PD attachment is not allowed after PDR. + * Allow kill message after PDR to clean DSP guestOS resources. + */ if ((!strcmp(fl->servloc_name, AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME)) && (me->channel[cid].spd[session].pdrcount != - me->channel[cid].spd[session].prevpdrcount)) { + me->channel[cid].spd[session].prevpdrcount) && + !fl->dsp_proc_init) { err = -ECONNRESET; goto bail; } From 963c25dc254d1e531ba496c056b38500060362e0 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Wed, 3 Apr 2024 09:23:35 +0530 Subject: [PATCH 120/146] dsp-kernel: Additon of krefs to fastrpc process structure Add krefs reference counters to fastrpc process objects. Process structures are used in multiple places and passed around. Maintaining krefs helps ensure that the release routine for structure is called after last reference to the pointer is done. 
Co-developed-by: Abhinav Parihar Change-Id: I5fd35af3c5581bf69ebfddf56951d76d9a2d10fb Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 132 +++++++++++++++++++++++++++++++++++-------- dsp/adsprpc_shared.h | 1 + 2 files changed, 109 insertions(+), 24 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 08b51bd95d..5c07016417 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -421,6 +421,9 @@ static inline void fastrpc_update_rxmsg_buf(struct fastrpc_channel_ctx *chan, uint64_t ctx, int retval, uint32_t rsp_flags, uint32_t early_wake_time, uint32_t ver, int64_t ns, uint64_t xo_time_in_us); +static int fastrpc_file_get(struct fastrpc_file *fl); +static void fastrpc_file_put(struct fastrpc_file *fl); + /** * fastrpc_device_create - Create device for the fastrpc process file * @fl : Fastrpc process file @@ -2142,11 +2145,17 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif struct hlist_node *n; bool is_process_found = false; unsigned long irq_flags = 0; + int err = 0; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { if (fl->tgid_frpc == notif->pid) { is_process_found = true; + err = fastrpc_file_get(fl); + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + is_process_found = false; + } break; } } @@ -2155,6 +2164,7 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif if (!is_process_found) return; fastrpc_queue_pd_status(fl, domain, notif->status, fl->sessionid); + fastrpc_file_put(fl); } static void context_notify_user(struct smq_invoke_ctx *ctx, @@ -2303,23 +2313,31 @@ static void fastrpc_ramdump_collection(int cid) INIT_LIST_HEAD(&head); list_add(&ramdump_entry.node, &head); - if (fl && fl->sctx && fl->sctx->smmu.dev) - ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); - else { - if (me->dev != NULL) - ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); - } - if (ret < 0) - ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err 
%d)\n", - __func__, ret); - - hlist_del_init(&buf->hn_init); if (fl) { - spin_lock_irqsave(&me->hlock, irq_flags); - if (fl->file_close) - complete(&fl->work); - fl->is_ramdump_pend = false; - spin_unlock_irqrestore(&me->hlock, irq_flags); + ret = fastrpc_file_get(fl); + if (ret) { + ADSPRPC_ERR("Failed to get user process reference\n"); + continue; + } + if (fl && fl->sctx && fl->sctx->smmu.dev) { + ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); + } else { + if (me->dev != NULL) + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); + } + if (ret < 0) + ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n", + __func__, ret); + + hlist_del_init(&buf->hn_init); + if (fl) { + spin_lock_irqsave(&me->hlock, irq_flags); + if (fl->file_close) + complete(&fl->work); + fl->is_ramdump_pend = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); + fastrpc_file_put(fl); + } } } } @@ -2408,7 +2426,7 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl) spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, irq_flags); } -static int fastrpc_file_free(struct fastrpc_file *fl); +static void fastrpc_file_free(struct kref *ref); static void fastrpc_file_list_dtor(struct fastrpc_apps *me) { struct fastrpc_file *fl, *free; @@ -2425,7 +2443,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me) } spin_unlock_irqrestore(&me->hlock, irq_flags); if (free) - fastrpc_file_free(free); + fastrpc_file_put(free); } while (free); } @@ -3552,7 +3570,10 @@ read_async_job: VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; - + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } spin_lock_irqsave(&fl->aqlock, flags); hlist_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { hlist_del_init(&ictx->asyncn); @@ -3635,10 +3656,18 @@ read_notif_status: err = -EFAULT; goto bail; } + if (fl->file_close >= FASTRPC_PROCESS_EXIT_START) { + err = -EBADF; + goto bail; + } VERIFY(err, 0 == (err = interrupted)); if (err) goto 
bail; + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags); list_for_each_entry_safe(inotif, n, &fl->clst.notif_queue, notifn) { list_del_init(&inotif->notifn); @@ -5935,8 +5964,9 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *chan, mutex_unlock(&chan->smd_mutex); } -static int fastrpc_file_free(struct fastrpc_file *fl) +void fastrpc_file_free(struct kref *ref) { + struct fastrpc_file *fl = NULL; struct hlist_node *n = NULL; struct fastrpc_mmap *map = NULL, *lmap = NULL; unsigned long flags; @@ -5949,8 +5979,9 @@ static int fastrpc_file_free(struct fastrpc_file *fl) int i; struct fastrpc_buf *init_mem = NULL; + fl = container_of(ref, struct fastrpc_file, refcount); if (!fl) - return 0; + return; cid = fl->cid; spin_lock_irqsave(&me->hlock, irq_flags); @@ -6004,7 +6035,8 @@ skip_dump_wait: frpc_tgid_usage_array[fl->tgid_frpc] = false; spin_unlock_irqrestore(&me->hlock, irq_flags); kfree(fl); - return 0; + fl = NULL; + return; } //Dummy wake up to exit Async worker thread @@ -6081,7 +6113,20 @@ skip_dump_wait: kfree(fl->dev_pm_qos_req); kfree(fl->gidlist.gids); kfree(fl); - return 0; + fl = NULL; +} + +static int fastrpc_file_get(struct fastrpc_file *fl) +{ + if (!fl) + return -ENOENT; + return kref_get_unless_zero(&fl->refcount) ? 
0 : -ENOENT; +} + +static void fastrpc_file_put(struct fastrpc_file *fl) +{ + if (fl) + kref_put(&fl->refcount, fastrpc_file_free); } static int fastrpc_device_release(struct inode *inode, struct file *file) @@ -6101,7 +6146,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) } } debugfs_remove(fl->debugfs_file); - fastrpc_file_free(fl); + fastrpc_file_put(fl); file->private_data = NULL; return 0; @@ -6200,6 +6245,11 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, } spin_unlock_irqrestore(&me->hlock, irq_flags); } else { + ret = fastrpc_file_get(fl); + if (ret) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "\n%s %13s %d\n", "cid", ":", fl->cid); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -6344,6 +6394,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, ictx->used, ictx->ctxid); } spin_unlock(&fl->hlock); + fastrpc_file_put(fl); } if (len > DEBUGFS_SIZE) len = DEBUGFS_SIZE; @@ -6496,6 +6547,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) spin_lock_irqsave(&me->hlock, irq_flags); hlist_add_head(&fl->hn, &me->drivers); spin_unlock_irqrestore(&me->hlock, irq_flags); + kref_init(&fl->refcount); if (me->lowest_capacity_core_count) fl->dev_pm_qos_req = kzalloc((me->lowest_capacity_core_count) * sizeof(struct dev_pm_qos_request), @@ -7427,6 +7479,11 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, p.inv.perf_dsp = NULL; p.inv.job = NULL; + err = fastrpc_file_get(fl); + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } spin_lock(&fl->hlock); if (fl->file_close >= FASTRPC_PROCESS_EXIT_START) { err = -ESHUTDOWN; @@ -7596,6 +7653,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, break; } bail: + fastrpc_file_put(fl); return err; } @@ -7709,6 +7767,11 @@ static void 
fastrpc_print_debug_data(int cid) } spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { + err = fastrpc_file_get(fl); + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto free_buf; + } if (fl->cid == cid && fl->is_ramdump_pend) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), @@ -7808,6 +7871,7 @@ static void fastrpc_print_debug_data(int cid) cid, mini_dump_buff); } spin_unlock(&fl->hlock); + fastrpc_file_put(fl); } } spin_unlock_irqrestore(&me->hlock, irq_flags); @@ -7847,6 +7911,7 @@ static void fastrpc_print_debug_data(int cid) "gmsg_log_rx:\n %s\n", gmsg_log_rx); if (chan && chan->buf) chan->buf->size = strlen(mini_dump_buff); + free_buf: kfree(gmsg_log_tx); kfree(gmsg_log_rx); @@ -8603,6 +8668,7 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param) struct fastrpc_apps *me = &gfa; uintptr_t raddr = 0; unsigned long irq_flags = 0; + bool reftaken = 0; p.map = (struct fastrpc_dev_map_dma *)invoke_param; spin_lock_irqsave(&me->hlock, irq_flags); @@ -8621,6 +8687,12 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param) return err; } spin_unlock_irqrestore(&me->hlock, irq_flags); + err = fastrpc_file_get(fl); + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } + reftaken = 1; mutex_lock(&fl->internal_map_mutex); spin_lock_irqsave(&me->hlock, irq_flags); /* Verify if fastrpc file is being closed, holding device lock*/ @@ -8660,6 +8732,8 @@ bail: spin_unlock_irqrestore(&me->hlock, irq_flags); } mutex_unlock(&fl->internal_map_mutex); + if (reftaken) + fastrpc_file_put(fl); return err; } @@ -8671,6 +8745,7 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_para struct fastrpc_mmap *map = NULL; struct fastrpc_apps *me = &gfa; unsigned long irq_flags = 0; + bool reftaken = 0; p.unmap = (struct fastrpc_dev_unmap_dma *)invoke_param; spin_lock_irqsave(&me->hlock, irq_flags); @@ 
-8689,6 +8764,12 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_para return err; } spin_unlock_irqrestore(&me->hlock, irq_flags); + err = fastrpc_file_get(fl); + if (err) { + ADSPRPC_ERR("Failed to get user process reference\n"); + goto bail; + } + reftaken = 1; mutex_lock(&fl->internal_map_mutex); spin_lock_irqsave(&me->hlock, irq_flags); /* Verify if fastrpc file is being closed, holding device lock*/ @@ -8722,6 +8803,8 @@ bail: spin_unlock_irqrestore(&me->hlock, irq_flags); } mutex_unlock(&fl->internal_map_mutex); + if (reftaken) + fastrpc_file_put(fl); return err; } @@ -8773,6 +8856,7 @@ long fastrpc_driver_invoke(struct fastrpc_device *dev, unsigned int invoke_num, err = -ENOTTY; break; } + return err; } EXPORT_SYMBOL(fastrpc_driver_invoke); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 6414992946..3c450e8717 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -845,6 +845,7 @@ struct fastrpc_file { struct fastrpc_ctx_lst clst; struct fastrpc_session_ctx *sctx; struct fastrpc_buf *init_mem; + struct kref refcount; /* No. of persistent headers */ unsigned int num_pers_hdrs; From 6dab51a3af6f217c1729452fa963d0d3568058ec Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Tue, 5 Mar 2024 17:19:52 +0530 Subject: [PATCH 121/146] dsp-kernel: use-after-free (UAF) in global maps Currently, remote heap maps get added to the global list before the fastrpc_internal_mmap function completes the mapping. Meanwhile, the fastrpc_internal_munmap function accesses the map, starts unmapping, and frees the map before the fastrpc_internal_mmap function completes, resulting in a use-after-free (UAF) issue. Add the map to the list after the fastrpc_internal_mmap function completes the mapping. 
Signed-off-by: Abhishek Singh Change-Id: I8aa23cf215e53d0613774c2b2657954bca6c72f4 --- dsp/adsprpc.c | 74 ++++++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 08b51bd95d..48f7771937 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -822,54 +822,34 @@ static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl) } while (free); } +static void fastrpc_mmap_add_global(struct fastrpc_mmap *map) +{ + struct fastrpc_apps *me = &gfa; + unsigned long irq_flags = 0; + + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_add_head(&map->hn, &me->maps); + spin_unlock_irqrestore(&me->hlock, irq_flags); +} + static void fastrpc_mmap_add(struct fastrpc_mmap *map) { - if (map->flags == ADSP_MMAP_HEAP_ADDR || - map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - struct fastrpc_apps *me = &gfa; - unsigned long irq_flags = 0; + struct fastrpc_file *fl = map->fl; - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_add_head(&map->hn, &me->maps); - spin_unlock_irqrestore(&me->hlock, irq_flags); - } else { - struct fastrpc_file *fl = map->fl; + hlist_add_head(&map->hn, &fl->maps); - hlist_add_head(&map->hn, &fl->maps); - } } static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, struct dma_buf *buf, uintptr_t va, size_t len, int mflags, int refs, struct fastrpc_mmap **ppmap) { - struct fastrpc_apps *me = &gfa; struct fastrpc_mmap *match = NULL, *map = NULL; struct hlist_node *n; - unsigned long irq_flags = 0; if ((va + len) < va) return -EFAULT; - if (mflags == ADSP_MMAP_HEAP_ADDR || - mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (va >= map->va && - va + len <= map->va + map->len && - map->fd == fd) { - if (refs) { - if (map->refs + 1 == INT_MAX) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - return -ETOOMANYREFS; - } - map->refs++; - } - match = map; - break; - } - } - 
spin_unlock_irqrestore(&me->hlock, irq_flags); - } else if (mflags == ADSP_MMAP_DMA_BUFFER) { + if (mflags == ADSP_MMAP_DMA_BUFFER) { hlist_for_each_entry_safe(map, n, &fl->maps, hn) { if (map->buf == buf) { if (refs) { @@ -1490,8 +1470,9 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct dma_buf * else fl->mem_snap.nonheap_bufs_size += map->size; spin_unlock(&fl->hlock); - - fastrpc_mmap_add(map); + if ((mflags != ADSP_MMAP_HEAP_ADDR) && + (mflags != ADSP_MMAP_REMOTE_HEAP_ADDR)) + fastrpc_mmap_add(map); *ppmap = map; bail: @@ -4382,6 +4363,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl, spin_lock_irqsave(&me->hlock, irq_flags); mem->in_use = true; spin_unlock_irqrestore(&me->hlock, irq_flags); + fastrpc_mmap_add_global(mem); } VERIFY(err, mem); if (err) @@ -5244,7 +5226,7 @@ static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, match->size, match->flags, 0); else { pr_err("Cannot communicate with DSP, ADSP is down\n"); - fastrpc_mmap_add(match); + fastrpc_mmap_add_global(match); } } if (err) @@ -5316,7 +5298,7 @@ bail: if (err && match) { if (!locked && fl) mutex_lock(&fl->map_mutex); - fastrpc_mmap_add(match); + fastrpc_mmap_add_global(match); if (!locked && fl) mutex_unlock(&fl->map_mutex); } @@ -5455,7 +5437,11 @@ int fastrpc_internal_munmap(struct fastrpc_file *fl, bail: if (err && map) { mutex_lock(&fl->map_mutex); - fastrpc_mmap_add(map); + if ((map->flags == ADSP_MMAP_HEAP_ADDR) || + (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) + fastrpc_mmap_add_global(map); + else + fastrpc_mmap_add(map); mutex_unlock(&fl->map_mutex); } mutex_unlock(&fl->internal_map_mutex); @@ -5542,6 +5528,9 @@ int fastrpc_internal_mem_map(struct fastrpc_file *fl, if (err) goto bail; ud->m.vaddrout = map->raddr; + if (ud->m.flags == ADSP_MMAP_HEAP_ADDR || + ud->m.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) + fastrpc_mmap_add_global(map); bail: if (err) { ADSPRPC_ERR("failed to map fd %d, len 0x%x, flags %d, map %pK, 
err %d\n", @@ -5606,7 +5595,11 @@ bail: /* Add back to map list in case of error to unmap on DSP */ if (map) { mutex_lock(&fl->map_mutex); - fastrpc_mmap_add(map); + if ((map->flags == ADSP_MMAP_HEAP_ADDR) || + (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) + fastrpc_mmap_add_global(map); + else + fastrpc_mmap_add(map); mutex_unlock(&fl->map_mutex); } } @@ -5680,6 +5673,9 @@ int fastrpc_internal_mmap(struct fastrpc_file *fl, if (err) goto bail; map->raddr = raddr; + if (ud->flags == ADSP_MMAP_HEAP_ADDR || + ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) + fastrpc_mmap_add_global(map); } ud->vaddrout = raddr; bail: From bc325e502538793f0bc7a7ac8cc3a86ed4aef18c Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Thu, 2 May 2024 11:36:49 +0530 Subject: [PATCH 122/146] dsp-kernel: Handle spin lock in error scenarios Currently, the code flow bails out without releasing the spin lock, leading to spin lock recursion. Additionally, the free function is called during this bail, which is a sleep function. To address this issue, ensure that the spin lock is released before proceeding to the bail. Change-Id: I57884049d7799c3c69eccb4fa2db043b073d5312 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 5c07016417..50bb517890 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7769,6 +7769,7 @@ static void fastrpc_print_debug_data(int cid) hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { err = fastrpc_file_get(fl); if (err) { + spin_unlock_irqrestore(&me->hlock, irq_flags); ADSPRPC_ERR("Failed to get user process reference\n"); goto free_buf; } From 3c85f078253e11a18369ce525f07f8aaf45fced9 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Thu, 2 May 2024 11:36:49 +0530 Subject: [PATCH 123/146] dsp-kernel: Handle spin lock in error scenarios Currently, the code flow bails out without releasing the spin lock, leading to spin lock recursion. 
Additionally, the free function is called during this bail, which is a sleep function. To address this issue, ensure that the spin lock is released before proceeding to the bail. Change-Id: I57884049d7799c3c69eccb4fa2db043b073d5312 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 5c07016417..50bb517890 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7769,6 +7769,7 @@ static void fastrpc_print_debug_data(int cid) hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { err = fastrpc_file_get(fl); if (err) { + spin_unlock_irqrestore(&me->hlock, irq_flags); ADSPRPC_ERR("Failed to get user process reference\n"); goto free_buf; } From 26d80835ad33e6a054c60b527ed15e03205b9c3e Mon Sep 17 00:00:00 2001 From: Sudheer Gummalla Date: Thu, 9 May 2024 13:00:44 +0530 Subject: [PATCH 124/146] msm: Add anorak module support Add anorak module to support anorak target Change-Id: Ifc81c5c4b02c40eaa7d2281c5a4c374657457514 Signed-off-by: Sudheer Gummalla --- BUILD.bazel | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index e18d78aeb2..c0a69780cb 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -33,3 +33,7 @@ define_modules("pitti", "gki") define_modules("volcano", "consolidate") define_modules("volcano", "gki") + +define_modules("anorak", "consolidate") + +define_modules("anorak", "gki") From d028daccf54bb2bdc6c17c80b247356cf758dac9 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Tue, 21 May 2024 15:58:20 +0530 Subject: [PATCH 125/146] dsp-kernel: Adding locks while printing debug data Add locking mechanism while printing file map and cma map in print debug data. 
Change-Id: I36484d763b56ec88413ca9394c08ff30d85e664a Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 44 +++++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index dc104c39c5..e06cbd1ea6 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7716,6 +7716,27 @@ static void fastrpc_print_fastrpcbuf(struct fastrpc_buf *buf, void *buffer) buf->flags, buf->type, buf->in_use); } +/* + * fastrpc_print_map : Print fastrpc_map structure parameter. + * @args1: structure fastrpc_map, map whose details needs + * to because printed. + * @args1: buffer for storing the string consisting details + */ +static void fastrpc_print_map(struct fastrpc_mmap *map, void *buffer) +{ + scnprintf(buffer + + strlen(buffer), + MINI_DUMP_DBG_SIZE - + strlen(buffer), + fastrpc_mmap_params, + map->fd, + map->flags, map->buf, + map->phys, map->size, + map->va, map->raddr, + map->len, map->refs, + map->secure); +} + /* * fastrpc_print_debug_data : Print debug structure variable in CMA memory. 
* Input cid: Channel id @@ -7797,19 +7818,17 @@ static void fastrpc_print_debug_data(int cid) MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\nSession Maps\n"); - hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - scnprintf(mini_dump_buff + - strlen(mini_dump_buff), - MINI_DUMP_DBG_SIZE - - strlen(mini_dump_buff), - fastrpc_mmap_params, - map->fd, - map->flags, map->buf, - map->phys, map->size, - map->va, map->raddr, - map->len, map->refs, - map->secure); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + fastrpc_print_map(map, mini_dump_buff); } + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + fastrpc_print_map(map, mini_dump_buff); + } + mutex_unlock(&fl->map_mutex); + spin_lock_irqsave(&me->hlock, irq_flags); + spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\ncached_bufs\n"); @@ -7846,7 +7865,6 @@ static void fastrpc_print_debug_data(int cid) "\nfl->secsctx->smmu.cb : %d\n", fl->secsctx->smmu.cb); } - spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - From 74775598d4b326774bcb56d3f2fde0f78369cae2 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Wed, 8 May 2024 16:29:49 +0530 Subject: [PATCH 126/146] dsp-kernel: Dequeue the CMA mini dump node to avoid infinite loop Currently, the CMA mini dump node is not being dequeued, leading to an infinite loop. Dequeue the CMA mini dump node as well along with all the init mems. 
Signed-off-by: Abhishek Singh Change-Id: Ie5c24ee4ce43c798ed40a8d766371449bcf27b68 --- dsp/adsprpc.c | 89 +++++++++++++++----------------------------- dsp/adsprpc_shared.h | 2 - 2 files changed, 31 insertions(+), 60 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 50bb517890..1dd170683d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2153,7 +2153,7 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif is_process_found = true; err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); is_process_found = false; } break; @@ -2277,14 +2277,19 @@ static void fastrpc_update_ramdump_status(int cid) struct fastrpc_apps *me = &gfa; struct fastrpc_channel_ctx *chan = &me->channel[cid]; unsigned long irq_flags = 0; + int ret = 0; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { if (fl->cid == cid && fl->init_mem && fl->file_close < FASTRPC_PROCESS_DSP_EXIT_COMPLETE && fl->dsp_proc_init) { + ret = fastrpc_file_get(fl); + if (ret) { + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); + continue; + } hlist_add_head(&fl->init_mem->hn_init, &chan->initmems); - fl->is_ramdump_pend = true; } } if (chan->buf) @@ -2301,7 +2306,6 @@ static void fastrpc_ramdump_collection(int cid) struct qcom_dump_segment ramdump_entry; struct fastrpc_buf *buf = NULL; int ret = 0; - unsigned long irq_flags = 0; struct list_head head; hlist_for_each_entry_safe(buf, n, &chan->initmems, hn_init) { @@ -2313,32 +2317,18 @@ static void fastrpc_ramdump_collection(int cid) INIT_LIST_HEAD(&head); list_add(&ramdump_entry.node, &head); - if (fl) { - ret = fastrpc_file_get(fl); - if (ret) { - ADSPRPC_ERR("Failed to get user process reference\n"); - continue; - } - if (fl && fl->sctx && fl->sctx->smmu.dev) { - ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); - } else { - if (me->dev != 
NULL) - ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); - } - if (ret < 0) - ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n", - __func__, ret); - - hlist_del_init(&buf->hn_init); - if (fl) { - spin_lock_irqsave(&me->hlock, irq_flags); - if (fl->file_close) - complete(&fl->work); - fl->is_ramdump_pend = false; - spin_unlock_irqrestore(&me->hlock, irq_flags); - fastrpc_file_put(fl); - } + if (fl && fl->sctx && fl->sctx->smmu.dev) { + ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); + } else { + if (me->dev != NULL) + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); } + if (ret < 0) + ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n", + __func__, ret); + hlist_del_init(&buf->hn_init); + if (fl) + fastrpc_file_put(fl); } } @@ -3570,10 +3560,6 @@ read_async_job: VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; - if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); - goto bail; - } spin_lock_irqsave(&fl->aqlock, flags); hlist_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { hlist_del_init(&ictx->asyncn); @@ -3663,11 +3649,6 @@ read_notif_status: VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; - - if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); - goto bail; - } spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags); list_for_each_entry_safe(inotif, n, &fl->clst.notif_queue, notifn) { list_del_init(&inotif->notifn); @@ -5980,8 +5961,10 @@ void fastrpc_file_free(struct kref *ref) struct fastrpc_buf *init_mem = NULL; fl = container_of(ref, struct fastrpc_file, refcount); - if (!fl) + if (!fl) { + ADSPRPC_ERR("%s Invalid fl", __func__); return; + } cid = fl->cid; spin_lock_irqsave(&me->hlock, irq_flags); @@ -6010,19 +5993,6 @@ skip_dmainvoke_wait: spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; } - if (!fl->is_ramdump_pend) - goto skip_dump_wait; - is_locked = false; - spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); - wait_for_completion(&fl->work); 
- -skip_dump_wait: - if (!is_locked) { - spin_lock_irqsave(&fl->apps->hlock, irq_flags); - is_locked = true; - } - hlist_del_init(&fl->hn); - fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; @@ -6134,6 +6104,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; struct fastrpc_apps *me = &gfa; unsigned int ii; + unsigned long irq_flags = 0; if (!fl) return 0; @@ -6146,6 +6117,9 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) } } debugfs_remove(fl->debugfs_file); + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_del_init(&fl->hn); + spin_unlock_irqrestore(&me->hlock, irq_flags); fastrpc_file_put(fl); file->private_data = NULL; @@ -6247,7 +6221,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, } else { ret = fastrpc_file_get(fl); if (ret) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -6529,7 +6503,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->init_mem = NULL; fl->qos_request = 0; fl->dsp_proc_init = 0; - fl->is_ramdump_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; fl->is_unsigned_pd = false; fl->is_compat = false; @@ -7770,10 +7743,10 @@ static void fastrpc_print_debug_data(int cid) err = fastrpc_file_get(fl); if (err) { spin_unlock_irqrestore(&me->hlock, irq_flags); - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto free_buf; } - if (fl->cid == cid && fl->is_ramdump_pend) { + if (fl->cid == cid) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - @@ -7872,8 +7845,8 @@ static void fastrpc_print_debug_data(int cid) cid, 
mini_dump_buff); } spin_unlock(&fl->hlock); - fastrpc_file_put(fl); } + fastrpc_file_put(fl); } spin_unlock_irqrestore(&me->hlock, irq_flags); spin_lock_irqsave(&chan->gmsg_log.lock, flags); @@ -8690,7 +8663,7 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param) spin_unlock_irqrestore(&me->hlock, irq_flags); err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } reftaken = 1; @@ -8767,7 +8740,7 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_para spin_unlock_irqrestore(&me->hlock, irq_flags); err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } reftaken = 1; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 3c450e8717..cc5752bb0c 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -900,8 +900,6 @@ struct fastrpc_file { struct fastrpc_device *device; /* Process kill will wait on work when ram dump collection in progress */ struct completion work; - /* Flag to indicate ram dump collection status*/ - bool is_ramdump_pend; /* Process kill will wait on bus driver invoke thread to complete its process */ struct completion dma_invoke; /* Flag to indicate invoke pending */ From a5ee0494e92d683e0d4ab6541dcc66e046e86e78 Mon Sep 17 00:00:00 2001 From: Santosh Sakore Date: Thu, 23 May 2024 19:39:22 +0530 Subject: [PATCH 127/146] dsp-kernel: Move the ssrcount access within a critical section The subsystem ssrcount is write-protected with a channel mutex. In a few places, the code accesses it outside the critical section, which can result in false reads during a race condition. To address this, move the ssrcount access within a critical section. 
Change-Id: I7df1e05fd892277a10514e3759f7ea67c51bac3b Signed-off-by: Santosh --- dsp/adsprpc.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index dc104c39c5..a4753a1f78 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3510,9 +3510,11 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, trace_fastrpc_msg("context_free: end"); } if (!kernel) { + mutex_lock(&fl->apps->channel[cid].smd_mutex); if (VALID_FASTRPC_CID(cid) && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) err = -ECONNRESET; + mutex_unlock(&fl->apps->channel[cid].smd_mutex); } invoke_end: @@ -6726,8 +6728,8 @@ int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) } } fl->cid = cid; - fl->ssrcount = fl->apps->channel[cid].ssrcount; mutex_lock(&fl->apps->channel[cid].smd_mutex); + fl->ssrcount = fl->apps->channel[cid].ssrcount; err = fastrpc_session_alloc_locked(&fl->apps->channel[cid], 0, fl->sharedcb, fl->pd_type, &fl->sctx); mutex_unlock(&fl->apps->channel[cid].smd_mutex); @@ -7919,7 +7921,9 @@ void fastrpc_restart_drivers(int cid) struct fastrpc_apps *me = &gfa; fastrpc_notify_drivers(me, cid); + mutex_lock(&me->channel[cid].smd_mutex); me->channel[cid].ssrcount++; + mutex_unlock(&me->channel[cid].smd_mutex); } static int fastrpc_restart_notifier_cb(struct notifier_block *nb, @@ -7933,9 +7937,14 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, int cid = -1; struct timespec64 startT = {0}; unsigned long irq_flags = 0; + uint64_t ssrcount = 0; ctx = container_of(nb, struct fastrpc_channel_ctx, nb); cid = ctx - &me->channel[0]; + /* ssrcount should be read within a critical section */ + mutex_lock(&me->channel[cid].smd_mutex); + ssrcount = ctx->ssrcount; + mutex_unlock(&me->channel[cid].smd_mutex); switch (code) { case QCOM_SSR_BEFORE_SHUTDOWN: fastrpc_rproc_trace_events(gcinfo[cid].subsys, @@ -7968,13 +7977,11 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, 
"QCOM_SSR_BEFORE_POWERUP", "fastrpc_restart_notifier-enter"); pr_info("adsprpc: %s: subsystem %s is about to start\n", __func__, gcinfo[cid].subsys); - if (cid == CDSP_DOMAIN_ID && dump_enabled() && - ctx->ssrcount) + if (cid == CDSP_DOMAIN_ID && dump_enabled() && ssrcount) fastrpc_update_ramdump_status(cid); fastrpc_notify_drivers(me, cid); /* Skip ram dump collection in first boot */ - if (cid == CDSP_DOMAIN_ID && dump_enabled() && - ctx->ssrcount) { + if (cid == CDSP_DOMAIN_ID && dump_enabled() && ssrcount) { mutex_lock(&me->channel[cid].smd_mutex); fastrpc_print_debug_data(cid); mutex_unlock(&me->channel[cid].smd_mutex); From 0abf6ac7b380ca6bbc7fc97bbc7c13c7d63050ab Mon Sep 17 00:00:00 2001 From: Minghao Xue Date: Thu, 23 May 2024 15:48:13 +0800 Subject: [PATCH 128/146] dsp-kernel: Handle dspsignal_wait based on timeout Currently, dsp signal waits definite timeout even though time out set to indefinite wait and returns timeout error. Fix is added proper check for waiting indefinitely and returned proper error code. 
Change-Id: Ib4d8835cee6c686dae45f8b5ddf128d24c28cdad Signed-off-by: Minghao Xue --- dsp/adsprpc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 96a9495895..1d4a3ee58f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7053,6 +7053,7 @@ int fastrpc_dspsignal_wait(struct fastrpc_file *fl, struct fastrpc_ioctl_dspsignal_wait *wait) { int err = 0, cid = -1; + uint32_t timeout_usec = wait->timeout_usec; unsigned long timeout = usecs_to_jiffies(wait->timeout_usec); uint32_t signal_id = wait->signal_id; struct fastrpc_dspsignal *s = NULL; @@ -7102,14 +7103,15 @@ int fastrpc_dspsignal_wait(struct fastrpc_file *fl, spin_unlock_irqrestore(&fl->dspsignals_lock, irq_flags); trace_fastrpc_dspsignal("wait", signal_id, s->state, wait->timeout_usec); - if (timeout != 0xffffffff) + if (timeout_usec != 0xffffffff) ret = wait_for_completion_interruptible_timeout(&s->comp, timeout); else ret = wait_for_completion_interruptible(&s->comp); trace_fastrpc_dspsignal("wakeup", signal_id, s->state, wait->timeout_usec); - if (ret == 0) { - DSPSIGNAL_VERBOSE("Wait for signal %u timed out\n", signal_id); + if (timeout_usec != 0xffffffff && ret == 0) { + DSPSIGNAL_VERBOSE("Wait for signal %u timed out %ld us\n", + signal_id, timeout_usec); err = -ETIMEDOUT; goto bail; } else if (ret < 0) { From 8d872e28809a7d3ebf7c7d11a25e75bf67b89d7b Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Wed, 8 May 2024 16:29:49 +0530 Subject: [PATCH 129/146] dsp-kernel: Dequeue the CMA mini dump node to avoid infinite loop Currently, the CMA mini dump node is not being dequeued, leading to an infinite loop. Dequeue the CMA mini dump node as well along with all the init mems. 
Signed-off-by: Abhishek Singh Change-Id: Ie5c24ee4ce43c798ed40a8d766371449bcf27b68 (cherry picked from commit 74775598d4b326774bcb56d3f2fde0f78369cae2) --- dsp/adsprpc.c | 89 +++++++++++++++----------------------------- dsp/adsprpc_shared.h | 2 - 2 files changed, 31 insertions(+), 60 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 50bb517890..1dd170683d 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -2153,7 +2153,7 @@ static void fastrpc_notif_find_process(int domain, struct smq_notif_rspv3 *notif is_process_found = true; err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); is_process_found = false; } break; @@ -2277,14 +2277,19 @@ static void fastrpc_update_ramdump_status(int cid) struct fastrpc_apps *me = &gfa; struct fastrpc_channel_ctx *chan = &me->channel[cid]; unsigned long irq_flags = 0; + int ret = 0; spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { if (fl->cid == cid && fl->init_mem && fl->file_close < FASTRPC_PROCESS_DSP_EXIT_COMPLETE && fl->dsp_proc_init) { + ret = fastrpc_file_get(fl); + if (ret) { + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); + continue; + } hlist_add_head(&fl->init_mem->hn_init, &chan->initmems); - fl->is_ramdump_pend = true; } } if (chan->buf) @@ -2301,7 +2306,6 @@ static void fastrpc_ramdump_collection(int cid) struct qcom_dump_segment ramdump_entry; struct fastrpc_buf *buf = NULL; int ret = 0; - unsigned long irq_flags = 0; struct list_head head; hlist_for_each_entry_safe(buf, n, &chan->initmems, hn_init) { @@ -2313,32 +2317,18 @@ static void fastrpc_ramdump_collection(int cid) INIT_LIST_HEAD(&head); list_add(&ramdump_entry.node, &head); - if (fl) { - ret = fastrpc_file_get(fl); - if (ret) { - ADSPRPC_ERR("Failed to get user process reference\n"); - continue; - } - if (fl && fl->sctx && fl->sctx->smmu.dev) { - ret = 
qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); - } else { - if (me->dev != NULL) - ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); - } - if (ret < 0) - ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n", - __func__, ret); - - hlist_del_init(&buf->hn_init); - if (fl) { - spin_lock_irqsave(&me->hlock, irq_flags); - if (fl->file_close) - complete(&fl->work); - fl->is_ramdump_pend = false; - spin_unlock_irqrestore(&me->hlock, irq_flags); - fastrpc_file_put(fl); - } + if (fl && fl->sctx && fl->sctx->smmu.dev) { + ret = qcom_elf_dump(&head, fl->sctx->smmu.dev, ELF_CLASS); + } else { + if (me->dev != NULL) + ret = qcom_elf_dump(&head, me->dev, ELF_CLASS); } + if (ret < 0) + ADSPRPC_ERR("adsprpc: %s: unable to dump PD memory (err %d)\n", + __func__, ret); + hlist_del_init(&buf->hn_init); + if (fl) + fastrpc_file_put(fl); } } @@ -3570,10 +3560,6 @@ read_async_job: VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; - if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); - goto bail; - } spin_lock_irqsave(&fl->aqlock, flags); hlist_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) { hlist_del_init(&ictx->asyncn); @@ -3663,11 +3649,6 @@ read_notif_status: VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; - - if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); - goto bail; - } spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags); list_for_each_entry_safe(inotif, n, &fl->clst.notif_queue, notifn) { list_del_init(&inotif->notifn); @@ -5980,8 +5961,10 @@ void fastrpc_file_free(struct kref *ref) struct fastrpc_buf *init_mem = NULL; fl = container_of(ref, struct fastrpc_file, refcount); - if (!fl) + if (!fl) { + ADSPRPC_ERR("%s Invalid fl", __func__); return; + } cid = fl->cid; spin_lock_irqsave(&me->hlock, irq_flags); @@ -6010,19 +5993,6 @@ skip_dmainvoke_wait: spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; } - if (!fl->is_ramdump_pend) - goto skip_dump_wait; - is_locked = false; - 
spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); - wait_for_completion(&fl->work); - -skip_dump_wait: - if (!is_locked) { - spin_lock_irqsave(&fl->apps->hlock, irq_flags); - is_locked = true; - } - hlist_del_init(&fl->hn); - fl->is_ramdump_pend = false; fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; @@ -6134,6 +6104,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; struct fastrpc_apps *me = &gfa; unsigned int ii; + unsigned long irq_flags = 0; if (!fl) return 0; @@ -6146,6 +6117,9 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) } } debugfs_remove(fl->debugfs_file); + spin_lock_irqsave(&me->hlock, irq_flags); + hlist_del_init(&fl->hn); + spin_unlock_irqrestore(&me->hlock, irq_flags); fastrpc_file_put(fl); file->private_data = NULL; @@ -6247,7 +6221,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, } else { ret = fastrpc_file_get(fl); if (ret) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, @@ -6529,7 +6503,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->init_mem = NULL; fl->qos_request = 0; fl->dsp_proc_init = 0; - fl->is_ramdump_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; fl->is_unsigned_pd = false; fl->is_compat = false; @@ -7770,10 +7743,10 @@ static void fastrpc_print_debug_data(int cid) err = fastrpc_file_get(fl); if (err) { spin_unlock_irqrestore(&me->hlock, irq_flags); - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto free_buf; } - if (fl->cid == cid && fl->is_ramdump_pend) { + if (fl->cid == cid) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE 
- @@ -7872,8 +7845,8 @@ static void fastrpc_print_debug_data(int cid) cid, mini_dump_buff); } spin_unlock(&fl->hlock); - fastrpc_file_put(fl); } + fastrpc_file_put(fl); } spin_unlock_irqrestore(&me->hlock, irq_flags); spin_lock_irqsave(&chan->gmsg_log.lock, flags); @@ -8690,7 +8663,7 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev, unsigned long invoke_param) spin_unlock_irqrestore(&me->hlock, irq_flags); err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } reftaken = 1; @@ -8767,7 +8740,7 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev, unsigned long invoke_para spin_unlock_irqrestore(&me->hlock, irq_flags); err = fastrpc_file_get(fl); if (err) { - ADSPRPC_ERR("Failed to get user process reference\n"); + ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); goto bail; } reftaken = 1; diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 3c450e8717..cc5752bb0c 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -900,8 +900,6 @@ struct fastrpc_file { struct fastrpc_device *device; /* Process kill will wait on work when ram dump collection in progress */ struct completion work; - /* Flag to indicate ram dump collection status*/ - bool is_ramdump_pend; /* Process kill will wait on bus driver invoke thread to complete its process */ struct completion dma_invoke; /* Flag to indicate invoke pending */ From dea40cdd29ca8876302374b3280f83b7ba2ccb43 Mon Sep 17 00:00:00 2001 From: Ansa Ahmed Date: Tue, 21 May 2024 15:58:20 +0530 Subject: [PATCH 130/146] dsp-kernel: Adding locks while printing debug data Add locking mechanism while printing file map and cma map in print debug data. 
Change-Id: I36484d763b56ec88413ca9394c08ff30d85e664a Signed-off-by: Ansa Ahmed --- dsp/adsprpc.c | 44 +++++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 1dd170683d..8cec1833fb 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -7693,6 +7693,27 @@ static void fastrpc_print_fastrpcbuf(struct fastrpc_buf *buf, void *buffer) buf->flags, buf->type, buf->in_use); } +/* + * fastrpc_print_map : Print fastrpc_map structure parameter. + * @args1: structure fastrpc_map, map whose details needs + * to because printed. + * @args1: buffer for storing the string consisting details + */ +static void fastrpc_print_map(struct fastrpc_mmap *map, void *buffer) +{ + scnprintf(buffer + + strlen(buffer), + MINI_DUMP_DBG_SIZE - + strlen(buffer), + fastrpc_mmap_params, + map->fd, + map->flags, map->buf, + map->phys, map->size, + map->va, map->raddr, + map->len, map->refs, + map->secure); +} + /* * fastrpc_print_debug_data : Print debug structure variable in CMA memory. 
* Input cid: Channel id @@ -7774,19 +7795,17 @@ static void fastrpc_print_debug_data(int cid) MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\nSession Maps\n"); - hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - scnprintf(mini_dump_buff + - strlen(mini_dump_buff), - MINI_DUMP_DBG_SIZE - - strlen(mini_dump_buff), - fastrpc_mmap_params, - map->fd, - map->flags, map->buf, - map->phys, map->size, - map->va, map->raddr, - map->len, map->refs, - map->secure); + hlist_for_each_entry_safe(map, n, &me->maps, hn) { + fastrpc_print_map(map, mini_dump_buff); } + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_lock(&fl->map_mutex); + hlist_for_each_entry_safe(map, n, &fl->maps, hn) { + fastrpc_print_map(map, mini_dump_buff); + } + mutex_unlock(&fl->map_mutex); + spin_lock_irqsave(&me->hlock, irq_flags); + spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\ncached_bufs\n"); @@ -7823,7 +7842,6 @@ static void fastrpc_print_debug_data(int cid) "\nfl->secsctx->smmu.cb : %d\n", fl->secsctx->smmu.cb); } - spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - From dbd9441b4811bd9e6ed9bc77730400e91ebb9d7b Mon Sep 17 00:00:00 2001 From: Anvesh Jain P Date: Mon, 1 Apr 2024 12:13:29 +0530 Subject: [PATCH 131/146] msm: adsprpc: Add support for cdsp1 remoteproc The fastrpc driver supports 4 remoteproc. There are some products such as automotive which support cdsp1 remoteproc. Add changes to support cdsp1 remoteproc. 
Change-Id: I3a9b221c53ccd4331de089ab38ccd6d715db4bf4 Signed-off-by: Anvesh Jain P --- dsp/adsprpc_rpmsg.c | 2 ++ dsp/adsprpc_shared.h | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dsp/adsprpc_rpmsg.c b/dsp/adsprpc_rpmsg.c index f215fc3fc7..1168979035 100644 --- a/dsp/adsprpc_rpmsg.c +++ b/dsp/adsprpc_rpmsg.c @@ -61,6 +61,8 @@ static inline int get_cid_from_rpdev(struct rpmsg_device *rpdev) cid = SDSP_DOMAIN_ID; else if (!strcmp(label, "mdsp")) cid = MDSP_DOMAIN_ID; + else if (!strcmp(label, "cdsp1")) + cid = CDSP1_DOMAIN_ID; return cid; } diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 6414992946..204abd9d95 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -83,9 +83,9 @@ #define MDSP_DOMAIN_ID (1) #define SDSP_DOMAIN_ID (2) #define CDSP_DOMAIN_ID (3) -#define MAX_DOMAIN_ID CDSP_DOMAIN_ID +#define CDSP1_DOMAIN_ID (4) -#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ +#define NUM_CHANNELS 5 /* adsp, mdsp, slpi, cdsp, cdsp1*/ #define NUM_SESSIONS 14 /* max 11 compute, 3 cpz */ /* Default maximum sessions allowed per process */ From 3463a894b83c5c567f1741190aa22faae4d73c4d Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Sat, 8 Jun 2024 17:19:46 +0530 Subject: [PATCH 132/146] dsp-kernel: Handle the spinlock recursion Currently, in print_debug_data, kref_put is being called inside the global lock, and the same lock is taken in the release callback of kref_put, leading to spinlock recursion. There is no need to get and put the reference for the fastrpce file inside this function because we have already taken the reference inside the update_ramdump_status while adding the init memory entry to the chan->initmems list. Moreover, the same list will be used in print_debug_data. 
Signed-off-by: Abhishek Singh Change-Id: Ifdc8b3e0c2bbc5cc4237eedaa24c8cd766262dfe --- dsp/adsprpc.c | 25 +++++++------------------ dsp/adsprpc_shared.h | 2 -- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6af0fb31f3..ed8419cf77 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5991,6 +5991,7 @@ skip_dmainvoke_wait: spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; } + hlist_del_init(&fl->hn); fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; @@ -6102,7 +6103,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; struct fastrpc_apps *me = &gfa; unsigned int ii; - unsigned long irq_flags = 0; if (!fl) return 0; @@ -6115,9 +6115,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) } } debugfs_remove(fl->debugfs_file); - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_del_init(&fl->hn); - spin_unlock_irqrestore(&me->hlock, irq_flags); fastrpc_file_put(fl); file->private_data = NULL; @@ -6508,7 +6505,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_async = false; fl->multi_session_support = false; fl->set_session_info = false; - init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; filp->private_data = fl; @@ -7733,7 +7729,7 @@ static void fastrpc_print_debug_data(int cid) struct hlist_node *n = NULL; struct smq_invoke_ctx *ictx = NULL; struct fastrpc_tx_msg *tx_msg = NULL; - struct fastrpc_buf *buf = NULL; + struct fastrpc_buf *buf = NULL, *iter = NULL; struct fastrpc_mmap *map = NULL; unsigned long irq_flags = 0; @@ -7759,15 +7755,10 @@ static void fastrpc_print_debug_data(int cid) tx_index = chan->gmsg_log.tx_index; rx_index = chan->gmsg_log.rx_index; } - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_for_each_entry_safe(fl, n, 
&me->drivers, hn) { - err = fastrpc_file_get(fl); - if (err) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); - goto free_buf; - } - if (fl->cid == cid) { + + hlist_for_each_entry_safe(iter, n, &chan->initmems, hn_init) { + fl = iter->fl; + if ( fl && (fl->cid == cid)) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - @@ -7795,6 +7786,7 @@ static void fastrpc_print_debug_data(int cid) MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\nSession Maps\n"); + spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) { fastrpc_print_map(map, mini_dump_buff); } @@ -7804,7 +7796,6 @@ static void fastrpc_print_debug_data(int cid) fastrpc_print_map(map, mini_dump_buff); } mutex_unlock(&fl->map_mutex); - spin_lock_irqsave(&me->hlock, irq_flags); spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), @@ -7864,9 +7855,7 @@ static void fastrpc_print_debug_data(int cid) } spin_unlock(&fl->hlock); } - fastrpc_file_put(fl); } - spin_unlock_irqrestore(&me->hlock, irq_flags); spin_lock_irqsave(&chan->gmsg_log.lock, flags); if (rx_index) { for (i = rx_index, count = 0, len = 0 ; i > 0 && diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index cc5752bb0c..15337cfd01 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -898,8 +898,6 @@ struct fastrpc_file { uint32_t ws_timeout; bool untrusted_process; struct fastrpc_device *device; - /* Process kill will wait on work when ram dump collection in progress */ - struct completion work; /* Process kill will wait on bus driver invoke thread to complete its process */ struct completion dma_invoke; /* Flag to indicate invoke pending */ From dab8bb801df358435dd56dcb846ca02fecfc95e7 Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Sat, 8 Jun 2024 17:19:46 +0530 Subject: [PATCH 133/146] dsp-kernel: Handle the spinlock recursion 
Currently, in print_debug_data, kref_put is being called inside the global lock, and the same lock is taken in the release callback of kref_put, leading to spinlock recursion. There is no need to get and put the reference for the fastrpce file inside this function because we have already taken the reference inside the update_ramdump_status while adding the init memory entry to the chan->initmems list. Moreover, the same list will be used in print_debug_data. Signed-off-by: Abhishek Singh Change-Id: Ifdc8b3e0c2bbc5cc4237eedaa24c8cd766262dfe (cherry picked from commit 3463a894b83c5c567f1741190aa22faae4d73c4d) --- dsp/adsprpc.c | 25 +++++++------------------ dsp/adsprpc_shared.h | 2 -- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6af0fb31f3..ed8419cf77 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -5991,6 +5991,7 @@ skip_dmainvoke_wait: spin_lock_irqsave(&fl->apps->hlock, irq_flags); is_locked = true; } + hlist_del_init(&fl->hn); fl->is_dma_invoke_pend = false; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; is_locked = false; @@ -6102,7 +6103,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data; struct fastrpc_apps *me = &gfa; unsigned int ii; - unsigned long irq_flags = 0; if (!fl) return 0; @@ -6115,9 +6115,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) } } debugfs_remove(fl->debugfs_file); - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_del_init(&fl->hn); - spin_unlock_irqrestore(&me->hlock, irq_flags); fastrpc_file_put(fl); file->private_data = NULL; @@ -6508,7 +6505,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->exit_async = false; fl->multi_session_support = false; fl->set_session_info = false; - init_completion(&fl->work); init_completion(&fl->dma_invoke); fl->file_close = FASTRPC_PROCESS_DEFAULT_STATE; filp->private_data = fl; @@ -7733,7 
+7729,7 @@ static void fastrpc_print_debug_data(int cid) struct hlist_node *n = NULL; struct smq_invoke_ctx *ictx = NULL; struct fastrpc_tx_msg *tx_msg = NULL; - struct fastrpc_buf *buf = NULL; + struct fastrpc_buf *buf = NULL, *iter = NULL; struct fastrpc_mmap *map = NULL; unsigned long irq_flags = 0; @@ -7759,15 +7755,10 @@ static void fastrpc_print_debug_data(int cid) tx_index = chan->gmsg_log.tx_index; rx_index = chan->gmsg_log.rx_index; } - spin_lock_irqsave(&me->hlock, irq_flags); - hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { - err = fastrpc_file_get(fl); - if (err) { - spin_unlock_irqrestore(&me->hlock, irq_flags); - ADSPRPC_ERR("Failed to get user process reference for fl (%pK)\n", fl); - goto free_buf; - } - if (fl->cid == cid) { + + hlist_for_each_entry_safe(iter, n, &chan->initmems, hn_init) { + fl = iter->fl; + if ( fl && (fl->cid == cid)) { scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - @@ -7795,6 +7786,7 @@ static void fastrpc_print_debug_data(int cid) MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), "\nSession Maps\n"); + spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) { fastrpc_print_map(map, mini_dump_buff); } @@ -7804,7 +7796,6 @@ static void fastrpc_print_debug_data(int cid) fastrpc_print_map(map, mini_dump_buff); } mutex_unlock(&fl->map_mutex); - spin_lock_irqsave(&me->hlock, irq_flags); spin_lock(&fl->hlock); scnprintf(mini_dump_buff + strlen(mini_dump_buff), MINI_DUMP_DBG_SIZE - strlen(mini_dump_buff), @@ -7864,9 +7855,7 @@ static void fastrpc_print_debug_data(int cid) } spin_unlock(&fl->hlock); } - fastrpc_file_put(fl); } - spin_unlock_irqrestore(&me->hlock, irq_flags); spin_lock_irqsave(&chan->gmsg_log.lock, flags); if (rx_index) { for (i = rx_index, count = 0, len = 0 ; i > 0 && diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 0f08914af3..651eded3ac 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -898,8 +898,6 @@ struct fastrpc_file { 
uint32_t ws_timeout; bool untrusted_process; struct fastrpc_device *device; - /* Process kill will wait on work when ram dump collection in progress */ - struct completion work; /* Process kill will wait on bus driver invoke thread to complete its process */ struct completion dma_invoke; /* Flag to indicate invoke pending */ From a4afa6832a7cbe249e5e896981508ca691a9f297 Mon Sep 17 00:00:00 2001 From: Santosh Date: Mon, 24 Jun 2024 16:28:14 +0530 Subject: [PATCH 134/146] dsp-kernel: Validate the CID before accessing the channel mutex In the early stages of fastrpc_internal_invoke, we validate the user CID and handle failure cases. However, in the error scenario, an invalid CID can lead to issues when accessing the channel mutex. To prevent this, we should validate the CID before accessing the channel mutex via fastrpc user structure. Change-Id: Ic1f7ae01a749b57c9b9e69210314d694ebcf300b Signed-off-by: Santosh --- dsp/adsprpc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index ed8419cf77..74edaac4e7 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3499,10 +3499,9 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, context_free(ctx); trace_fastrpc_msg("context_free: end"); } - if (!kernel) { + if (!kernel && VALID_FASTRPC_CID(cid)) { mutex_lock(&fl->apps->channel[cid].smd_mutex); - if (VALID_FASTRPC_CID(cid) - && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) + if (fl->ssrcount != fl->apps->channel[cid].ssrcount) err = -ECONNRESET; mutex_unlock(&fl->apps->channel[cid].smd_mutex); } From 4cb6d58c44b68d61e354d2fe966bbfbc4093609d Mon Sep 17 00:00:00 2001 From: Santosh Date: Mon, 24 Jun 2024 16:28:14 +0530 Subject: [PATCH 135/146] dsp-kernel: Validate the CID before accessing the channel mutex In the early stages of fastrpc_internal_invoke, we validate the user CID and handle failure cases. However, in the error scenario, an invalid CID can lead to issues when accessing the channel mutex. 
To prevent this, we should validate the CID before accessing the channel mutex via fastrpc user structure. Change-Id: Ic1f7ae01a749b57c9b9e69210314d694ebcf300b Signed-off-by: Santosh --- dsp/adsprpc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index ed8419cf77..74edaac4e7 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -3499,10 +3499,9 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, context_free(ctx); trace_fastrpc_msg("context_free: end"); } - if (!kernel) { + if (!kernel && VALID_FASTRPC_CID(cid)) { mutex_lock(&fl->apps->channel[cid].smd_mutex); - if (VALID_FASTRPC_CID(cid) - && (fl->ssrcount != fl->apps->channel[cid].ssrcount)) + if (fl->ssrcount != fl->apps->channel[cid].ssrcount) err = -ECONNRESET; mutex_unlock(&fl->apps->channel[cid].smd_mutex); } From ddf3406a0cb8b8f44fe95505a509ceee80e36d2b Mon Sep 17 00:00:00 2001 From: Minghao Xue Date: Fri, 28 Jun 2024 11:31:39 +0800 Subject: [PATCH 136/146] dsp-kernel: Unify error code when remote subsystem is not up Currently, different error codes are returned if remote subsystem is not up when client tries to open dynamic PD. Need to unify them to -ECONNREFUSED. 
Change-Id: Iee6925724a29a4ab265c50f68baa267150b4058d Signed-off-by: Minghao Xue --- dsp/adsprpc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 74edaac4e7..4c04bcaf6f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6396,8 +6396,10 @@ static int fastrpc_channel_open(struct fastrpc_file *fl, uint32_t flags) goto bail; err = verify_transport_device(cid, fl->tvm_remote_domain); - if (err) + if (err) { + err = -ECONNREFUSED; goto bail; + } mutex_lock(&me->channel[cid].smd_mutex); if (me->channel[cid].ssrcount != From e2cfdde491698cf833e922e680935ce1f6c7528c Mon Sep 17 00:00:00 2001 From: Abhishek Singh Date: Fri, 21 Jun 2024 16:04:09 +0530 Subject: [PATCH 137/146] dsp-kernel: Do not search the global map in the process-specific list If a user makes the ioctl call for the fastrpc_internal_mmap with the global map flag, fd, and va corresponding to some map already present in the process-specific list, then this map present in the process- specific list could be added to the global list. Because global maps are also searched in the process-specific list. If a map gets removed from the global list and another concurrent thread is using the same map for a process-specific use case, it could lead to a use-after-free. Avoid searching the global map in the process-specific list. 
Change-Id: I59c820eb984945d39cd6e4b163307ea43ee4d2f4 Signed-off-by: Abhishek Singh --- dsp/adsprpc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index ed8419cf77..50d9e83629 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -852,7 +852,10 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, if ((va + len) < va) return -EFAULT; - if (mflags == ADSP_MMAP_DMA_BUFFER) { + if ((mflags == ADSP_MMAP_HEAP_ADDR) || + (mflags == ADSP_MMAP_REMOTE_HEAP_ADDR)) { + return -EFAULT; + } else if (mflags == ADSP_MMAP_DMA_BUFFER) { hlist_for_each_entry_safe(map, n, &fl->maps, hn) { if (map->buf == buf) { if (refs) { From c60ac212aabd299304dfbb54b1fc18c59247d9ae Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Fri, 28 Jun 2024 22:17:36 +0530 Subject: [PATCH 138/146] dsp-kernel: Fix to avoid untrusted pointer dereference Currently, the compat ioctl call distinguishes itself using a global flag. If a user sends a compat ioctl call followed by a normal ioctl call, it may result in using a user passed address as a kernel address in the fastrpcdriver. To address this issue, consider localizing the compat flag for the ioctl call. Change-Id: Ie8fc724424534102736b8c0bc594720547ab6ff6 Signed-off-by: rnallago --- dsp/adsprpc.c | 49 ++++++++++++++++++++++---------------------- dsp/adsprpc_compat.c | 5 ++--- dsp/adsprpc_shared.h | 9 +++++--- 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 74edaac4e7..3086e9ab6c 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1838,11 +1838,11 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, struct fastrpc_ioctl_invoke *invoke = &invokefd->inv; struct fastrpc_channel_ctx *chan = NULL; unsigned long irq_flags = 0; - uint32_t is_kernel_memory = 0; + uint32_t kernel_msg = ((kernel == COMPAT_MSG) ? 
USER_MSG : kernel); spin_lock(&fl->hlock); if (fl->clst.num_active_ctxs > MAX_PENDING_CTX_PER_SESSION && - !(kernel || invoke->handle < FASTRPC_STATIC_HANDLE_MAX)) { + !(kernel_msg || invoke->handle < FASTRPC_STATIC_HANDLE_MAX)) { err = -EDQUOT; spin_unlock(&fl->hlock); goto bail; @@ -1873,12 +1873,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, ctx->overs = (struct overlap *)(&ctx->attrs[bufs]); ctx->overps = (struct overlap **)(&ctx->overs[bufs]); - /* If user message, do not use copy_from_user to copy buffers for - * compat driver,as memory is already copied to kernel memory - * for compat driver - */ - is_kernel_memory = ((kernel == USER_MSG) ? (fl->is_compat) : kernel); - K_COPY_FROM_USER(err, is_kernel_memory, (void *)ctx->lpra, invoke->pra, + K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra, bufs * sizeof(*ctx->lpra)); if (err) { ADSPRPC_ERR( @@ -1961,7 +1956,15 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, spin_lock_irqsave(&chan->ctxlock, irq_flags); me->jobid[cid]++; - for (ii = ((kernel || ctx->handle < FASTRPC_STATIC_HANDLE_MAX) + + /* + * To prevent user invocations from exhausting all entries in context + * table, it is necessary to reserve a few context table entries for + * critical kernel and static RPC calls. The index will begin at 0 for + * static handles, while user handles start from + * NUM_KERNEL_AND_STATIC_ONLY_CONTEXTS. + */ + for (ii = ((kernel_msg || ctx->handle < FASTRPC_STATIC_HANDLE_MAX) ? 
0 : NUM_KERNEL_AND_STATIC_ONLY_CONTEXTS); ii < FASTRPC_CTX_MAX; ii++) { if (!chan->ctxtable[ii]) { @@ -3342,7 +3345,7 @@ static void fastrpc_update_invoke_count(uint32_t handle, uint64_t *perf_counter, static int fastrpc_check_pd_status(struct fastrpc_file *fl, char *sloc_name); int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, - uint32_t kernel, + uint32_t msg_type, struct fastrpc_ioctl_invoke_async *inv) { struct smq_invoke_ctx *ctx = NULL; @@ -3351,6 +3354,7 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, struct timespec64 invoket = {0}; uint64_t *perf_counter = NULL; bool isasyncinvoke = false, isworkdone = false; + uint32_t kernel = (msg_type == COMPAT_MSG) ? USER_MSG : msg_type; cid = fl->cid; VERIFY(err, VALID_FASTRPC_CID(cid) && @@ -3377,9 +3381,6 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, cid, invoke->handle); goto bail; } - } - - if (!kernel) { VERIFY(err, 0 == (err = context_restore_interrupted(fl, inv, &ctx))); if (err) @@ -3397,7 +3398,7 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, } trace_fastrpc_msg("context_alloc: begin"); - VERIFY(err, 0 == (err = context_alloc(fl, kernel, inv, &ctx))); + VERIFY(err, 0 == (err = context_alloc(fl, msg_type, inv, &ctx))); trace_fastrpc_msg("context_alloc: end"); if (err) goto bail; @@ -3819,7 +3820,7 @@ bail: } int fastrpc_internal_invoke2(struct fastrpc_file *fl, - struct fastrpc_ioctl_invoke2 *inv2) + struct fastrpc_ioctl_invoke2 *inv2, bool is_compat) { union { struct fastrpc_ioctl_invoke_async inv; @@ -3831,7 +3832,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, struct fastrpc_proc_sess_info sess_info; } p; struct fastrpc_dsp_capabilities *dsp_cap_ptr = NULL; - uint32_t size = 0; + uint32_t size = 0, kernel = 0; int err = 0, domain = fl->cid; if (inv2->req == FASTRPC_INVOKE2_ASYNC || @@ -3859,19 +3860,20 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, goto bail; } if (size > inv2->size) { - 
K_COPY_FROM_USER(err, fl->is_compat, &p.inv3, (void *)inv2->invparam, + K_COPY_FROM_USER(err, is_compat, &p.inv3, (void *)inv2->invparam, sizeof(struct fastrpc_ioctl_invoke_async_no_perf)); if (err) goto bail; memcpy(&p.inv, &p.inv3, sizeof(struct fastrpc_ioctl_invoke_crc)); memcpy(&p.inv.job, &p.inv3.job, sizeof(p.inv.job)); } else { - K_COPY_FROM_USER(err, fl->is_compat, &p.inv, (void *)inv2->invparam, size); + K_COPY_FROM_USER(err, is_compat, &p.inv, (void *)inv2->invparam, size); if (err) goto bail; } + kernel = (is_compat) ? COMPAT_MSG : USER_MSG; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode, - USER_MSG, &p.inv))); + kernel, &p.inv))); if (err) goto bail; break; @@ -3891,7 +3893,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, err = -EBADE; goto bail; } - K_COPY_FROM_USER(err, 0, &p.user_concurrency, + K_COPY_FROM_USER(err, is_compat, &p.user_concurrency, (void *)inv2->invparam, size); if (err) goto bail; @@ -3915,7 +3917,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, err = -EBADE; goto bail; } - K_COPY_FROM_USER(err, fl->is_compat, &p.buff_info, + K_COPY_FROM_USER(err, 0, &p.buff_info, (void *)inv2->invparam, inv2->size); if (err) goto bail; @@ -3930,7 +3932,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, err = -EBADE; goto bail; } - K_COPY_FROM_USER(err, fl->is_compat, &p.sess_info, + K_COPY_FROM_USER(err, is_compat, &p.sess_info, (void *)inv2->invparam, inv2->size); if (err) goto bail; @@ -6499,7 +6501,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->dsp_proc_init = 0; fl->dsp_process_state = PROCESS_CREATE_DEFAULT; fl->is_unsigned_pd = false; - fl->is_compat = false; fl->exit_notif = false; fl->exit_async = false; fl->multi_session_support = false; @@ -7499,7 +7500,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, err = -EFAULT; goto bail; } - VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, &p.inv2))); + VERIFY(err, 0 == (err = 
fastrpc_internal_invoke2(fl, &p.inv2, false))); if (err) goto bail; break; diff --git a/dsp/adsprpc_compat.c b/dsp/adsprpc_compat.c index 3cc60e220a..7c7b1e59aa 100644 --- a/dsp/adsprpc_compat.c +++ b/dsp/adsprpc_compat.c @@ -347,7 +347,7 @@ static int compat_fastrpc_ioctl_invoke(struct file *filp, if (err) return err; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, - fl->mode, USER_MSG, inv))); + fl->mode, COMPAT_MSG, inv))); return err; } @@ -484,7 +484,7 @@ static int compat_fastrpc_ioctl_invoke2(struct file *filp, if (err) return err; - VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, inv))); + VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, inv, true))); return err; } @@ -975,7 +975,6 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, if (!filp->f_op || !filp->f_op->unlocked_ioctl) return -ENOTTY; - fl->is_compat = true; switch (cmd) { case COMPAT_FASTRPC_IOCTL_INVOKE: case COMPAT_FASTRPC_IOCTL_INVOKE_FD: diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index 651eded3ac..cd8ed472ff 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -469,9 +469,14 @@ enum fastrpc_buf_type { /* Types of RPC calls to DSP */ enum fastrpc_msg_type { + /* 64 bit user application invoke message */ USER_MSG = 0, + /* kernel invoke message with zero pid */ KERNEL_MSG_WITH_ZERO_PID, + /* kernel invoke message with non zero pid to kill the PD in DSP */ KERNEL_MSG_WITH_NONZERO_PID, + /* 32 bit user application invoke message */ + COMPAT_MSG, }; /* Fastrpc remote pd type */ @@ -911,8 +916,6 @@ struct fastrpc_file { /* Flag to indicate dynamic process creation status*/ enum fastrpc_process_create_state dsp_process_state; bool is_unsigned_pd; - /* Flag to indicate 32 bit driver*/ - bool is_compat; /* Completion objects and state for dspsignals */ struct fastrpc_dspsignal *signal_groups[DSPSIGNAL_NUM_SIGNALS / DSPSIGNAL_GROUP_SIZE]; spinlock_t dspsignals_lock; @@ -939,7 +942,7 @@ int fastrpc_internal_invoke(struct fastrpc_file *fl, 
uint32_t mode, struct fastrpc_ioctl_invoke_async *inv); int fastrpc_internal_invoke2(struct fastrpc_file *fl, - struct fastrpc_ioctl_invoke2 *inv2); + struct fastrpc_ioctl_invoke2 *inv2, bool is_compat); int fastrpc_internal_munmap(struct fastrpc_file *fl, struct fastrpc_ioctl_munmap *ud); From e83cbc32d8d1b465f27c104dbd34c58b079e9374 Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Tue, 18 Jun 2024 11:24:25 +0530 Subject: [PATCH 139/146] dsp-kernel: Fix memory leak in compact ioctl invoke Currently, compact fastrpc ioctl functions allocate memory dynamically and return without freeing this memory. Do memory free before return. Change-Id: I4591ccc951e7e43362a4c2d9e0265c89ab8582f8 Signed-off-by: rnallago --- dsp/adsprpc_compat.c | 95 ++++++++++++++++++++++++++++++-------------- 1 file changed, 66 insertions(+), 29 deletions(-) diff --git a/dsp/adsprpc_compat.c b/dsp/adsprpc_compat.c index 7c7b1e59aa..5212518ab8 100644 --- a/dsp/adsprpc_compat.c +++ b/dsp/adsprpc_compat.c @@ -328,7 +328,7 @@ static int compat_fastrpc_ioctl_invoke(struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_fastrpc_ioctl_invoke_async __user *inv32; - struct fastrpc_ioctl_invoke_async *inv; + struct fastrpc_ioctl_invoke_async *inv = NULL; compat_uint_t sc = 0; int err = 0, len = 0; struct fastrpc_file *fl = (struct fastrpc_file *)filp->private_data; @@ -342,12 +342,16 @@ static int compat_fastrpc_ioctl_invoke(struct file *filp, sizeof(*inv) + len * sizeof(union remote_arg), GFP_KERNEL))); if (err) return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32, inv, cmd, sc)); - if (err) + if (err) { + kfree(inv); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode, COMPAT_MSG, inv))); + kfree(inv); return err; } @@ -474,17 +478,20 @@ static int compat_fastrpc_ioctl_invoke2(struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_fastrpc_ioctl_invoke2 __user *inv32; - struct fastrpc_ioctl_invoke2 *inv; + struct 
fastrpc_ioctl_invoke2 *inv = NULL; int err = 0; struct fastrpc_file *fl = (struct fastrpc_file *)filp->private_data; inv32 = compat_ptr(arg); VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke2(inv32, &inv, cmd)); - if (err) + if (err) { + kfree(inv); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_invoke2(fl, inv, true))); + kfree(inv); return err; } @@ -740,7 +747,7 @@ static int compat_fastrpc_control(struct fastrpc_file *fl, { int err = 0; struct compat_fastrpc_ioctl_control __user *ctrl32; - struct fastrpc_ioctl_control *ctrl; + struct fastrpc_ioctl_control *ctrl = NULL; compat_uptr_t p; ctrl32 = compat_ptr(arg); @@ -751,17 +758,19 @@ static int compat_fastrpc_control(struct fastrpc_file *fl, VERIFY(err, 0 == compat_get_fastrpc_ioctl_control(ctrl32, ctrl)); if (err) - return err; + goto bail; VERIFY(err, 0 == (err = fastrpc_internal_control(fl, ctrl))); if (err) - return err; + goto bail; err = get_user(p, &ctrl32->req); if (err) - return err; + goto bail; if (p == FASTRPC_CONTROL_KALLOC) { memcpy(&p, &ctrl->kalloc.kalloc_support, sizeof(ctrl->kalloc.kalloc_support)); err |= put_user(p, &ctrl32->kalloc.kalloc_support); } +bail: + kfree(ctrl); return err; } @@ -784,20 +793,21 @@ static int compat_fastrpc_get_dsp_info(struct fastrpc_file *fl, } err = get_user(u, &info32->domain); if (err) - return err; + goto bail; memcpy(&info->domain, &u, sizeof(info->domain)); err = get_user(u, &info32->attribute_ID); if (err) - return err; + goto bail; memcpy(&info->attribute_ID, &u, sizeof(info->attribute_ID)); err = fastrpc_get_info_from_kernel(info, fl); if (err) - return err; + goto bail; err = compat_put_fastrpc_ioctl_get_dsp_info(info32, info); - +bail: + kfree(info); return err; } @@ -810,7 +820,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, case COMPAT_FASTRPC_IOCTL_MEM_MAP: { struct compat_fastrpc_ioctl_mem_map __user *map32; - struct fastrpc_ioctl_mem_map *map; + struct fastrpc_ioctl_mem_map *map = NULL; map32 = 
compat_ptr(arg); VERIFY(err, NULL != (map = kmalloc( @@ -819,20 +829,25 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, return -EFAULT; err = compat_get_fastrpc_ioctl_mem_map(map32, map); - if (err) + if (err) { + kfree(map); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_mem_map(fl, map))); - if (err) + if (err) { + kfree(map); return err; + } VERIFY(err, 0 == compat_put_fastrpc_ioctl_mem_map(map32, map)); + kfree(map); return err; } case COMPAT_FASTRPC_IOCTL_MEM_UNMAP: { struct compat_fastrpc_ioctl_mem_unmap __user *unmap32; - struct fastrpc_ioctl_mem_unmap *unmap; + struct fastrpc_ioctl_mem_unmap *unmap = NULL; unmap32 = compat_ptr(arg); unmap = kmalloc(sizeof(*unmap), GFP_KERNEL); @@ -840,17 +855,20 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, return -EFAULT; err = compat_get_fastrpc_ioctl_mem_unmap(unmap32, unmap); - if (err) + if (err) { + kfree(unmap); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_mem_unmap(fl, unmap))); + kfree(unmap); return err; } case COMPAT_FASTRPC_IOCTL_MMAP: { struct compat_fastrpc_ioctl_mmap __user *map32; - struct fastrpc_ioctl_mmap *map; + struct fastrpc_ioctl_mmap *map = NULL; map32 = compat_ptr(arg); VERIFY(err, NULL != (map = kmalloc( @@ -858,18 +876,21 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, if (err) return -EFAULT; VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap(map32, map)); - if (err) + if (err) { + kfree(map); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, map))); VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map)); + kfree(map); return err; } case COMPAT_FASTRPC_IOCTL_MMAP_64: { struct compat_fastrpc_ioctl_mmap_64 __user *map32; - struct fastrpc_ioctl_mmap *map; + struct fastrpc_ioctl_mmap *map = NULL; map32 = compat_ptr(arg); VERIFY(err, NULL != (map = kmalloc( @@ -877,16 +898,19 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, if (err) 
return -EFAULT; VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map)); - if (err) + if (err) { + kfree(map); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, map))); VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map)); + kfree(map); return err; } case COMPAT_FASTRPC_IOCTL_MUNMAP: { struct compat_fastrpc_ioctl_munmap __user *unmap32; - struct fastrpc_ioctl_munmap *unmap; + struct fastrpc_ioctl_munmap *unmap = NULL; unmap32 = compat_ptr(arg); VERIFY(err, NULL != (unmap = kmalloc( @@ -895,10 +919,13 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, return -EFAULT; VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap(unmap32, unmap)); - if (err) + if (err) { + kfree(unmap); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, unmap))); + kfree(unmap); return err; } default: @@ -991,7 +1018,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, case COMPAT_FASTRPC_IOCTL_MUNMAP_64: { struct compat_fastrpc_ioctl_munmap_64 __user *unmap32; - struct fastrpc_ioctl_munmap *unmap; + struct fastrpc_ioctl_munmap *unmap = NULL; unmap32 = compat_ptr(arg); VERIFY(err, NULL != (unmap = kmalloc( @@ -1001,11 +1028,15 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32, unmap)); - if (err) + if (err) { + kfree(unmap); return err; + } VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, unmap))); + + kfree(unmap); return err; } case COMPAT_FASTRPC_IOCTL_INIT: @@ -1013,7 +1044,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, case COMPAT_FASTRPC_IOCTL_INIT_ATTRS: { struct compat_fastrpc_ioctl_init_attrs __user *init32; - struct fastrpc_ioctl_init_attrs *init; + struct fastrpc_ioctl_init_attrs *init = NULL; init32 = compat_ptr(arg); VERIFY(err, NULL != (init = kmalloc( @@ -1022,17 +1053,20 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, return 
-EFAULT; VERIFY(err, 0 == compat_get_fastrpc_ioctl_init(init32, init, cmd)); - if (err) + if (err) { + kfree(init); return err; + } VERIFY(err, 0 == (err = fastrpc_init_process(fl, init))); + kfree(init); return err; } case FASTRPC_IOCTL_GETINFO: { compat_uptr_t __user *info32; - uint32_t *info; + uint32_t *info = NULL; compat_uint_t u; info32 = compat_ptr(arg); @@ -1042,11 +1076,14 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; err = get_user(u, info32); memcpy(info, &u, sizeof(u)); - if (err) + if (err) { + kfree(info); return err; + } VERIFY(err, 0 == (err = fastrpc_get_info(fl, info))); memcpy(&u, info, sizeof(*info)); err |= put_user(u, info32); + kfree(info); return err; } case FASTRPC_IOCTL_SETMODE: From 0e27b6c7d2bd8d0453e4465ac2ca49a8f8c440e2 Mon Sep 17 00:00:00 2001 From: quic_anane Date: Thu, 27 Jun 2024 12:30:41 +0530 Subject: [PATCH 140/146] dsp-kernel: Handle UAF scenario in put_args Currently, the DSP updates header buffers with unused DMA handle fds. In the put_args section, if any DMA handle FDs are present in the header buffer, the corresponding map is freed. However, since the header buffer is exposed to users in unsigned PD, users can update invalid FDs. If this invalid FD matches with any FD that is already in use, it could lead to a use-after-free (UAF) vulnerability. As a solution, add DMA handle references for DMA FDs, and the map for the FD will be freed only when a reference is found.
Change-Id: Ie4d19dc0ef0ebdda5ed2fe6f7b64598ef661a63f Signed-off-by: quic_anane --- dsp/adsprpc.c | 64 +++++++++++++++++++++++++++++++------------- dsp/adsprpc_shared.h | 2 ++ 2 files changed, 48 insertions(+), 18 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6ec85443eb..8cfb8e6f28 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -955,6 +955,8 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, int fd, uintptr_t va, map->refs == 1 && /* Remove map only if it isn't being used in any pending RPC calls */ !map->ctx_refs && + /* Remove map only if it isn't being used by DSP */ + !map->dma_handle_refs && /* Skip unmap if it is fastrpc shell memory */ !map->is_filemap) { match = map; @@ -994,8 +996,9 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) if (map->flags == ADSP_MMAP_HEAP_ADDR || map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { spin_lock_irqsave(&me->hlock, irq_flags); - map->refs--; - if (!map->refs && !map->is_persistent && !map->ctx_refs) + if (map->refs) + map->refs--; + if (!map->refs && !map->is_persistent) hlist_del_init(&map->hn); if (map->refs > 0) { ADSPRPC_WARN( @@ -1008,10 +1011,14 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) map->in_use = false; spin_unlock_irqrestore(&me->hlock, irq_flags); } else { - map->refs--; - if (!map->refs && !map->ctx_refs) + if (map->refs) + map->refs--; + /* flags is passed as 1 during fastrpc_file_free (ie process exit), + * so that maps will be cleared even though references are present. 
+ */ + if (flags || (!map->refs && !map->ctx_refs && !map->dma_handle_refs)) hlist_del_init(&map->hn); - if (map->refs > 0 && !flags) + else return; } if (map->flags == ADSP_MMAP_HEAP_ADDR || @@ -2501,21 +2508,22 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) if (err) { for (j = bufs; j < i; j++) { /* - * Due to error decrement ctx refs count before mmap free + * Due to error decrement refs count before mmap free * for each in/out handle, if map created */ - if (ctx->maps[j] && ctx->maps[j]->ctx_refs) - ctx->maps[j]->ctx_refs--; - fastrpc_mmap_free(ctx->maps[j], 0); + if (ctx->maps[j] && ctx->maps[j]->dma_handle_refs) { + ctx->maps[j]->dma_handle_refs--; + fastrpc_mmap_free(ctx->maps[j], 0); + } } mutex_unlock(&ctx->fl->map_mutex); goto bail; } else if (ctx->maps[i]) { /* - * Increment ctx refs count for in/out handle if map created + * Increment refs count for in/out handle if map created * and no error, indicate map under use in remote call */ - ctx->maps[i]->ctx_refs++; + ctx->maps[i]->dma_handle_refs++; } ipage += 1; } @@ -2667,14 +2675,33 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) rpra[i].buf.pv = buf; } PERF_END); + /* Since we are not holding map_mutex during get args whole time + * it is possible that dma handle map may be removed by some invalid + * fd passed by DSP. 
Inside the lock check if the map present or not + */ + mutex_lock(&ctx->fl->map_mutex); for (i = bufs; i < bufs + handles; ++i) { - struct fastrpc_mmap *map = ctx->maps[i]; + struct fastrpc_mmap *mmap = NULL; + /* check if map was created */ + if (ctx->maps[i]) { + /* check if map still exist */ + if (!fastrpc_mmap_find(ctx->fl, ctx->fds[i], NULL, 0, 0, + 0, 0, &mmap)) { + if (mmap) { + pages[i].addr = mmap->phys; + pages[i].size = mmap->size; + } - if (map) { - pages[i].addr = map->phys; - pages[i].size = map->size; + } else { + /* map already freed by some other call */ + mutex_unlock(&ctx->fl->map_mutex); + ADSPRPC_ERR("could not find map associated with dma hadle fd %d \n", + ctx->fds[i]); + goto bail; + } } } + mutex_unlock(&ctx->fl->map_mutex); fdlist = (uint64_t *)&pages[bufs + handles]; crclist = (uint32_t *)&fdlist[M_FDLIST]; /* reset fds, crc and early wakeup hint memory */ @@ -2883,9 +2910,10 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, * Decrement ctx refs count before mmap free, * indicate remote call no longer using it */ - if (mmap && mmap->ctx_refs) - mmap->ctx_refs--; - fastrpc_mmap_free(mmap, 0); + if (mmap && mmap->dma_handle_refs) { + mmap->dma_handle_refs = 0; + fastrpc_mmap_free(mmap, 0); + } } } mutex_unlock(&ctx->fl->map_mutex); diff --git a/dsp/adsprpc_shared.h b/dsp/adsprpc_shared.h index cd8ed472ff..0b773fd74e 100644 --- a/dsp/adsprpc_shared.h +++ b/dsp/adsprpc_shared.h @@ -794,6 +794,8 @@ struct fastrpc_mmap { char *servloc_name; /* Indicate which daemon mapped this */ /* Indicates map is being used by a pending RPC call */ unsigned int ctx_refs; + /* Map in use for dma handle */ + unsigned int dma_handle_refs; }; enum fastrpc_perfkeys { From 1145bbef0185405f8aeb8cf46353a7b9fb93b1c1 Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Wed, 17 Jul 2024 12:12:33 +0530 Subject: [PATCH 141/146] dsp-kernel: Fix to use copy from user for compat invoke user pointers When allocating context to copy the compat invoke call 
arguments data, it incorrectly treats compat invoke call arguments as kernel memory, leading to exceptions. The fix is to recognize them as userspace pointers. Change-Id: I336b33156498103d3c3591768be98e0c105dda89 Signed-off-by: rnallago --- dsp/adsprpc.c | 8 ++++---- dsp/adsprpc_compat.c | 46 +++++++++++++++++--------------------------- 2 files changed, 22 insertions(+), 32 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 8cfb8e6f28..98a0ddd498 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -1894,7 +1894,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, } if (invokefd->fds) { - K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds, + K_COPY_FROM_USER(err, kernel_msg, ctx->fds, invokefd->fds, bufs * sizeof(*ctx->fds)); if (err) { ADSPRPC_ERR( @@ -1907,7 +1907,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, ctx->fds = NULL; } if (invokefd->attrs) { - K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs, + K_COPY_FROM_USER(err, kernel_msg, ctx->attrs, invokefd->attrs, bufs * sizeof(*ctx->attrs)); if (err) { ADSPRPC_ERR( @@ -1952,7 +1952,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, ctx->perf->tid = fl->tgid; } if (invokefd->job) { - K_COPY_FROM_USER(err, kernel, &ctx->asyncjob, invokefd->job, + K_COPY_FROM_USER(err, kernel_msg, &ctx->asyncjob, invokefd->job, sizeof(ctx->asyncjob)); if (err) goto bail; @@ -3924,7 +3924,7 @@ int fastrpc_internal_invoke2(struct fastrpc_file *fl, err = -EBADE; goto bail; } - K_COPY_FROM_USER(err, is_compat, &p.user_concurrency, + K_COPY_FROM_USER(err, 0, &p.user_concurrency, (void *)inv2->invparam, size); if (err) goto bail; diff --git a/dsp/adsprpc_compat.c b/dsp/adsprpc_compat.c index 5212518ab8..944d8b37ae 100644 --- a/dsp/adsprpc_compat.c +++ b/dsp/adsprpc_compat.c @@ -280,7 +280,6 @@ static int compat_get_fastrpc_ioctl_invoke( if (err) return err; pra32 = compat_ptr(p); - pra = (union remote_arg *)(inv + 1); for (j = 0; j < len; j++) { err |= 
get_user(p, &pra32[j].buf.pv); memcpy((uintptr_t *)&pra[j].buf.pv, &p, sizeof(p)); @@ -288,12 +287,11 @@ static int compat_get_fastrpc_ioctl_invoke( memcpy(&pra[j].buf.len, &s, sizeof(s)); } - inv->fds = NULL; if (cmd != COMPAT_FASTRPC_IOCTL_INVOKE) { err |= get_user(p, &inv32->fds); memcpy(&inv->fds, &p, sizeof(p)); } - inv->attrs = NULL; + if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) || (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) || (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF) || @@ -301,19 +299,17 @@ static int compat_get_fastrpc_ioctl_invoke( err |= get_user(p, &inv32->attrs); memcpy(&inv->attrs, &p, sizeof(p)); } - inv->crc = NULL; + if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) || (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF)) { err |= get_user(p, &inv32->crc); memcpy(&inv->crc, &p, sizeof(p)); } - inv->job = NULL; + if (cmd == FASTRPC_INVOKE2_ASYNC) { err |= get_user(p, &inv32->job); memcpy(&inv->job, &p, sizeof(p)); } - inv->perf_kernel = NULL; - inv->perf_dsp = NULL; if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_PERF) || (cmd == FASTRPC_INVOKE2_ASYNC)) { err |= get_user(k, &inv32->perf_kernel); @@ -338,7 +334,7 @@ static int compat_fastrpc_ioctl_invoke(struct file *filp, if (err) return err; len = REMOTE_SCALARS_LENGTH(sc); - VERIFY(err, NULL != (inv = kmalloc( + VERIFY(err, NULL != (inv = kzalloc( sizeof(*inv) + len * sizeof(union remote_arg), GFP_KERNEL))); if (err) return -EFAULT; @@ -390,7 +386,7 @@ static int compat_get_fastrpc_ioctl_invoke2( if (err) goto bail; len = REMOTE_SCALARS_LENGTH(sc); - VERIFY(err, NULL != (inv2_user = kmalloc( + VERIFY(err, NULL != (inv2_user = kzalloc( sizeof(*inv2_user) + sizeof(*asyncinv_user) + len * sizeof(union remote_arg), GFP_KERNEL))); if (err) { @@ -406,11 +402,8 @@ static int compat_get_fastrpc_ioctl_invoke2( if (err) goto bail; - asyncinv_user->job = NULL; err |= get_user(p, &lasync32_old->job); memcpy(&asyncinv_user->job, &p, sizeof(p)); - asyncinv_user->perf_kernel = NULL; - asyncinv_user->perf_dsp = NULL; } else { 
VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(lasync32, asyncinv_user, req, sc)); @@ -420,8 +413,6 @@ static int compat_get_fastrpc_ioctl_invoke2( memcpy(&inv2_user->req, &req, sizeof(req)); inv2_user->invparam = (uintptr_t)asyncinv_user; inv2_user->size = sizeof(*asyncinv_user); - if (err) - goto bail; break; } case FASTRPC_INVOKE2_ASYNC_RESPONSE: @@ -452,7 +443,7 @@ static int compat_get_fastrpc_ioctl_invoke2( goto bail; } } - VERIFY(err, NULL != (inv2_user = kmalloc( + VERIFY(err, NULL != (inv2_user = kzalloc( sizeof(*inv2_user), GFP_KERNEL))); if (err) { err = -EFAULT; @@ -461,16 +452,15 @@ static int compat_get_fastrpc_ioctl_invoke2( memcpy(&inv2_user->req, &req, sizeof(req)); memcpy(&inv2_user->invparam, &pparam, sizeof(pparam)); memcpy(&inv2_user->size, &size, sizeof(size)); - if (err) - goto bail; break; } default: err = -ENOTTY; break; } - *inva = inv2_user; + bail: + *inva = inv2_user; return err; } @@ -751,7 +741,7 @@ static int compat_fastrpc_control(struct fastrpc_file *fl, compat_uptr_t p; ctrl32 = compat_ptr(arg); - VERIFY(err, NULL != (ctrl = kmalloc( + VERIFY(err, NULL != (ctrl = kzalloc( sizeof(*ctrl), GFP_KERNEL))); if (err) return -EFAULT; @@ -784,7 +774,7 @@ static int compat_fastrpc_get_dsp_info(struct fastrpc_file *fl, size_t info_size = 0; info32 = compat_ptr(arg); - VERIFY(err, NULL != (info = kmalloc( + VERIFY(err, NULL != (info = kzalloc( sizeof(*info), GFP_KERNEL))); info_size = sizeof(*info); if (err) { @@ -823,7 +813,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, struct fastrpc_ioctl_mem_map *map = NULL; map32 = compat_ptr(arg); - VERIFY(err, NULL != (map = kmalloc( + VERIFY(err, NULL != (map = kzalloc( sizeof(*map), GFP_KERNEL))); if (err) return -EFAULT; @@ -850,7 +840,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, struct fastrpc_ioctl_mem_unmap *unmap = NULL; unmap32 = compat_ptr(arg); - unmap = kmalloc(sizeof(*unmap), GFP_KERNEL); + unmap = 
kzalloc(sizeof(*unmap), GFP_KERNEL); if (unmap == NULL) return -EFAULT; @@ -871,7 +861,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, struct fastrpc_ioctl_mmap *map = NULL; map32 = compat_ptr(arg); - VERIFY(err, NULL != (map = kmalloc( + VERIFY(err, NULL != (map = kzalloc( sizeof(*map), GFP_KERNEL))); if (err) return -EFAULT; @@ -893,7 +883,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, struct fastrpc_ioctl_mmap *map = NULL; map32 = compat_ptr(arg); - VERIFY(err, NULL != (map = kmalloc( + VERIFY(err, NULL != (map = kzalloc( sizeof(*map), GFP_KERNEL))); if (err) return -EFAULT; @@ -913,7 +903,7 @@ static inline long compat_fastrpc_mmap_device_ioctl(struct fastrpc_file *fl, struct fastrpc_ioctl_munmap *unmap = NULL; unmap32 = compat_ptr(arg); - VERIFY(err, NULL != (unmap = kmalloc( + VERIFY(err, NULL != (unmap = kzalloc( sizeof(*unmap), GFP_KERNEL))); if (err) return -EFAULT; @@ -1021,7 +1011,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, struct fastrpc_ioctl_munmap *unmap = NULL; unmap32 = compat_ptr(arg); - VERIFY(err, NULL != (unmap = kmalloc( + VERIFY(err, NULL != (unmap = kzalloc( sizeof(*unmap), GFP_KERNEL))); if (err) @@ -1047,7 +1037,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, struct fastrpc_ioctl_init_attrs *init = NULL; init32 = compat_ptr(arg); - VERIFY(err, NULL != (init = kmalloc( + VERIFY(err, NULL != (init = kzalloc( sizeof(*init), GFP_KERNEL))); if (err) return -EFAULT; @@ -1070,7 +1060,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, compat_uint_t u; info32 = compat_ptr(arg); - VERIFY(err, NULL != (info = kmalloc( + VERIFY(err, NULL != (info = kzalloc( sizeof(*info), GFP_KERNEL))); if (err) return -EFAULT; From 73d3d93c95462c017ac58e7bbd7aacdae4f27369 Mon Sep 17 00:00:00 2001 From: Ramesh Nallagopu Date: Thu, 11 Jul 2024 14:33:45 +0530 Subject: [PATCH 142/146] dsp-kernel: Handle memory leak in 
fastrpc file free fastrpc file free return if session context is NULL. PM QOS request memory free doesn't happen before this return, which leads to memory leak. Do memory cleanup to handle this scenario. Change-Id: I819ba74a7a0b3e2974df552fad8aca55a892df87 Signed-off-by: rnallago --- dsp/adsprpc.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 6ec85443eb..a3da8ed03f 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6001,16 +6001,8 @@ skip_dmainvoke_wait: is_locked = false; spin_unlock_irqrestore(&fl->apps->hlock, irq_flags); - if (!fl->sctx) { - spin_lock_irqsave(&me->hlock, irq_flags); - /* Reset the tgid usage to false */ - if (fl->tgid_frpc != -1) - frpc_tgid_usage_array[fl->tgid_frpc] = false; - spin_unlock_irqrestore(&me->hlock, irq_flags); - kfree(fl); - fl = NULL; - return; - } + if (!fl->sctx) + goto bail; //Dummy wake up to exit Async worker thread spin_lock_irqsave(&fl->aqlock, flags); @@ -6064,23 +6056,22 @@ skip_dmainvoke_wait: if (fl->device && is_driver_closed) device_unregister(&fl->device->dev); - spin_lock_irqsave(&me->hlock, irq_flags); - /* Reset the tgid usage to false */ - if (fl->tgid_frpc != -1) - frpc_tgid_usage_array[fl->tgid_frpc] = false; - spin_unlock_irqrestore(&me->hlock, irq_flags); - VERIFY(err, VALID_FASTRPC_CID(cid)); if (!err && fl->sctx) fastrpc_session_free(&fl->apps->channel[cid], fl->sctx); if (!err && fl->secsctx) fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx); - for (i = 0; i < (DSPSIGNAL_NUM_SIGNALS / DSPSIGNAL_GROUP_SIZE); i++) kfree(fl->signal_groups[i]); - mutex_destroy(&fl->signal_create_mutex); - fastrpc_remote_buf_list_free(fl); + +bail: + spin_lock_irqsave(&me->hlock, irq_flags); + /* Reset the tgid usage to false */ + if (fl->tgid_frpc != -1) + frpc_tgid_usage_array[fl->tgid_frpc] = false; + spin_unlock_irqrestore(&me->hlock, irq_flags); + mutex_destroy(&fl->signal_create_mutex); mutex_destroy(&fl->map_mutex); 
mutex_destroy(&fl->internal_map_mutex); kfree(fl->dev_pm_qos_req); From d064f9840e1381c6f77372473fe623694ddc5f0d Mon Sep 17 00:00:00 2001 From: mingzh Date: Fri, 5 Jul 2024 14:45:20 +0800 Subject: [PATCH 143/146] dsp-kernel: use memcpy() instead of strcpy() Current code uses strcpy() to copy strings, which has a problem with potential buffer overflows if the source string is longer than the destination buffer. We can improve this by using memcpy() with a specified length, because it allows us to control the number of bytes copied and thus prevent buffer overflows. Change-Id: I9dd5da343bfd63e4e031a66fa26b103e3da573e0 Signed-off-by: mingzh --- dsp/fastrpc_trace.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/dsp/fastrpc_trace.h b/dsp/fastrpc_trace.h index f89c69df1b..16f0eb65e7 100644 --- a/dsp/fastrpc_trace.h +++ b/dsp/fastrpc_trace.h @@ -385,7 +385,10 @@ TRACE_EVENT(fastrpc_msg, memcpy(__get_str(buf), (message), (sizeof(message) - 1)); __get_str(buf)[sizeof(message) - 1] = '\0'; #else - __assign_str(buf, message); + if (message) + __assign_str_len(buf, message, (sizeof(message) - 1)); + else + memcpy(__get_str(buf), "(null)", sizeof("(null)")); #endif ), TP_printk(" %s", __get_str(buf)) @@ -410,7 +413,10 @@ TRACE_EVENT(fastrpc_dspsignal, memcpy(__get_str(buf), (event), (sizeof(event) - 1)); __get_str(buf)[sizeof(event) - 1] = '\0'; #else - __assign_str(buf, event); + if (event) + __assign_str_len(buf, event, (sizeof(event) - 1)); + else + memcpy(__get_str(buf), "(null)", sizeof("(null)")); #endif __entry->signal_id = signal_id; __entry->state = state; From 2466bcf3cea4ed9b37b7e8983e7e6b7ffd92e8fc Mon Sep 17 00:00:00 2001 From: quic_anane Date: Tue, 16 Jul 2024 23:37:45 +0530 Subject: [PATCH 144/146] msm: ADSPRPC: Avoid Out-Of-Bounds access Currently, when adding duplicate sessions to an array that holds session information, no check is performed to avoid going out of bounds. Add a check to confirm that the index is not out of bounds. 
Change-Id: Ib7abcc5347ba49a8c787ec32e8519a11085456d9 Signed-off-by: quic_anane --- dsp/adsprpc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index d7e2c3e300..631d1b31d5 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -8172,6 +8172,12 @@ static int fastrpc_cb_probe(struct device *dev) for (j = 1; j < sharedcb_count && chan->sesscount < NUM_SESSIONS; j++) { chan->sesscount++; + VERIFY(err, chan->sesscount < NUM_SESSIONS); + if (err) { + ADSPRPC_WARN("failed to add shared session, maximum sessions (%d) reached \n", + NUM_SESSIONS); + break; + } dup_sess = &chan->session[chan->sesscount]; memcpy(dup_sess, sess, sizeof(struct fastrpc_session_ctx)); From 8168f4e0d505831f93de01088ccb1167253bfac3 Mon Sep 17 00:00:00 2001 From: ANANDU KRISHNAN E Date: Wed, 14 Aug 2024 10:39:55 +0530 Subject: [PATCH 145/146] msm: adsprpc: Avoid taking reference for group_info Currently, the get_current_groups API accesses group info, which increases the usage refcount. If the IOCTL using the get_current_groups API is called many times, the usage counter overflows. To avoid this, access group info without taking a reference. A reference is not required as group info is not released during the IOCTL call. 
Change-Id: Iec31c90f9fd1a837fb697d5d7d1baba7d285374d Signed-off-by: ANANDU KRISHNAN E --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index 631d1b31d5..1e840563cf 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6551,7 +6551,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) static int fastrpc_get_process_gids(struct gid_list *gidlist) { - struct group_info *group_info = get_current_groups(); + struct group_info *group_info = current_cred()->group_info; int i = 0, err = 0, num_gids = group_info->ngroups + 1; unsigned int *gids = NULL; From 99c2a354139addcb9ed226f82b0e8a0600cdfcbc Mon Sep 17 00:00:00 2001 From: ANANDU KRISHNAN E Date: Wed, 14 Aug 2024 10:39:55 +0530 Subject: [PATCH 146/146] msm: adsprpc: Avoid taking reference for group_info Currently, the get_current_groups API accesses group info, which increases the usage refcount. If the IOCTL using the get_current_groups API is called many times, the usage counter overflows. To avoid this, access group info without taking a reference. A reference is not required as group info is not released during the IOCTL call. Change-Id: Iec31c90f9fd1a837fb697d5d7d1baba7d285374d Signed-off-by: ANANDU KRISHNAN E --- dsp/adsprpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/adsprpc.c b/dsp/adsprpc.c index d7e2c3e300..64a15e8255 100644 --- a/dsp/adsprpc.c +++ b/dsp/adsprpc.c @@ -6551,7 +6551,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) static int fastrpc_get_process_gids(struct gid_list *gidlist) { - struct group_info *group_info = get_current_groups(); + struct group_info *group_info = current_cred()->group_info; int i = 0, err = 0, num_gids = group_info->ngroups + 1; unsigned int *gids = NULL;