remoteproc: Add snapshot of all remoteproc drivers
Add snapshot of all remoteproc drivers from msm-5.15 commit 4eea71951526
("virt: gunyah: rm_core: Clean up sequence idr earlier"). Also remove the
duplicate definition of sm8350_cdsp_resource.

Change-Id: I7391d4c61eb3b389e186cb7846ca0442e1a0bdef
Signed-off-by: Guru Das Srinagesh <quic_gurus@quicinc.com>
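A minimal sketch of how a client driver might use the early SSR notifier API this snapshot adds to qcom_common.c. The "lpass" subsystem name, the callback body, and the header carrying the prototypes are illustrative assumptions, not part of this change:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>	/* assumed location of the early-notifier prototypes */

/* Runs from the early_notifier_list before the remote processor is torn down */
static int example_early_ssr_cb(struct notifier_block *nb, unsigned long code,
				void *data)
{
	pr_info("early SSR notification, code %lu\n", code);
	return NOTIFY_DONE;
}

static struct notifier_block example_early_nb = {
	.notifier_call = example_early_ssr_cb,
};

static void *early_cookie;

static int example_register(void)
{
	/* "lpass" is an assumed subsystem name for illustration */
	early_cookie = qcom_register_early_ssr_notifier("lpass", &example_early_nb);
	if (IS_ERR(early_cookie))
		return PTR_ERR(early_cookie);
	return 0;
}

static void example_unregister(void)
{
	qcom_unregister_early_ssr_notifier(early_cookie, &example_early_nb);
}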
parent d1d5c9c42a
commit 69135cae7d
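qcom_minidump() now takes an rproc_dumpfn_t callback (declared in the qcom_common.h hunk further down), which is handed to rproc_coredump_add_custom_segment() for every minidump region. A rough sketch of such a callback follows, assuming the region's device address can simply be ioremapped; the real platform drivers may map segments differently:

#include <linux/io.h>
#include <linux/remoteproc.h>

/* Signature matches the rproc_dumpfn_t typedef added in qcom_common.h */
static void example_minidump_copy(struct rproc *rproc,
				  struct rproc_dump_segment *segment,
				  void *dest, size_t offset, size_t size)
{
	void __iomem *base;

	/* Assumption: the segment's device address maps 1:1 to a physical address */
	base = ioremap((phys_addr_t)segment->da + offset, size);
	if (!base)
		return;

	memcpy_fromio(dest, base, size);
	iounmap(base);
}

/* A platform driver's coredump path could then call, for example:
 *	qcom_minidump(rproc, minidump_id, example_minidump_copy);
 */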
@@ -4,7 +4,7 @@
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2015 Sony Mobile Communications Inc
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2013, 2020-2021 The Linux Foundation. All rights reserved.
*/

#include <linux/firmware.h>
@@ -12,20 +12,27 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <trace/hooks/remoteproc.h>
#include <trace/events/rproc_qcom.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"

#define SSR_NOTIF_TIMEOUT CONFIG_RPROC_SSR_NOTIF_TIMEOUT

#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev)
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)

#define GLINK_SUBDEV_NAME "glink"
#define SMD_SUBDEV_NAME "smd"
#define SSR_SUBDEV_NAME "ssr"

#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
#define SBL_MINIDUMP_SMEM_ID 602
@@ -84,12 +91,36 @@ struct minidump_global_toc {
struct qcom_ssr_subsystem {
const char *name;
struct srcu_notifier_head notifier_list;
struct srcu_notifier_head early_notifier_list;
struct list_head list;
};

static struct kobject *sysfs_kobject;
bool qcom_device_shutdown_in_progress;
EXPORT_SYMBOL(qcom_device_shutdown_in_progress);

static LIST_HEAD(qcom_ssr_subsystem_list);
static DEFINE_MUTEX(qcom_ssr_subsys_lock);

static const char * const ssr_timeout_msg = "srcu notifier chain for %s:%s taking too long";

static ssize_t qcom_rproc_shutdown_request_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
bool val;
int ret;

ret = kstrtobool(buf, &val);
if (ret)
return ret;

qcom_device_shutdown_in_progress = val;
pr_info("qcom rproc: Device shutdown requested: %s\n", val ? "true" : "false");
return count;
}
static struct kobj_attribute shutdown_requested_attr = __ATTR(shutdown_in_progress, 0220, NULL,
qcom_rproc_shutdown_request_store);

static void qcom_minidump_cleanup(struct rproc *rproc)
{
struct rproc_dump_segment *entry, *tmp;
@@ -101,7 +132,8 @@ static void qcom_minidump_cleanup(struct rproc *rproc)
}
}

static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem)
static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem,
rproc_dumpfn_t dumpfn)
{
struct minidump_region __iomem *ptr;
struct minidump_region region;
@@ -131,7 +163,7 @@ static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsy
}
da = le64_to_cpu(region.address);
size = le32_to_cpu(region.size);
rproc_coredump_add_custom_segment(rproc, da, size, NULL, name);
rproc_coredump_add_custom_segment(rproc, da, size, dumpfn, name);
}
}

@@ -139,7 +171,7 @@ static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsy
return 0;
}

void qcom_minidump(struct rproc *rproc, unsigned int minidump_id)
void qcom_minidump(struct rproc *rproc, unsigned int minidump_id, rproc_dumpfn_t dumpfn)
{
int ret;
struct minidump_subsystem *subsystem;
@@ -163,35 +195,83 @@ void qcom_minidump(struct rproc *rproc, unsigned int minidump_id)
*/
if (subsystem->regions_baseptr == 0 ||
le32_to_cpu(subsystem->status) != 1 ||
le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED) {
return rproc_coredump(rproc);
}

if (le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
dev_err(&rproc->dev, "Minidump not ready, skipping\n");
return;
}

ret = qcom_add_minidump_segments(rproc, subsystem);
rproc_coredump_cleanup(rproc);

ret = qcom_add_minidump_segments(rproc, subsystem, dumpfn);
if (ret) {
dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
goto clean_minidump;
}
rproc_coredump_using_sections(rproc);

if (rproc->elf_class == ELFCLASS64)
rproc_coredump_using_sections(rproc);
else
rproc_coredump(rproc);
clean_minidump:
qcom_minidump_cleanup(rproc);
}
EXPORT_SYMBOL_GPL(qcom_minidump);
|
||||
static int glink_subdev_start(struct rproc_subdev *subdev)
|
||||
static int glink_early_ssr_notifier_event(struct notifier_block *this,
|
||||
unsigned long code, void *data)
|
||||
{
|
||||
struct qcom_rproc_glink *glink = container_of(this, struct qcom_rproc_glink, nb);
|
||||
|
||||
qcom_glink_early_ssr_notify(glink->edge);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static int glink_subdev_prepare(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
|
||||
|
||||
trace_rproc_qcom_event(dev_name(glink->dev->parent), GLINK_SUBDEV_NAME, "prepare");
|
||||
|
||||
glink->edge = qcom_glink_smem_register(glink->dev, glink->node);
|
||||
|
||||
return PTR_ERR_OR_ZERO(glink->edge);
|
||||
}
|
||||
|
||||
static int glink_subdev_start(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
|
||||
|
||||
trace_rproc_qcom_event(dev_name(glink->dev->parent), GLINK_SUBDEV_NAME, "start");
|
||||
|
||||
glink->nb.notifier_call = glink_early_ssr_notifier_event;
|
||||
|
||||
glink->notifier_handle = qcom_register_early_ssr_notifier(glink->ssr_name, &glink->nb);
|
||||
if (IS_ERR(glink->notifier_handle)) {
|
||||
dev_err(glink->dev, "Failed to register for SSR notifier\n");
|
||||
glink->notifier_handle = NULL;
|
||||
}
|
||||
|
||||
return qcom_glink_smem_start(glink->edge);
|
||||
}
|
||||
|
||||
static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
|
||||
{
|
||||
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
|
||||
int ret;
|
||||
|
||||
if (!glink->edge)
|
||||
return;
|
||||
trace_rproc_qcom_event(dev_name(glink->dev->parent), GLINK_SUBDEV_NAME,
|
||||
crashed ? "crash stop" : "stop");
|
||||
|
||||
ret = qcom_unregister_early_ssr_notifier(glink->notifier_handle, &glink->nb);
|
||||
if (ret)
|
||||
dev_err(glink->dev, "Error in unregistering notifier\n");
|
||||
glink->notifier_handle = NULL;
|
||||
|
||||
qcom_glink_smem_unregister(glink->edge);
|
||||
glink->edge = NULL;
|
||||
@ -201,6 +281,8 @@ static void glink_subdev_unprepare(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
|
||||
|
||||
trace_rproc_qcom_event(dev_name(glink->dev->parent), GLINK_SUBDEV_NAME, "unprepare");
|
||||
|
||||
qcom_glink_ssr_notify(glink->ssr_name);
|
||||
}
|
||||
|
||||
@ -225,6 +307,7 @@ void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
|
||||
|
||||
glink->dev = dev;
|
||||
glink->subdev.start = glink_subdev_start;
|
||||
glink->subdev.prepare = glink_subdev_prepare;
|
||||
glink->subdev.stop = glink_subdev_stop;
|
||||
glink->subdev.unprepare = glink_subdev_unprepare;
|
||||
|
||||
@ -295,6 +378,8 @@ static int smd_subdev_start(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
|
||||
|
||||
trace_rproc_qcom_event(dev_name(smd->dev->parent), SMD_SUBDEV_NAME, "start");
|
||||
|
||||
smd->edge = qcom_smd_register_edge(smd->dev, smd->node);
|
||||
|
||||
return PTR_ERR_OR_ZERO(smd->edge);
|
||||
@ -304,6 +389,11 @@ static void smd_subdev_stop(struct rproc_subdev *subdev, bool crashed)
|
||||
{
|
||||
struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
|
||||
|
||||
if (!smd->edge)
|
||||
return;
|
||||
trace_rproc_qcom_event(dev_name(smd->dev->parent), SMD_SUBDEV_NAME,
|
||||
crashed ? "crash stop" : "stop");
|
||||
|
||||
qcom_smd_unregister_edge(smd->edge);
|
||||
smd->edge = NULL;
|
||||
}
|
||||
@ -348,6 +438,9 @@ static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
|
||||
{
|
||||
struct qcom_ssr_subsystem *info;
|
||||
|
||||
if (!name)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
mutex_lock(&qcom_ssr_subsys_lock);
|
||||
/* Match in the global qcom_ssr_subsystem_list with name */
|
||||
list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
|
||||
@ -361,6 +454,7 @@ static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
|
||||
}
|
||||
info->name = kstrdup_const(name, GFP_KERNEL);
|
||||
srcu_init_notifier_head(&info->notifier_list);
|
||||
srcu_init_notifier_head(&info->early_notifier_list);
|
||||
|
||||
/* Add to global notification list */
|
||||
list_add_tail(&info->list, &qcom_ssr_subsystem_list);
|
||||
@ -370,6 +464,34 @@ static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
|
||||
return info;
|
||||
}
|
||||
|
||||
void *qcom_register_early_ssr_notifier(const char *name, struct notifier_block *nb)
|
||||
{
|
||||
struct qcom_ssr_subsystem *info;
|
||||
|
||||
info = qcom_ssr_get_subsys(name);
|
||||
if (IS_ERR(info))
|
||||
return info;
|
||||
|
||||
srcu_notifier_chain_register(&info->early_notifier_list, nb);
|
||||
|
||||
return &info->early_notifier_list;
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_register_early_ssr_notifier);
|
||||
|
||||
int qcom_unregister_early_ssr_notifier(void *notify, struct notifier_block *nb)
|
||||
{
|
||||
return srcu_notifier_chain_unregister(notify, nb);
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_unregister_early_ssr_notifier);
|
||||
|
||||
void qcom_notify_early_ssr_clients(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
|
||||
|
||||
srcu_notifier_call_chain(&ssr->info->early_notifier_list, QCOM_SSR_BEFORE_SHUTDOWN, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_notify_early_ssr_clients);
|
||||
|
||||
/**
|
||||
* qcom_register_ssr_notifier() - register SSR notification handler
|
||||
* @name: Subsystem's SSR name
|
||||
@ -396,6 +518,21 @@ void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
|
||||
|
||||
static void ssr_notif_timeout_handler(struct timer_list *t)
|
||||
{
|
||||
struct qcom_rproc_ssr *ssr = from_timer(ssr, t, timer);
|
||||
|
||||
if (IS_ENABLED(CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT) &&
|
||||
system_state != SYSTEM_RESTART &&
|
||||
system_state != SYSTEM_POWER_OFF &&
|
||||
system_state != SYSTEM_HALT &&
|
||||
!qcom_device_shutdown_in_progress)
|
||||
panic(ssr_timeout_msg, ssr->info->name, subdevice_state_string[ssr->notification]);
|
||||
else
|
||||
WARN(1, ssr_timeout_msg, ssr->info->name,
|
||||
subdevice_state_string[ssr->notification]);
|
||||
}
|
||||
|
||||
/**
|
||||
* qcom_unregister_ssr_notifier() - unregister SSR notification handler
|
||||
* @notify: subsystem cookie returned from qcom_register_ssr_notifier
|
||||
@ -412,6 +549,16 @@ int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
|
||||
|
||||
static inline void notify_ssr_clients(struct qcom_rproc_ssr *ssr, struct qcom_ssr_notify_data *data)
|
||||
{
|
||||
unsigned long timeout;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(SSR_NOTIF_TIMEOUT);
|
||||
mod_timer(&ssr->timer, timeout);
|
||||
srcu_notifier_call_chain(&ssr->info->notifier_list, ssr->notification, data);
|
||||
del_timer_sync(&ssr->timer);
|
||||
}
|
||||
|
||||
static int ssr_notify_prepare(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
|
||||
@ -420,8 +567,10 @@ static int ssr_notify_prepare(struct rproc_subdev *subdev)
|
||||
.crashed = false,
|
||||
};
|
||||
|
||||
srcu_notifier_call_chain(&ssr->info->notifier_list,
|
||||
QCOM_SSR_BEFORE_POWERUP, &data);
|
||||
trace_rproc_qcom_event(ssr->info->name, SSR_SUBDEV_NAME, "prepare");
|
||||
|
||||
ssr->notification = QCOM_SSR_BEFORE_POWERUP;
|
||||
notify_ssr_clients(ssr, &data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -433,8 +582,10 @@ static int ssr_notify_start(struct rproc_subdev *subdev)
|
||||
.crashed = false,
|
||||
};
|
||||
|
||||
srcu_notifier_call_chain(&ssr->info->notifier_list,
|
||||
QCOM_SSR_AFTER_POWERUP, &data);
|
||||
trace_rproc_qcom_event(ssr->info->name, SSR_SUBDEV_NAME, "start");
|
||||
|
||||
ssr->notification = QCOM_SSR_AFTER_POWERUP;
|
||||
notify_ssr_clients(ssr, &data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -446,8 +597,10 @@ static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
|
||||
.crashed = crashed,
|
||||
};
|
||||
|
||||
srcu_notifier_call_chain(&ssr->info->notifier_list,
|
||||
QCOM_SSR_BEFORE_SHUTDOWN, &data);
|
||||
trace_rproc_qcom_event(ssr->info->name, SSR_SUBDEV_NAME, crashed ? "crash stop" : "stop");
|
||||
|
||||
ssr->notification = QCOM_SSR_BEFORE_SHUTDOWN;
|
||||
notify_ssr_clients(ssr, &data);
|
||||
}
|
||||
|
||||
static void ssr_notify_unprepare(struct rproc_subdev *subdev)
|
||||
@ -458,8 +611,10 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev)
|
||||
.crashed = false,
|
||||
};
|
||||
|
||||
srcu_notifier_call_chain(&ssr->info->notifier_list,
|
||||
QCOM_SSR_AFTER_SHUTDOWN, &data);
|
||||
trace_rproc_qcom_event(ssr->info->name, SSR_SUBDEV_NAME, "unprepare");
|
||||
|
||||
ssr->notification = QCOM_SSR_AFTER_SHUTDOWN;
|
||||
notify_ssr_clients(ssr, &data);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -483,6 +638,8 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
|
||||
return;
|
||||
}
|
||||
|
||||
timer_setup(&ssr->timer, ssr_notif_timeout_handler, 0);
|
||||
|
||||
ssr->info = info;
|
||||
ssr->subdev.prepare = ssr_notify_prepare;
|
||||
ssr->subdev.start = ssr_notify_start;
|
||||
@ -505,5 +662,78 @@ void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
|
||||
|
||||
static void qcom_check_ssr_status(void *data, struct rproc *rproc)
|
||||
{
|
||||
if (!atomic_read(&rproc->power) ||
|
||||
rproc->state == RPROC_RUNNING ||
|
||||
qcom_device_shutdown_in_progress ||
|
||||
system_state == SYSTEM_RESTART ||
|
||||
system_state == SYSTEM_POWER_OFF ||
|
||||
system_state == SYSTEM_HALT)
|
||||
return;
|
||||
|
||||
panic("Panicking, remoteproc %s failed to recover!\n", rproc->name);
|
||||
}
|
||||
|
||||
static void rproc_recovery_notifier(void *data, struct rproc *rproc)
|
||||
{
|
||||
const char *recovery = rproc->recovery_disabled ? "disabled" : "enabled";
|
||||
|
||||
trace_rproc_qcom_event(rproc->name, "recovery", recovery);
|
||||
pr_info("qcom rproc: %s: recovery %s\n", rproc->name, recovery);
|
||||
}
|
||||
|
||||
static int __init qcom_common_init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
qcom_device_shutdown_in_progress = false;
|
||||
|
||||
sysfs_kobject = kobject_create_and_add("qcom_rproc", kernel_kobj);
|
||||
if (!sysfs_kobject) {
|
||||
pr_err("qcom rproc: failed to create sysfs kobject\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = sysfs_create_file(sysfs_kobject, &shutdown_requested_attr.attr);
|
||||
if (ret) {
|
||||
pr_err("qcom rproc: failed to create sysfs file\n");
|
||||
goto remove_kobject;
|
||||
}
|
||||
|
||||
ret = register_trace_android_vh_rproc_recovery(qcom_check_ssr_status, NULL);
|
||||
if (ret) {
|
||||
pr_err("qcom rproc: failed to register trace hooks\n");
|
||||
goto remove_sysfs;
|
||||
}
|
||||
|
||||
ret = register_trace_android_vh_rproc_recovery_set(rproc_recovery_notifier, NULL);
|
||||
if (ret) {
|
||||
pr_err("qcom rproc: failed to register recovery_set vendor hook\n");
|
||||
goto unregister_rproc_recovery_vh;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
unregister_rproc_recovery_vh:
|
||||
unregister_trace_android_vh_rproc_recovery(qcom_check_ssr_status, NULL);
|
||||
remove_sysfs:
|
||||
sysfs_remove_file(sysfs_kobject, &shutdown_requested_attr.attr);
|
||||
remove_kobject:
|
||||
kobject_put(sysfs_kobject);
|
||||
return ret;
|
||||
|
||||
}
|
||||
module_init(qcom_common_init);
|
||||
|
||||
static void __exit qcom_common_exit(void)
|
||||
{
|
||||
unregister_trace_android_vh_rproc_recovery_set(rproc_recovery_notifier, NULL);
|
||||
sysfs_remove_file(sysfs_kobject, &shutdown_requested_attr.attr);
|
||||
kobject_put(sysfs_kobject);
|
||||
unregister_trace_android_vh_rproc_recovery(qcom_check_ssr_status, NULL);
|
||||
}
|
||||
module_exit(qcom_common_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
@@ -1,10 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __RPROC_QCOM_COMMON_H__
#define __RPROC_QCOM_COMMON_H__

#include <linux/timer.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#include <linux/soc/qcom/qmi.h>
#include <linux/remoteproc/qcom_rproc.h>

static const char * const subdevice_state_string[] = {
[QCOM_SSR_BEFORE_POWERUP] = "before_powerup",
[QCOM_SSR_AFTER_POWERUP] = "after_powerup",
[QCOM_SSR_BEFORE_SHUTDOWN] = "before_shutdown",
[QCOM_SSR_AFTER_SHUTDOWN] = "after_shutdown",
};

struct reg_info {
struct regulator *reg;
int uV;
int uA;
};

struct qcom_sysmon;

@@ -16,6 +34,9 @@ struct qcom_rproc_glink {
struct device *dev;
struct device_node *node;
struct qcom_glink *edge;

struct notifier_block nb;
void *notifier_handle;
};

struct qcom_rproc_subdev {
@@ -30,10 +51,17 @@ struct qcom_ssr_subsystem;

struct qcom_rproc_ssr {
struct rproc_subdev subdev;
enum qcom_ssr_notify_type notification;
struct timer_list timer;
struct qcom_ssr_subsystem *info;
};

void qcom_minidump(struct rproc *rproc, unsigned int minidump_id);
extern bool qcom_device_shutdown_in_progress;

typedef void (*rproc_dumpfn_t)(struct rproc *rproc, struct rproc_dump_segment *segment,
void *dest, size_t offset, size_t size);

void qcom_minidump(struct rproc *rproc, unsigned int minidump_id, rproc_dumpfn_t dumpfn);

void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
const char *ssr_name);
@@ -46,6 +74,7 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd);

void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name);
void qcom_notify_early_ssr_clients(struct rproc_subdev *subdev);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr);

#if IS_ENABLED(CONFIG_QCOM_SYSMON)
@@ -54,6 +83,8 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
int ssctl_instance);
void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon);
bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon);
uint32_t qcom_sysmon_get_txn_id(struct qcom_sysmon *sysmon);
int qcom_sysmon_get_reason(struct qcom_sysmon *sysmon, char *buf, size_t len);
#else
static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
@@ -70,6 +101,16 @@ static inline bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
{
return false;
}

static inline uint32_t qcom_sysmon_get_txn_id(struct qcom_sysmon *sysmon)
{
return 0;
}

int qcom_sysmon_get_reason(struct qcom_sysmon *sysmon, char *buf, size_t len)
{
return -ENODEV;
}
#endif

#endif
@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2019-2020 Linaro Ltd.
|
||||
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
@ -26,6 +27,53 @@ struct pil_reloc {
|
||||
|
||||
static struct pil_reloc _reloc __read_mostly;
|
||||
static DEFINE_MUTEX(pil_reloc_lock);
|
||||
static bool timeouts_disabled;
|
||||
|
||||
/**
|
||||
* qcom_pil_timeouts_disabled() - Check if pil timeouts are disabled in imem
|
||||
*
|
||||
* Return: true if the value 0x53444247 is set in the disable timeout pil
|
||||
* imem region, false otherwise.
|
||||
*/
|
||||
bool qcom_pil_timeouts_disabled(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct resource imem;
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
const char *prop = "qcom,msm-imem-pil-disable-timeout";
|
||||
|
||||
np = of_find_compatible_node(NULL, NULL, prop);
|
||||
if (!np) {
|
||||
pr_err("%s entry missing!\n", prop);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = of_address_to_resource(np, 0, &imem);
|
||||
of_node_put(np);
|
||||
if (ret < 0) {
|
||||
pr_err("address to resource conversion failed for %s\n", prop);
|
||||
goto out;
|
||||
}
|
||||
|
||||
base = ioremap(imem.start, resource_size(&imem));
|
||||
if (!base) {
|
||||
pr_err("failed to map PIL disable timeouts region\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (__raw_readl(base) == 0x53444247) {
|
||||
pr_info("pil-imem set to disable pil timeouts\n");
|
||||
timeouts_disabled = true;
|
||||
} else
|
||||
timeouts_disabled = false;
|
||||
|
||||
iounmap(base);
|
||||
|
||||
out:
|
||||
return timeouts_disabled;
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_pil_timeouts_disabled);
|
||||
|
||||
static int qcom_pil_info_init(void)
|
||||
{
|
||||
@ -73,6 +121,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
|
||||
{
|
||||
char buf[PIL_RELOC_NAME_LEN];
|
||||
void __iomem *entry;
|
||||
size_t entry_size;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
@ -104,7 +153,8 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
|
||||
return -ENOMEM;
|
||||
|
||||
found_unused:
|
||||
memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
|
||||
entry_size = min(strlen(image), PIL_RELOC_ENTRY_SIZE - 1);
|
||||
memcpy_toio(entry, image, entry_size);
|
||||
found_existing:
|
||||
/* Use two writel() as base is only aligned to 4 bytes on odd entries */
|
||||
writel(base, entry + PIL_RELOC_NAME_LEN);
|
||||
|
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_PIL_INFO_H__
#define __QCOM_PIL_INFO_H__

#include <linux/types.h>

int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
bool qcom_pil_timeouts_disabled(void);

#endif
@ -4,44 +4,22 @@
|
||||
*
|
||||
* Copyright (C) 2016-2018 Linaro Ltd.
|
||||
* Copyright (C) 2014 Sony Mobile Communications AB
|
||||
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2013, 2020-2021, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interconnect.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/soc/qcom/qcom_aoss.h>
|
||||
#include <linux/soc/qcom/smem.h>
|
||||
#include <linux/soc/qcom/smem_state.h>
|
||||
#include <linux/remoteproc.h>
|
||||
#include <linux/delay.h>
|
||||
#include "qcom_common.h"
|
||||
#include "qcom_q6v5.h"
|
||||
#include <trace/events/rproc_qcom.h>
|
||||
|
||||
#define Q6V5_LOAD_STATE_MSG_LEN 64
|
||||
#define Q6V5_PANIC_DELAY_MS 200
|
||||
|
||||
static int q6v5_load_state_toggle(struct qcom_q6v5 *q6v5, bool enable)
|
||||
{
|
||||
char buf[Q6V5_LOAD_STATE_MSG_LEN];
|
||||
int ret;
|
||||
|
||||
if (!q6v5->qmp)
|
||||
return 0;
|
||||
|
||||
ret = snprintf(buf, sizeof(buf),
|
||||
"{class: image, res: load_state, name: %s, val: %s}",
|
||||
q6v5->load_state, enable ? "on" : "off");
|
||||
|
||||
WARN_ON(ret >= Q6V5_LOAD_STATE_MSG_LEN);
|
||||
|
||||
ret = qmp_send(q6v5->qmp, buf, sizeof(buf));
|
||||
if (ret)
|
||||
dev_err(q6v5->dev, "failed to toggle load state\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
|
||||
* @q6v5: reference to qcom_q6v5 context to be reinitialized
|
||||
@ -50,20 +28,6 @@ static int q6v5_load_state_toggle(struct qcom_q6v5 *q6v5, bool enable)
|
||||
*/
|
||||
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
|
||||
if (ret < 0) {
|
||||
dev_err(q6v5->dev, "failed to set bandwidth request\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = q6v5_load_state_toggle(q6v5, true);
|
||||
if (ret) {
|
||||
icc_set_bw(q6v5->path, 0, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
reinit_completion(&q6v5->start_done);
|
||||
reinit_completion(&q6v5->stop_done);
|
||||
|
||||
@ -85,15 +49,50 @@ EXPORT_SYMBOL_GPL(qcom_q6v5_prepare);
|
||||
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
|
||||
{
|
||||
disable_irq(q6v5->handover_irq);
|
||||
q6v5_load_state_toggle(q6v5, false);
|
||||
|
||||
/* Disable interconnect vote, in case handover never happened */
|
||||
icc_set_bw(q6v5->path, 0, 0);
|
||||
|
||||
return !q6v5->handover_issued;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
|
||||
|
||||
void qcom_q6v5_register_ssr_subdev(struct qcom_q6v5 *q6v5, struct rproc_subdev *ssr_subdev)
|
||||
{
|
||||
q6v5->ssr_subdev = ssr_subdev;
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_q6v5_register_ssr_subdev);
|
||||
|
||||
static void qcom_q6v5_crash_handler_work(struct work_struct *work)
|
||||
{
|
||||
struct qcom_q6v5 *q6v5 = container_of(work, struct qcom_q6v5, crash_handler);
|
||||
struct rproc *rproc = q6v5->rproc;
|
||||
struct rproc_subdev *subdev;
|
||||
int votes;
|
||||
|
||||
mutex_lock(&rproc->lock);
|
||||
|
||||
rproc->state = RPROC_CRASHED;
|
||||
|
||||
votes = atomic_xchg(&rproc->power, 0);
|
||||
/* if votes are zero, rproc has already been shutdown */
|
||||
if (votes == 0) {
|
||||
mutex_unlock(&rproc->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
|
||||
if (subdev->stop)
|
||||
subdev->stop(subdev, true);
|
||||
}
|
||||
|
||||
mutex_unlock(&rproc->lock);
|
||||
|
||||
/*
|
||||
* Temporary workaround until ramdump userspace application calls
|
||||
* sync() and fclose() on attempting the dump.
|
||||
*/
|
||||
msleep(100);
|
||||
panic("Panicking, remoteproc %s crashed\n", q6v5->rproc->name);
|
||||
}
|
||||
|
||||
static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
|
||||
{
|
||||
struct qcom_q6v5 *q6v5 = data;
|
||||
@ -102,6 +101,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
|
||||
|
||||
/* Sometimes the stop triggers a watchdog rather than a stop-ack */
|
||||
if (!q6v5->running) {
|
||||
dev_info(q6v5->dev, "received wdog irq while q6 is offline\n");
|
||||
complete(&q6v5->stop_done);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -112,7 +112,21 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
|
||||
else
|
||||
dev_err(q6v5->dev, "watchdog without message\n");
|
||||
|
||||
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
|
||||
q6v5->running = false;
|
||||
trace_rproc_qcom_event(dev_name(q6v5->dev), "q6v5_wdog", msg);
|
||||
dev_err(q6v5->dev, "rproc recovery state: %s\n",
|
||||
q6v5->rproc->recovery_disabled ?
|
||||
"disabled and lead to device crash" :
|
||||
"enabled and kick reovery process");
|
||||
|
||||
if (q6v5->rproc->recovery_disabled) {
|
||||
schedule_work(&q6v5->crash_handler);
|
||||
} else {
|
||||
if (q6v5->ssr_subdev)
|
||||
qcom_notify_early_ssr_clients(q6v5->ssr_subdev);
|
||||
|
||||
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -123,6 +137,11 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
|
||||
size_t len;
|
||||
char *msg;
|
||||
|
||||
if (!q6v5->running) {
|
||||
dev_info(q6v5->dev, "received fatal irq while q6 is offline\n");
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
|
||||
if (!IS_ERR(msg) && len > 0 && msg[0])
|
||||
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
|
||||
@ -130,7 +149,18 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
|
||||
dev_err(q6v5->dev, "fatal error without message\n");
|
||||
|
||||
q6v5->running = false;
|
||||
rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
|
||||
trace_rproc_qcom_event(dev_name(q6v5->dev), "q6v5_fatal", msg);
|
||||
dev_err(q6v5->dev, "rproc recovery state: %s\n",
|
||||
q6v5->rproc->recovery_disabled ? "disabled and lead to device crash" :
|
||||
"enabled and kick reovery process");
|
||||
if (q6v5->rproc->recovery_disabled) {
|
||||
schedule_work(&q6v5->crash_handler);
|
||||
} else {
|
||||
if (q6v5->ssr_subdev)
|
||||
qcom_notify_early_ssr_clients(q6v5->ssr_subdev);
|
||||
|
||||
rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -172,8 +202,6 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
|
||||
if (q6v5->handover)
|
||||
q6v5->handover(q6v5);
|
||||
|
||||
icc_set_bw(q6v5->path, 0, 0);
|
||||
|
||||
q6v5->handover_issued = true;
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -201,8 +229,10 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon)
|
||||
|
||||
q6v5->running = false;
|
||||
|
||||
/* Don't perform SMP2P dance if sysmon already shut down the remote */
|
||||
if (qcom_sysmon_shutdown_acked(sysmon))
|
||||
/* Don't perform SMP2P dance if sysmon already shut
|
||||
* down the remote or if it isn't running
|
||||
*/
|
||||
if (q6v5->rproc->state != RPROC_RUNNING || qcom_sysmon_shutdown_acked(sysmon))
|
||||
return 0;
|
||||
|
||||
qcom_smem_state_update_bits(q6v5->state,
|
||||
@ -240,32 +270,42 @@ EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
|
||||
* @pdev: platform_device reference for acquiring resources
|
||||
* @rproc: associated remoteproc instance
|
||||
* @crash_reason: SMEM id for crash reason string, or 0 if none
|
||||
* @load_state: load state resource string
|
||||
* @handover: function to be called when proxy resources should be released
|
||||
*
|
||||
* Return: 0 on success, negative errno on failure
|
||||
*/
|
||||
int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
||||
struct rproc *rproc, int crash_reason, const char *load_state,
|
||||
struct rproc *rproc, int crash_reason,
|
||||
void (*handover)(struct qcom_q6v5 *q6v5))
|
||||
{
|
||||
int ret;
|
||||
struct resource *res;
|
||||
|
||||
q6v5->rproc = rproc;
|
||||
q6v5->dev = &pdev->dev;
|
||||
q6v5->crash_reason = crash_reason;
|
||||
q6v5->handover = handover;
|
||||
q6v5->ssr_subdev = NULL;
|
||||
|
||||
init_completion(&q6v5->start_done);
|
||||
init_completion(&q6v5->stop_done);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
if (res) {
|
||||
q6v5->rmb_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(q6v5->rmb_base))
|
||||
q6v5->rmb_base = NULL;
|
||||
} else
|
||||
q6v5->rmb_base = NULL;
|
||||
|
||||
|
||||
q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
|
||||
if (q6v5->wdog_irq < 0)
|
||||
return q6v5->wdog_irq;
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
|
||||
NULL, q6v5_wdog_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
IRQF_ONESHOT,
|
||||
"q6v5 wdog", q6v5);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to acquire wdog IRQ\n");
|
||||
@ -325,45 +365,17 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
q6v5->state = devm_qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
|
||||
q6v5->state = qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
|
||||
if (IS_ERR(q6v5->state)) {
|
||||
dev_err(&pdev->dev, "failed to acquire stop state\n");
|
||||
return PTR_ERR(q6v5->state);
|
||||
}
|
||||
|
||||
q6v5->load_state = devm_kstrdup_const(&pdev->dev, load_state, GFP_KERNEL);
|
||||
q6v5->qmp = qmp_get(&pdev->dev);
|
||||
if (IS_ERR(q6v5->qmp)) {
|
||||
if (PTR_ERR(q6v5->qmp) != -ENODEV)
|
||||
return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->qmp),
|
||||
"failed to acquire load state\n");
|
||||
q6v5->qmp = NULL;
|
||||
} else if (!q6v5->load_state) {
|
||||
if (!load_state)
|
||||
dev_err(&pdev->dev, "load state resource string empty\n");
|
||||
|
||||
qmp_put(q6v5->qmp);
|
||||
return load_state ? -ENOMEM : -EINVAL;
|
||||
}
|
||||
|
||||
q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(q6v5->path))
|
||||
return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
|
||||
"failed to acquire interconnect path\n");
|
||||
INIT_WORK(&q6v5->crash_handler, qcom_q6v5_crash_handler_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_q6v5_init);
|
||||
|
||||
/**
|
||||
* qcom_q6v5_deinit() - deinitialize the q6v5 common struct
|
||||
* @q6v5: reference to qcom_q6v5 context to be deinitialized
|
||||
*/
|
||||
void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5)
|
||||
{
|
||||
qmp_put(q6v5->qmp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_q6v5_deinit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5");
|
||||
|
@@ -1,13 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/

#ifndef __QCOM_Q6V5_H__
#define __QCOM_Q6V5_H__

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/soc/qcom/qcom_aoss.h>

struct icc_path;
#define RMB_BOOT_WAIT_REG 0x8
#define RMB_BOOT_CONT_REG 0xC
#define RMB_Q6_BOOT_STATUS_REG 0x10

#define RMB_POLL_MAX_TIMES 250

struct rproc;
struct qcom_smem_state;
struct qcom_sysmon;
@@ -16,11 +23,9 @@ struct qcom_q6v5 {
struct device *dev;
struct rproc *rproc;

void __iomem *rmb_base;

struct qcom_smem_state *state;
struct qmp *qmp;

struct icc_path *path;

unsigned stop_bit;

int wdog_irq;
@@ -29,6 +34,10 @@ struct qcom_q6v5 {
int handover_irq;
int stop_irq;

struct rproc_subdev *ssr_subdev;

struct work_struct crash_handler;

bool handover_issued;

struct completion start_done;
@@ -38,15 +47,13 @@ struct qcom_q6v5 {

bool running;

const char *load_state;
void (*handover)(struct qcom_q6v5 *q6v5);
};

int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
struct rproc *rproc, int crash_reason, const char *load_state,
struct rproc *rproc, int crash_reason,
void (*handover)(struct qcom_q6v5 *q6v5));
void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5);

void qcom_q6v5_register_ssr_subdev(struct qcom_q6v5 *q6v5, struct rproc_subdev *ssr_subdev);
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon);
@ -32,7 +32,6 @@
|
||||
|
||||
/* time out value */
|
||||
#define ACK_TIMEOUT 1000
|
||||
#define ACK_TIMEOUT_US 1000000
|
||||
#define BOOT_FSM_TIMEOUT 10000
|
||||
/* mask values */
|
||||
#define EVB_MASK GENMASK(27, 4)
|
||||
@ -52,8 +51,6 @@
|
||||
#define QDSP6SS_CORE_CBCR 0x20
|
||||
#define QDSP6SS_SLEEP_CBCR 0x3c
|
||||
|
||||
#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
|
||||
|
||||
struct adsp_pil_data {
|
||||
int crash_reason_smem;
|
||||
const char *firmware_name;
|
||||
@ -61,13 +58,9 @@ struct adsp_pil_data {
|
||||
const char *ssr_name;
|
||||
const char *sysmon_name;
|
||||
int ssctl_id;
|
||||
bool is_wpss;
|
||||
bool auto_boot;
|
||||
|
||||
const char **clk_ids;
|
||||
int num_clks;
|
||||
const char **proxy_pd_names;
|
||||
const char *load_state;
|
||||
};
|
||||
|
||||
struct qcom_adsp {
|
||||
@ -100,151 +93,11 @@ struct qcom_adsp {
|
||||
void *mem_region;
|
||||
size_t mem_size;
|
||||
|
||||
struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
|
||||
size_t proxy_pd_count;
|
||||
|
||||
struct qcom_rproc_glink glink_subdev;
|
||||
struct qcom_rproc_ssr ssr_subdev;
|
||||
struct qcom_sysmon *sysmon;
|
||||
|
||||
int (*shutdown)(struct qcom_adsp *adsp);
|
||||
};
|
||||
|
||||
static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
|
||||
const char **pd_names)
|
||||
{
|
||||
struct device **devs = adsp->proxy_pds;
|
||||
size_t num_pds = 0;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (!pd_names)
|
||||
return 0;
|
||||
|
||||
/* Handle single power domain */
|
||||
if (dev->pm_domain) {
|
||||
devs[0] = dev;
|
||||
pm_runtime_enable(dev);
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (pd_names[num_pds])
|
||||
num_pds++;
|
||||
|
||||
if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
|
||||
return -E2BIG;
|
||||
|
||||
for (i = 0; i < num_pds; i++) {
|
||||
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
|
||||
if (IS_ERR_OR_NULL(devs[i])) {
|
||||
ret = PTR_ERR(devs[i]) ? : -ENODATA;
|
||||
goto unroll_attach;
|
||||
}
|
||||
}
|
||||
|
||||
return num_pds;
|
||||
|
||||
unroll_attach:
|
||||
for (i--; i >= 0; i--)
|
||||
dev_pm_domain_detach(devs[i], false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
|
||||
size_t pd_count)
|
||||
{
|
||||
struct device *dev = adsp->dev;
|
||||
int i;
|
||||
|
||||
/* Handle single power domain */
|
||||
if (dev->pm_domain && pd_count) {
|
||||
pm_runtime_disable(dev);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < pd_count; i++)
|
||||
dev_pm_domain_detach(pds[i], false);
|
||||
}
|
||||
|
||||
static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
|
||||
size_t pd_count)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pd_count; i++) {
|
||||
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
|
||||
ret = pm_runtime_get_sync(pds[i]);
|
||||
if (ret < 0) {
|
||||
pm_runtime_put_noidle(pds[i]);
|
||||
dev_pm_genpd_set_performance_state(pds[i], 0);
|
||||
goto unroll_pd_votes;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
unroll_pd_votes:
|
||||
for (i--; i >= 0; i--) {
|
||||
dev_pm_genpd_set_performance_state(pds[i], 0);
|
||||
pm_runtime_put(pds[i]);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
|
||||
size_t pd_count)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pd_count; i++) {
|
||||
dev_pm_genpd_set_performance_state(pds[i], 0);
|
||||
pm_runtime_put(pds[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
|
||||
|
||||
/* Wait for halt ACK from QDSP6 */
|
||||
regmap_read_poll_timeout(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_HALTACK_REG, val,
|
||||
val, 1000, ACK_TIMEOUT_US);
|
||||
|
||||
/* Assert the WPSS PDC Reset */
|
||||
reset_control_assert(adsp->pdc_sync_reset);
|
||||
|
||||
/* Place the WPSS processor into reset */
|
||||
reset_control_assert(adsp->restart);
|
||||
|
||||
/* wait after asserting subsystem restart from AOSS */
|
||||
usleep_range(200, 205);
|
||||
|
||||
/* Remove the WPSS reset */
|
||||
reset_control_deassert(adsp->restart);
|
||||
|
||||
/* De-assert the WPSS PDC Reset */
|
||||
reset_control_deassert(adsp->pdc_sync_reset);
|
||||
|
||||
usleep_range(100, 105);
|
||||
|
||||
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
|
||||
|
||||
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
|
||||
|
||||
/* Wait for halt ACK from QDSP6 */
|
||||
regmap_read_poll_timeout(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_HALTACK_REG, val,
|
||||
!val, 1000, ACK_TIMEOUT_US);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
|
||||
{
|
||||
unsigned long timeout;
|
||||
@ -332,18 +185,18 @@ static int adsp_start(struct rproc *rproc)
|
||||
int ret;
|
||||
unsigned int val;
|
||||
|
||||
ret = qcom_q6v5_prepare(&adsp->q6v5);
|
||||
if (ret)
|
||||
return ret;
|
||||
qcom_q6v5_prepare(&adsp->q6v5);
|
||||
|
||||
ret = clk_prepare_enable(adsp->xo);
|
||||
if (ret)
|
||||
goto disable_irqs;
|
||||
|
||||
ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
|
||||
adsp->proxy_pd_count);
|
||||
if (ret < 0)
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
|
||||
ret = pm_runtime_get_sync(adsp->dev);
|
||||
if (ret) {
|
||||
pm_runtime_put_noidle(adsp->dev);
|
||||
goto disable_xo_clk;
|
||||
}
|
||||
|
||||
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
|
||||
if (ret) {
|
||||
@ -388,7 +241,8 @@ static int adsp_start(struct rproc *rproc)
|
||||
disable_adsp_clks:
|
||||
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
|
||||
disable_power_domain:
|
||||
qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, 0);
|
||||
pm_runtime_put(adsp->dev);
|
||||
disable_xo_clk:
|
||||
clk_disable_unprepare(adsp->xo);
|
||||
disable_irqs:
|
||||
@ -402,7 +256,8 @@ static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
|
||||
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
|
||||
|
||||
clk_disable_unprepare(adsp->xo);
|
||||
qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, 0);
|
||||
pm_runtime_put(adsp->dev);
|
||||
}
|
||||
|
||||
static int adsp_stop(struct rproc *rproc)
|
||||
@ -415,7 +270,7 @@ static int adsp_stop(struct rproc *rproc)
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(adsp->dev, "timed out on wait\n");
|
||||
|
||||
ret = adsp->shutdown(adsp);
|
||||
ret = qcom_adsp_shutdown(adsp);
|
||||
if (ret)
|
||||
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
|
||||
|
||||
@ -571,7 +426,6 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
|
||||
static int adsp_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct adsp_pil_data *desc;
|
||||
const char *firmware_name;
|
||||
struct qcom_adsp *adsp;
|
||||
struct rproc *rproc;
|
||||
int ret;
|
||||
@ -580,22 +434,12 @@ static int adsp_probe(struct platform_device *pdev)
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
firmware_name = desc->firmware_name;
|
||||
ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
|
||||
&firmware_name);
|
||||
if (ret < 0 && ret != -EINVAL) {
|
||||
dev_err(&pdev->dev, "unable to read firmware-name\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
|
||||
firmware_name, sizeof(*adsp));
|
||||
desc->firmware_name, sizeof(*adsp));
|
||||
if (!rproc) {
|
||||
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rproc->auto_boot = desc->auto_boot;
|
||||
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
|
||||
|
||||
adsp = (struct qcom_adsp *)rproc->priv;
|
||||
@ -604,11 +448,6 @@ static int adsp_probe(struct platform_device *pdev)
|
||||
adsp->info_name = desc->sysmon_name;
|
||||
platform_set_drvdata(pdev, adsp);
|
||||
|
||||
if (desc->is_wpss)
|
||||
adsp->shutdown = qcom_wpss_shutdown;
|
||||
else
|
||||
adsp->shutdown = qcom_adsp_shutdown;
|
||||
|
||||
ret = adsp_alloc_memory_region(adsp);
|
||||
if (ret)
|
||||
goto free_rproc;
|
||||
@ -617,13 +456,7 @@ static int adsp_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto free_rproc;
|
||||
|
||||
ret = qcom_rproc_pds_attach(adsp->dev, adsp,
|
||||
desc->proxy_pd_names);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
|
||||
goto free_rproc;
|
||||
}
|
||||
adsp->proxy_pd_count = ret;
|
||||
pm_runtime_enable(adsp->dev);
|
||||
|
||||
ret = adsp_init_reset(adsp);
|
||||
if (ret)
|
||||
@ -634,7 +467,7 @@ static int adsp_probe(struct platform_device *pdev)
|
||||
goto disable_pm;
|
||||
|
||||
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
|
||||
desc->load_state, qcom_adsp_pil_handover);
|
||||
qcom_adsp_pil_handover);
|
||||
if (ret)
|
||||
goto disable_pm;
|
||||
|
||||
@ -655,8 +488,7 @@ static int adsp_probe(struct platform_device *pdev)
|
||||
return 0;
|
||||
|
||||
disable_pm:
|
||||
qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
|
||||
|
||||
pm_runtime_disable(adsp->dev);
|
||||
free_rproc:
|
||||
rproc_free(rproc);
|
||||
|
||||
@ -669,11 +501,10 @@ static int adsp_remove(struct platform_device *pdev)
|
||||
|
||||
rproc_del(adsp->rproc);
|
||||
|
||||
qcom_q6v5_deinit(&adsp->q6v5);
|
||||
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
|
||||
qcom_remove_sysmon_subdev(adsp->sysmon);
|
||||
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
|
||||
qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
|
||||
pm_runtime_disable(adsp->dev);
|
||||
rproc_free(adsp->rproc);
|
||||
|
||||
return 0;
|
||||
@ -685,16 +516,11 @@ static const struct adsp_pil_data adsp_resource_init = {
|
||||
.ssr_name = "lpass",
|
||||
.sysmon_name = "adsp",
|
||||
.ssctl_id = 0x14,
|
||||
.is_wpss = false,
|
||||
.auto_boot = true,
|
||||
.clk_ids = (const char*[]) {
|
||||
"sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
|
||||
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
|
||||
},
|
||||
.num_clks = 7,
|
||||
.proxy_pd_names = (const char*[]) {
|
||||
"cx", NULL
|
||||
},
|
||||
};
|
||||
|
||||
static const struct adsp_pil_data cdsp_resource_init = {
|
||||
@ -703,39 +529,15 @@ static const struct adsp_pil_data cdsp_resource_init = {
|
||||
.ssr_name = "cdsp",
|
||||
.sysmon_name = "cdsp",
|
||||
.ssctl_id = 0x17,
|
||||
.is_wpss = false,
|
||||
.auto_boot = true,
|
||||
.clk_ids = (const char*[]) {
|
||||
"sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
|
||||
"q6_axim", NULL
|
||||
},
|
||||
.num_clks = 7,
|
||||
.proxy_pd_names = (const char*[]) {
|
||||
"cx", NULL
|
||||
},
|
||||
};
|
||||
|
||||
static const struct adsp_pil_data wpss_resource_init = {
|
||||
.crash_reason_smem = 626,
|
||||
.firmware_name = "wpss.mdt",
|
||||
.ssr_name = "wpss",
|
||||
.sysmon_name = "wpss",
|
||||
.ssctl_id = 0x19,
|
||||
.is_wpss = true,
|
||||
.auto_boot = false,
|
||||
.load_state = "wpss",
|
||||
.clk_ids = (const char*[]) {
|
||||
"ahb_bdg", "ahb", "rscp", NULL
|
||||
},
|
||||
.num_clks = 3,
|
||||
.proxy_pd_names = (const char*[]) {
|
||||
"cx", "mx", NULL
|
||||
},
|
||||
};
|
||||
|
||||
static const struct of_device_id adsp_of_match[] = {
|
||||
{ .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
|
||||
{ .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
|
||||
{ .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
|
||||
{ },
|
||||
};
|
||||
|
@ -4,7 +4,7 @@
|
||||
*
|
||||
* Copyright (C) 2016 Linaro Ltd.
|
||||
* Copyright (C) 2014 Sony Mobile Communications AB
|
||||
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2013, 2020-2021 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
@ -77,14 +77,6 @@
|
||||
|
||||
#define HALT_ACK_TIMEOUT_US 100000
|
||||
|
||||
/* QACCEPT Register Offsets */
|
||||
#define QACCEPT_ACCEPT_REG 0x0
|
||||
#define QACCEPT_ACTIVE_REG 0x4
|
||||
#define QACCEPT_DENY_REG 0x8
|
||||
#define QACCEPT_REQ_REG 0xC
|
||||
|
||||
#define QACCEPT_TIMEOUT_US 50
|
||||
|
||||
/* QDSP6SS_RESET */
|
||||
#define Q6SS_STOP_CORE BIT(0)
|
||||
#define Q6SS_CORE_ARES BIT(1)
|
||||
@ -125,12 +117,6 @@
|
||||
#define QDSP6SS_BOOT_CMD 0x404
|
||||
#define BOOT_FSM_TIMEOUT 10000
|
||||
|
||||
struct reg_info {
|
||||
struct regulator *reg;
|
||||
int uV;
|
||||
int uA;
|
||||
};
|
||||
|
||||
struct qcom_mss_reg_res {
|
||||
const char *supply;
|
||||
int uV;
|
||||
@ -140,20 +126,17 @@ struct qcom_mss_reg_res {
|
||||
struct rproc_hexagon_res {
|
||||
const char *hexagon_mba_image;
|
||||
struct qcom_mss_reg_res *proxy_supply;
|
||||
struct qcom_mss_reg_res *fallback_proxy_supply;
|
||||
struct qcom_mss_reg_res *active_supply;
|
||||
char **proxy_clk_names;
|
||||
char **reset_clk_names;
|
||||
char **active_clk_names;
|
||||
char **active_pd_names;
|
||||
char **proxy_pd_names;
|
||||
int version;
|
||||
bool need_mem_protection;
|
||||
bool has_alt_reset;
|
||||
bool has_mba_logs;
|
||||
bool has_spare_reg;
|
||||
bool has_qaccept_regs;
|
||||
bool has_ext_cntl_regs;
|
||||
bool has_vq6;
|
||||
};
|
||||
|
||||
struct q6v5 {
|
||||
@ -169,18 +152,8 @@ struct q6v5 {
|
||||
u32 halt_q6;
|
||||
u32 halt_modem;
|
||||
u32 halt_nc;
|
||||
u32 halt_vq6;
|
||||
u32 conn_box;
|
||||
|
||||
u32 qaccept_mdm;
|
||||
u32 qaccept_cx;
|
||||
u32 qaccept_axi;
|
||||
|
||||
u32 axim1_clk_off;
|
||||
u32 crypto_clk_off;
|
||||
u32 force_clk_on;
|
||||
u32 rscc_disable;
|
||||
|
||||
struct reset_control *mss_restart;
|
||||
struct reset_control *pdc_reset;
|
||||
|
||||
@ -189,24 +162,25 @@ struct q6v5 {
|
||||
struct clk *active_clks[8];
|
||||
struct clk *reset_clks[4];
|
||||
struct clk *proxy_clks[4];
|
||||
struct device *active_pds[1];
|
||||
struct device *proxy_pds[3];
|
||||
int active_clk_count;
|
||||
int reset_clk_count;
|
||||
int proxy_clk_count;
|
||||
int active_pd_count;
|
||||
int proxy_pd_count;
|
||||
|
||||
struct reg_info active_regs[1];
|
||||
struct reg_info proxy_regs[1];
|
||||
struct reg_info fallback_proxy_regs[2];
|
||||
struct reg_info proxy_regs[3];
|
||||
int active_reg_count;
|
||||
int proxy_reg_count;
|
||||
int fallback_proxy_reg_count;
|
||||
|
||||
bool dump_mba_loaded;
|
||||
size_t current_dump_size;
|
||||
size_t total_dump_size;
|
||||
|
||||
phys_addr_t mba_phys;
|
||||
void *mba_region;
|
||||
size_t mba_size;
|
||||
size_t dp_size;
|
||||
|
||||
@ -218,14 +192,10 @@ struct q6v5 {
|
||||
struct qcom_rproc_subdev smd_subdev;
|
||||
struct qcom_rproc_ssr ssr_subdev;
|
||||
struct qcom_sysmon *sysmon;
|
||||
struct platform_device *bam_dmux;
|
||||
bool need_mem_protection;
|
||||
bool has_alt_reset;
|
||||
bool has_mba_logs;
|
||||
bool has_spare_reg;
|
||||
bool has_qaccept_regs;
|
||||
bool has_ext_cntl_regs;
|
||||
bool has_vq6;
|
||||
int mpss_perm;
|
||||
int mba_perm;
|
||||
const char *hexagon_mdt_image;
|
||||
@ -238,7 +208,6 @@ enum {
|
||||
MSS_MSM8996,
|
||||
MSS_MSM8998,
|
||||
MSS_SC7180,
|
||||
MSS_SC7280,
|
||||
MSS_SDM845,
|
||||
};
|
||||
|
||||
@ -433,7 +402,7 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
|
||||
current_perm, next, perms);
|
||||
}
|
||||
|
||||
static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
|
||||
static void q6v5_debug_policy_load(struct q6v5 *qproc)
|
||||
{
|
||||
const struct firmware *dp_fw;
|
||||
|
||||
@ -441,7 +410,7 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
|
||||
return;
|
||||
|
||||
if (SZ_1M + dp_fw->size <= qproc->mba_size) {
|
||||
memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
|
||||
memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
|
||||
qproc->dp_size = dp_fw->size;
|
||||
}
|
||||
|
||||
@ -451,7 +420,6 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
|
||||
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
|
||||
{
|
||||
struct q6v5 *qproc = rproc->priv;
|
||||
void *mba_region;
|
||||
|
||||
/* MBA is restricted to a maximum size of 1M */
|
||||
if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
|
||||
@ -459,16 +427,8 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
|
||||
if (!mba_region) {
|
||||
dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
|
||||
&qproc->mba_phys, qproc->mba_size);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
memcpy(mba_region, fw->data, fw->size);
|
||||
q6v5_debug_policy_load(qproc, mba_region);
|
||||
memunmap(mba_region);
|
||||
memcpy(qproc->mba_region, fw->data, fw->size);
|
||||
q6v5_debug_policy_load(qproc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -499,12 +459,6 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
|
||||
regmap_update_bits(qproc->conn_map, qproc->conn_box,
|
||||
AXI_GATING_VALID_OVERRIDE, 0);
|
||||
ret = reset_control_deassert(qproc->mss_restart);
|
||||
} else if (qproc->has_ext_cntl_regs) {
|
||||
regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
|
||||
reset_control_assert(qproc->pdc_reset);
|
||||
reset_control_assert(qproc->mss_restart);
|
||||
reset_control_deassert(qproc->pdc_reset);
|
||||
ret = reset_control_deassert(qproc->mss_restart);
|
||||
} else {
|
||||
ret = reset_control_assert(qproc->mss_restart);
|
||||
}
|
||||
@ -522,7 +476,7 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
|
||||
ret = reset_control_reset(qproc->mss_restart);
|
||||
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
|
||||
reset_control_deassert(qproc->pdc_reset);
|
||||
} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
|
||||
} else if (qproc->has_spare_reg) {
|
||||
ret = reset_control_reset(qproc->mss_restart);
|
||||
} else {
|
||||
ret = reset_control_deassert(qproc->mss_restart);
|
||||
@ -581,7 +535,6 @@ static void q6v5_dump_mba_logs(struct q6v5 *qproc)
|
||||
{
|
||||
struct rproc *rproc = qproc->rproc;
|
||||
void *data;
|
||||
void *mba_region;
|
||||
|
||||
if (!qproc->has_mba_logs)
|
||||
return;
|
||||
@ -590,16 +543,12 @@ static void q6v5_dump_mba_logs(struct q6v5 *qproc)
|
||||
qproc->mba_size))
|
||||
return;
|
||||
|
||||
mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
|
||||
if (!mba_region)
|
||||
data = vmalloc(MBA_LOG_SIZE);
|
||||
if (!data)
|
||||
return;
|
||||
|
||||
data = vmalloc(MBA_LOG_SIZE);
|
||||
if (data) {
|
||||
memcpy(data, mba_region, MBA_LOG_SIZE);
|
||||
dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
|
||||
}
|
||||
memunmap(mba_region);
|
||||
memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
|
||||
dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
|
||||
}
|
||||
|
||||
static int q6v5proc_reset(struct q6v5 *qproc)
|
||||
@ -636,7 +585,7 @@ static int q6v5proc_reset(struct q6v5 *qproc)
|
||||
}
|
||||
|
||||
goto pbl_wait;
|
||||
} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
|
||||
} else if (qproc->version == MSS_SC7180) {
|
||||
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
|
||||
val |= Q6SS_CBCR_CLKEN;
|
||||
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
|
||||
@ -819,89 +768,6 @@ static int q6v5proc_reset(struct q6v5 *qproc)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
unsigned int val;
int ret;

if (!qproc->has_qaccept_regs)
return 0;

if (qproc->has_ext_cntl_regs) {
regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
regmap_write(qproc->conn_map, qproc->force_clk_on, 1);

ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
!val, 1, Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "failed to enable axim1 clock\n");
return -ETIMEDOUT;
}
}

regmap_write(map, offset + QACCEPT_REQ_REG, 1);

/* Wait for accept */
ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
QACCEPT_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "qchannel enable failed\n");
return -ETIMEDOUT;
}

return 0;
}

static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
int ret;
unsigned int val, retry;
unsigned int nretry = 10;
bool takedown_complete = false;

if (!qproc->has_qaccept_regs)
return;

while (!takedown_complete && nretry) {
nretry--;

/* Wait for active transactions to complete */
regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
QACCEPT_TIMEOUT_US);

/* Request Q-channel transaction takedown */
regmap_write(map, offset + QACCEPT_REQ_REG, 0);

/*
* If the request is denied, reset the Q-channel takedown request,
* wait for active transactions to complete and retry takedown.
*/
retry = 10;
while (retry) {
usleep_range(5, 10);
retry--;
ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
if (!ret && val) {
regmap_write(map, offset + QACCEPT_REQ_REG, 1);
break;
}

ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
if (!ret && !val) {
takedown_complete = true;
break;
}
}

if (!retry)
break;
}

/* Rely on mss_restart to clear out pending transactions on takedown failure */
if (!takedown_complete)
dev_err(qproc->dev, "qchannel takedown failed\n");
}
|
||||
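For context, the two helpers above are paired around the modem boot and reclaim paths elsewhere in this diff: q6v5_mba_load() brings the AXI Q-channel up before releasing the Q6, and the error and reclaim paths take the mdm, cx and axi channels down again. Below is a minimal, purely illustrative sketch of that pairing; the example_* functions are hypothetical and not part of this patch.

/* Illustrative only: how the Q-channel helpers are intended to be paired. */
static int example_bring_up_axi(struct q6v5 *qproc)
{
	int ret;

	/* Enable the AXI bridge Q-channel before releasing the Q6 from reset. */
	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret)
		dev_err(qproc->dev, "failed to enable axi bridge\n");

	return ret;
}

static void example_tear_down(struct q6v5 *qproc)
{
	/* On failure or shutdown, request takedown of every enabled Q-channel. */
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
}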
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
|
||||
struct regmap *halt_map,
|
||||
u32 offset)
|
||||
@ -929,8 +795,7 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
|
||||
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
|
||||
}
|
||||
|
||||
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
|
||||
const char *fw_name)
|
||||
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
|
||||
{
|
||||
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
|
||||
dma_addr_t phys;
|
||||
@ -941,7 +806,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
|
||||
void *ptr;
|
||||
int ret;
|
||||
|
||||
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
|
||||
metadata = qcom_mdt_read_metadata(qproc->dev, fw, qproc->hexagon_mdt_image, &size, NULL);
|
||||
if (IS_ERR(metadata))
|
||||
return PTR_ERR(metadata);
|
||||
|
||||
@ -1008,28 +873,25 @@ static int q6v5_mba_load(struct q6v5 *qproc)
|
||||
int xfermemop_ret;
|
||||
bool mba_load_err = false;
|
||||
|
||||
ret = qcom_q6v5_prepare(&qproc->q6v5);
|
||||
if (ret)
|
||||
return ret;
|
||||
qcom_q6v5_prepare(&qproc->q6v5);
|
||||
|
||||
ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
|
||||
if (ret < 0) {
|
||||
dev_err(qproc->dev, "failed to enable active power domains\n");
|
||||
goto disable_irqs;
|
||||
}
|
||||
|
||||
ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
|
||||
if (ret < 0) {
|
||||
dev_err(qproc->dev, "failed to enable proxy power domains\n");
|
||||
goto disable_irqs;
|
||||
}
|
||||
|
||||
ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
|
||||
qproc->fallback_proxy_reg_count);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
|
||||
goto disable_proxy_pds;
|
||||
goto disable_active_pds;
|
||||
}
|
||||
|
||||
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "failed to enable proxy supplies\n");
|
||||
goto disable_fallback_proxy_reg;
|
||||
goto disable_proxy_pds;
|
||||
}
|
||||
|
||||
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
|
||||
@ -1066,12 +928,6 @@ static int q6v5_mba_load(struct q6v5 *qproc)
|
||||
goto assert_reset;
|
||||
}
|
||||
|
||||
ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "failed to enable axi bridge\n");
|
||||
goto disable_active_clks;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
|
||||
* the Q6 access to this region.
|
||||
@ -1118,13 +974,8 @@ static int q6v5_mba_load(struct q6v5 *qproc)
|
||||
|
||||
halt_axi_ports:
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
|
||||
if (qproc->has_vq6)
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
|
||||
mba_load_err = true;
|
||||
reclaim_mba:
|
||||
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
|
||||
@ -1154,11 +1005,10 @@ static int q6v5_mba_load(struct q6v5 *qproc)
|
||||
disable_proxy_reg:
|
||||
q6v5_regulator_disable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
disable_fallback_proxy_reg:
|
||||
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
|
||||
qproc->fallback_proxy_reg_count);
|
||||
disable_proxy_pds:
|
||||
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
|
||||
disable_active_pds:
|
||||
q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
|
||||
disable_irqs:
|
||||
qcom_q6v5_unprepare(&qproc->q6v5);
|
||||
|
||||
@ -1174,8 +1024,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
|
||||
qproc->dp_size = 0;
|
||||
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
|
||||
if (qproc->has_vq6)
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
|
||||
if (qproc->version == MSS_MSM8996) {
|
||||
@ -1188,24 +1036,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
|
||||
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
|
||||
}
|
||||
|
||||
if (qproc->has_ext_cntl_regs) {
|
||||
regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
|
||||
|
||||
ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
|
||||
!val, 1, Q6SS_CBCR_TIMEOUT_US);
|
||||
if (ret)
|
||||
dev_err(qproc->dev, "failed to enable axim1 clock\n");
|
||||
|
||||
ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
|
||||
!val, 1, Q6SS_CBCR_TIMEOUT_US);
|
||||
if (ret)
|
||||
dev_err(qproc->dev, "failed to enable crypto clock\n");
|
||||
}
|
||||
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
|
||||
q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
|
||||
|
||||
q6v5_reset_assert(qproc);
|
||||
|
||||
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
|
||||
@ -1214,6 +1044,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
|
||||
qproc->active_clk_count);
|
||||
q6v5_regulator_disable(qproc, qproc->active_regs,
|
||||
qproc->active_reg_count);
|
||||
q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
|
||||
|
||||
/* In case of failure or coredump scenario where reclaiming MBA memory
|
||||
* could not happen reclaim it here.
|
||||
@ -1229,8 +1060,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
|
||||
qproc->proxy_pd_count);
|
||||
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
|
||||
qproc->proxy_clk_count);
|
||||
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
|
||||
qproc->fallback_proxy_reg_count);
|
||||
q6v5_regulator_disable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
}
|
||||
@ -1291,7 +1120,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
|
||||
/* Initialize the RMB validator */
|
||||
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
|
||||
|
||||
ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
|
||||
ret = q6v5_mpss_init_image(qproc, fw);
|
||||
if (ret)
|
||||
goto release_firmware;
|
||||
|
||||
@ -1609,30 +1438,27 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
|
||||
qproc->proxy_clk_count);
|
||||
q6v5_regulator_disable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
|
||||
qproc->fallback_proxy_reg_count);
|
||||
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
|
||||
}
|
||||
|
||||
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
|
||||
{
|
||||
struct of_phandle_args args;
|
||||
int halt_cell_cnt = 3;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
|
||||
qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(qproc->reg_base))
|
||||
return PTR_ERR(qproc->reg_base);
|
||||
|
||||
qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
|
||||
qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(qproc->rmb_base))
|
||||
return PTR_ERR(qproc->rmb_base);
|
||||
|
||||
if (qproc->has_vq6)
|
||||
halt_cell_cnt++;
|
||||
|
||||
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
|
||||
"qcom,halt-regs", halt_cell_cnt, 0, &args);
|
||||
"qcom,halt-regs", 3, 0, &args);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
|
||||
return -EINVAL;
|
||||
@ -1647,52 +1473,6 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
|
||||
qproc->halt_modem = args.args[1];
|
||||
qproc->halt_nc = args.args[2];
|
||||
|
||||
if (qproc->has_vq6)
|
||||
qproc->halt_vq6 = args.args[3];
|
||||
|
||||
if (qproc->has_qaccept_regs) {
|
||||
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
|
||||
"qcom,qaccept-regs",
|
||||
3, 0, &args);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qproc->qaccept_mdm = args.args[0];
|
||||
qproc->qaccept_cx = args.args[1];
|
||||
qproc->qaccept_axi = args.args[2];
|
||||
}
|
||||
|
||||
if (qproc->has_ext_cntl_regs) {
|
||||
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
|
||||
"qcom,ext-regs",
|
||||
2, 0, &args);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qproc->conn_map = syscon_node_to_regmap(args.np);
|
||||
of_node_put(args.np);
|
||||
if (IS_ERR(qproc->conn_map))
|
||||
return PTR_ERR(qproc->conn_map);
|
||||
|
||||
qproc->force_clk_on = args.args[0];
|
||||
qproc->rscc_disable = args.args[1];
|
||||
|
||||
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
|
||||
"qcom,ext-regs",
|
||||
2, 1, &args);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qproc->axim1_clk_off = args.args[0];
|
||||
qproc->crypto_clk_off = args.args[1];
|
||||
}
|
||||
|
||||
if (qproc->has_spare_reg) {
|
||||
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
|
||||
"qcom,spare-regs",
|
||||
@ -1784,7 +1564,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
|
||||
return PTR_ERR(qproc->mss_restart);
|
||||
}
|
||||
|
||||
if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
|
||||
if (qproc->has_alt_reset || qproc->has_spare_reg) {
|
||||
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
|
||||
"pdc_reset");
|
||||
if (IS_ERR(qproc->pdc_reset)) {
|
||||
@ -1825,6 +1605,12 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
||||
|
||||
qproc->mba_phys = r.start;
|
||||
qproc->mba_size = resource_size(&r);
|
||||
qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
|
||||
if (!qproc->mba_region) {
|
||||
dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
|
||||
&r.start, qproc->mba_size);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!child) {
|
||||
node = of_parse_phandle(qproc->dev->of_node,
|
||||
@ -1851,7 +1637,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
||||
static int q6v5_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct rproc_hexagon_res *desc;
|
||||
struct device_node *node;
|
||||
struct q6v5 *qproc;
|
||||
struct rproc *rproc;
|
||||
const char *mba_image;
|
||||
@ -1867,10 +1652,8 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
mba_image = desc->hexagon_mba_image;
|
||||
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
|
||||
0, &mba_image);
|
||||
if (ret < 0 && ret != -EINVAL) {
|
||||
dev_err(&pdev->dev, "unable to read mba firmware-name\n");
|
||||
if (ret < 0 && ret != -EINVAL)
|
||||
return ret;
|
||||
}
|
||||
|
||||
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
|
||||
mba_image, sizeof(*qproc));
|
||||
@ -1888,16 +1671,11 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
qproc->hexagon_mdt_image = "modem.mdt";
|
||||
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
|
||||
1, &qproc->hexagon_mdt_image);
|
||||
if (ret < 0 && ret != -EINVAL) {
|
||||
dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
|
||||
if (ret < 0 && ret != -EINVAL)
|
||||
goto free_rproc;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, qproc);
|
||||
|
||||
qproc->has_qaccept_regs = desc->has_qaccept_regs;
|
||||
qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
|
||||
qproc->has_vq6 = desc->has_vq6;
|
||||
qproc->has_spare_reg = desc->has_spare_reg;
|
||||
ret = q6v5_init_mem(qproc, pdev);
|
||||
if (ret)
|
||||
@ -1947,24 +1725,21 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
}
|
||||
qproc->active_reg_count = ret;
|
||||
|
||||
ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
|
||||
desc->active_pd_names);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "Failed to attach active power domains\n");
|
||||
goto free_rproc;
|
||||
}
|
||||
qproc->active_pd_count = ret;
|
||||
|
||||
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
|
||||
desc->proxy_pd_names);
|
||||
/* Fallback to regulators for old device trees */
|
||||
if (ret == -ENODATA && desc->fallback_proxy_supply) {
|
||||
ret = q6v5_regulator_init(&pdev->dev,
|
||||
qproc->fallback_proxy_regs,
|
||||
desc->fallback_proxy_supply);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
|
||||
goto free_rproc;
|
||||
}
|
||||
qproc->fallback_proxy_reg_count = ret;
|
||||
} else if (ret < 0) {
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "Failed to init power domains\n");
|
||||
goto free_rproc;
|
||||
} else {
|
||||
qproc->proxy_pd_count = ret;
|
||||
goto detach_active_pds;
|
||||
}
|
||||
qproc->proxy_pd_count = ret;
|
||||
|
||||
qproc->has_alt_reset = desc->has_alt_reset;
|
||||
ret = q6v5_init_reset(qproc);
|
||||
@ -1975,7 +1750,7 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
qproc->need_mem_protection = desc->need_mem_protection;
|
||||
qproc->has_mba_logs = desc->has_mba_logs;
|
||||
|
||||
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
|
||||
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
|
||||
qcom_msa_handover);
|
||||
if (ret)
|
||||
goto detach_proxy_pds;
|
||||
@ -1995,10 +1770,6 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto remove_sysmon_subdev;
|
||||
|
||||
node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
|
||||
qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
|
||||
of_node_put(node);
|
||||
|
||||
return 0;
|
||||
|
||||
remove_sysmon_subdev:
|
||||
@ -2009,6 +1780,8 @@ static int q6v5_probe(struct platform_device *pdev)
|
||||
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
|
||||
detach_proxy_pds:
|
||||
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
|
||||
detach_active_pds:
|
||||
q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
|
||||
free_rproc:
|
||||
rproc_free(rproc);
|
||||
|
||||
@ -2020,17 +1793,15 @@ static int q6v5_remove(struct platform_device *pdev)
|
||||
struct q6v5 *qproc = platform_get_drvdata(pdev);
|
||||
struct rproc *rproc = qproc->rproc;
|
||||
|
||||
if (qproc->bam_dmux)
|
||||
of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
|
||||
rproc_del(rproc);
|
||||
|
||||
qcom_q6v5_deinit(&qproc->q6v5);
|
||||
qcom_remove_sysmon_subdev(qproc->sysmon);
|
||||
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
|
||||
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
|
||||
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
|
||||
|
||||
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
|
||||
q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
|
||||
|
||||
rproc_free(rproc);
|
||||
|
||||
@ -2054,6 +1825,10 @@ static const struct rproc_hexagon_res sc7180_mss = {
|
||||
"nav",
|
||||
NULL
|
||||
},
|
||||
.active_pd_names = (char*[]){
|
||||
"load_state",
|
||||
NULL
|
||||
},
|
||||
.proxy_pd_names = (char*[]){
|
||||
"cx",
|
||||
"mx",
|
||||
@ -2064,40 +1839,9 @@ static const struct rproc_hexagon_res sc7180_mss = {
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = true,
|
||||
.has_spare_reg = true,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_SC7180,
|
||||
};
|
||||
|
||||
static const struct rproc_hexagon_res sc7280_mss = {
|
||||
.hexagon_mba_image = "mba.mbn",
|
||||
.proxy_clk_names = (char*[]){
|
||||
"xo",
|
||||
"pka",
|
||||
NULL
|
||||
},
|
||||
.active_clk_names = (char*[]){
|
||||
"iface",
|
||||
"offline",
|
||||
"snoc_axi",
|
||||
NULL
|
||||
},
|
||||
.proxy_pd_names = (char*[]){
|
||||
"cx",
|
||||
"mss",
|
||||
NULL
|
||||
},
|
||||
.need_mem_protection = true,
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = true,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = true,
|
||||
.has_ext_cntl_regs = true,
|
||||
.has_vq6 = true,
|
||||
.version = MSS_SC7280,
|
||||
};
|
||||
|
||||
static const struct rproc_hexagon_res sdm845_mss = {
|
||||
.hexagon_mba_image = "mba.mbn",
|
||||
.proxy_clk_names = (char*[]){
|
||||
@ -2117,6 +1861,10 @@ static const struct rproc_hexagon_res sdm845_mss = {
|
||||
"mnoc_axi",
|
||||
NULL
|
||||
},
|
||||
.active_pd_names = (char*[]){
|
||||
"load_state",
|
||||
NULL
|
||||
},
|
||||
.proxy_pd_names = (char*[]){
|
||||
"cx",
|
||||
"mx",
|
||||
@ -2127,9 +1875,6 @@ static const struct rproc_hexagon_res sdm845_mss = {
|
||||
.has_alt_reset = true,
|
||||
.has_mba_logs = false,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_SDM845,
|
||||
};
|
||||
|
||||
@ -2158,9 +1903,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = false,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_MSM8998,
|
||||
};
|
||||
|
||||
@ -2192,22 +1934,12 @@ static const struct rproc_hexagon_res msm8996_mss = {
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = false,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_MSM8996,
|
||||
};
|
||||
|
||||
static const struct rproc_hexagon_res msm8916_mss = {
|
||||
.hexagon_mba_image = "mba.mbn",
|
||||
.proxy_supply = (struct qcom_mss_reg_res[]) {
|
||||
{
|
||||
.supply = "pll",
|
||||
.uA = 100000,
|
||||
},
|
||||
{}
|
||||
},
|
||||
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
|
||||
{
|
||||
.supply = "mx",
|
||||
.uV = 1050000,
|
||||
@ -2216,6 +1948,10 @@ static const struct rproc_hexagon_res msm8916_mss = {
|
||||
.supply = "cx",
|
||||
.uA = 100000,
|
||||
},
|
||||
{
|
||||
.supply = "pll",
|
||||
.uA = 100000,
|
||||
},
|
||||
{}
|
||||
},
|
||||
.proxy_clk_names = (char*[]){
|
||||
@ -2228,31 +1964,16 @@ static const struct rproc_hexagon_res msm8916_mss = {
|
||||
"mem",
|
||||
NULL
|
||||
},
|
||||
.proxy_pd_names = (char*[]){
|
||||
"mx",
|
||||
"cx",
|
||||
NULL
|
||||
},
|
||||
.need_mem_protection = false,
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = false,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_MSM8916,
|
||||
};
|
||||
|
||||
static const struct rproc_hexagon_res msm8974_mss = {
|
||||
.hexagon_mba_image = "mba.b00",
|
||||
.proxy_supply = (struct qcom_mss_reg_res[]) {
|
||||
{
|
||||
.supply = "pll",
|
||||
.uA = 100000,
|
||||
},
|
||||
{}
|
||||
},
|
||||
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
|
||||
{
|
||||
.supply = "mx",
|
||||
.uV = 1050000,
|
||||
@ -2261,6 +1982,10 @@ static const struct rproc_hexagon_res msm8974_mss = {
|
||||
.supply = "cx",
|
||||
.uA = 100000,
|
||||
},
|
||||
{
|
||||
.supply = "pll",
|
||||
.uA = 100000,
|
||||
},
|
||||
{}
|
||||
},
|
||||
.active_supply = (struct qcom_mss_reg_res[]) {
|
||||
@ -2281,18 +2006,10 @@ static const struct rproc_hexagon_res msm8974_mss = {
|
||||
"mem",
|
||||
NULL
|
||||
},
|
||||
.proxy_pd_names = (char*[]){
|
||||
"mx",
|
||||
"cx",
|
||||
NULL
|
||||
},
|
||||
.need_mem_protection = false,
|
||||
.has_alt_reset = false,
|
||||
.has_mba_logs = false,
|
||||
.has_spare_reg = false,
|
||||
.has_qaccept_regs = false,
|
||||
.has_ext_cntl_regs = false,
|
||||
.has_vq6 = false,
|
||||
.version = MSS_MSM8974,
|
||||
};
|
||||
|
||||
@ -2303,7 +2020,6 @@ static const struct of_device_id q6v5_of_match[] = {
|
||||
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
|
||||
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
|
||||
{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
|
||||
{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
|
||||
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
|
||||
{ },
|
||||
};
|
||||
|
File diff suppressed because it is too large
@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017, Linaro Ltd.
|
||||
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/module.h>
|
||||
@ -11,13 +12,27 @@
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/remoteproc/qcom_rproc.h>
|
||||
#include <linux/rpmsg.h>
|
||||
#include <trace/events/rproc_qcom.h>
|
||||
|
||||
#include "qcom_common.h"
|
||||
|
||||
#define SYSMON_NOTIF_TIMEOUT CONFIG_RPROC_SYSMON_NOTIF_TIMEOUT
|
||||
|
||||
#define SYSMON_SUBDEV_NAME "sysmon"
|
||||
|
||||
static const char * const notif_timeout_msg = "sysmon msg from %s to %s for %s taking too long";
|
||||
static const char * const shutdown_timeout_msg = "sysmon_send_shutdown to %s taking too long";
|
||||
|
||||
static BLOCKING_NOTIFIER_HEAD(sysmon_notifiers);
|
||||
|
||||
struct qcom_sysmon;
|
||||
|
||||
struct notif_timeout_data {
|
||||
struct qcom_sysmon *dest;
|
||||
struct timer_list timer;
|
||||
};
|
||||
|
||||
struct qcom_sysmon {
|
||||
struct rproc_subdev subdev;
|
||||
struct rproc *rproc;
|
||||
@ -33,9 +48,11 @@ struct qcom_sysmon {
|
||||
int ssctl_version;
|
||||
int ssctl_instance;
|
||||
|
||||
struct notif_timeout_data timeout_data;
|
||||
struct notifier_block nb;
|
||||
|
||||
struct device *dev;
|
||||
uint32_t transaction_id;
|
||||
|
||||
struct rpmsg_endpoint *ept;
|
||||
struct completion comp;
|
||||
@ -50,42 +67,29 @@ struct qcom_sysmon {
|
||||
struct sockaddr_qrtr ssctl;
|
||||
};
|
||||
|
||||
enum {
|
||||
SSCTL_SSR_EVENT_BEFORE_POWERUP,
|
||||
SSCTL_SSR_EVENT_AFTER_POWERUP,
|
||||
SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
|
||||
SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
|
||||
};
|
||||
|
||||
static const char * const sysmon_state_string[] = {
|
||||
[SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup",
|
||||
[SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup",
|
||||
[SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown",
|
||||
[SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown",
|
||||
};
|
||||
|
||||
struct sysmon_event {
|
||||
const char *subsys_name;
|
||||
u32 ssr_event;
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(sysmon_lock);
|
||||
static LIST_HEAD(sysmon_list);
|
||||
|
||||
uint32_t qcom_sysmon_get_txn_id(struct qcom_sysmon *sysmon)
|
||||
{
|
||||
return sysmon->transaction_id;
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_sysmon_get_txn_id);
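qcom_sysmon_get_txn_id() exposes the per-remote transaction id that sysmon_stop() increments before each shutdown notification. A hedged sketch of how another in-kernel client might read it when logging an SSR sequence; the caller and the way it obtains the sysmon handle are assumptions, not part of this patch.

/* Hypothetical caller: log the transaction id that will accompany the next
 * before_shutdown notification for this remote.
 */
static void example_log_ssr_txn(struct qcom_sysmon *sysmon, const char *name)
{
	uint32_t tid = qcom_sysmon_get_txn_id(sysmon);

	pr_info("%s: current sysmon transaction id %u\n", name, tid);
}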
|
||||
|
||||
/**
|
||||
* sysmon_send_event() - send notification of other remote's SSR event
|
||||
* @sysmon: sysmon context
|
||||
* @event: sysmon event context
|
||||
*/
|
||||
static void sysmon_send_event(struct qcom_sysmon *sysmon,
|
||||
const struct sysmon_event *event)
|
||||
const struct qcom_sysmon *source)
|
||||
{
|
||||
char req[50];
|
||||
int len;
|
||||
int ret;
|
||||
|
||||
len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name,
|
||||
sysmon_state_string[event->ssr_event]);
|
||||
len = scnprintf(req, sizeof(req), "ssr:%s:%s", source->name,
|
||||
subdevice_state_string[source->state]);
|
||||
if (len >= sizeof(req))
|
||||
return;
|
||||
|
||||
@ -175,6 +179,7 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
|
||||
#define SSCTL_SHUTDOWN_REQ 0x21
|
||||
#define SSCTL_SHUTDOWN_READY_IND 0x21
|
||||
#define SSCTL_SUBSYS_EVENT_REQ 0x23
|
||||
#define SSCTL_SUBSYS_EVENT_WITH_TID_REQ 0x25
|
||||
|
||||
#define SSCTL_MAX_MSG_LEN 7
|
||||
|
||||
@ -202,22 +207,23 @@ static struct qmi_elem_info ssctl_shutdown_resp_ei[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
struct ssctl_subsys_event_req {
|
||||
struct ssctl_subsys_event_with_tid_req {
|
||||
u8 subsys_name_len;
|
||||
char subsys_name[SSCTL_SUBSYS_NAME_LENGTH];
|
||||
u32 event;
|
||||
uint32_t transaction_id;
|
||||
u8 evt_driven_valid;
|
||||
u32 evt_driven;
|
||||
};
|
||||
|
||||
static struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
|
||||
static struct qmi_elem_info ssctl_subsys_event_with_tid_req_ei[] = {
|
||||
{
|
||||
.data_type = QMI_DATA_LEN,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(uint8_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x01,
|
||||
.offset = offsetof(struct ssctl_subsys_event_req,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
subsys_name_len),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
@ -227,7 +233,7 @@ static struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
|
||||
.elem_size = sizeof(char),
|
||||
.array_type = VAR_LEN_ARRAY,
|
||||
.tlv_type = 0x01,
|
||||
.offset = offsetof(struct ssctl_subsys_event_req,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
subsys_name),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
@ -237,17 +243,27 @@ static struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
|
||||
.elem_size = sizeof(uint32_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x02,
|
||||
.offset = offsetof(struct ssctl_subsys_event_req,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
event),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
{
|
||||
.data_type = QMI_UNSIGNED_4_BYTE,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(uint32_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x03,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
transaction_id),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
{
|
||||
.data_type = QMI_OPT_FLAG,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(uint8_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x10,
|
||||
.offset = offsetof(struct ssctl_subsys_event_req,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
evt_driven_valid),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
@ -257,31 +273,32 @@ static struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
|
||||
.elem_size = sizeof(uint32_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x10,
|
||||
.offset = offsetof(struct ssctl_subsys_event_req,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_req,
|
||||
evt_driven),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
struct ssctl_subsys_event_resp {
|
||||
struct ssctl_subsys_event_with_tid_resp {
|
||||
struct qmi_response_type_v01 resp;
|
||||
};
|
||||
|
||||
static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
|
||||
static struct qmi_elem_info ssctl_subsys_event_with_tid_resp_ei[] = {
|
||||
{
|
||||
.data_type = QMI_STRUCT,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(struct qmi_response_type_v01),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x02,
|
||||
.offset = offsetof(struct ssctl_subsys_event_resp,
|
||||
.offset = offsetof(struct ssctl_subsys_event_with_tid_resp,
|
||||
resp),
|
||||
.ei_array = qmi_response_type_v01_ei,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
||||
static struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
|
||||
{}
|
||||
};
|
||||
@ -294,7 +311,7 @@ static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
|
||||
complete(&sysmon->ind_comp);
|
||||
}
|
||||
|
||||
static const struct qmi_msg_handler qmi_indication_handler[] = {
|
||||
static struct qmi_msg_handler qmi_indication_handler[] = {
|
||||
{
|
||||
.type = QMI_INDICATION,
|
||||
.msg_id = SSCTL_SHUTDOWN_READY_IND,
|
||||
@ -334,6 +351,9 @@ static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
|
||||
bool acked = false;
|
||||
int ret;
|
||||
|
||||
if (sysmon->ssctl_instance == -EINVAL)
|
||||
return false;
|
||||
|
||||
reinit_completion(&sysmon->ind_comp);
|
||||
reinit_completion(&sysmon->shutdown_comp);
|
||||
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
|
||||
@ -352,9 +372,9 @@ static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
|
||||
|
||||
ret = qmi_txn_wait(&txn, 5 * HZ);
|
||||
if (ret < 0) {
|
||||
dev_err(sysmon->dev, "timeout waiting for shutdown response\n");
|
||||
dev_err(sysmon->dev, "failed receiving QMI response\n");
|
||||
} else if (resp.resp.result) {
|
||||
dev_err(sysmon->dev, "shutdown request rejected\n");
|
||||
dev_err(sysmon->dev, "shutdown request failed\n");
|
||||
} else {
|
||||
dev_dbg(sysmon->dev, "shutdown request completed\n");
|
||||
acked = true;
|
||||
@ -372,43 +392,49 @@ static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
|
||||
* @event: sysmon event context
|
||||
*/
|
||||
static void ssctl_send_event(struct qcom_sysmon *sysmon,
|
||||
const struct sysmon_event *event)
|
||||
const struct qcom_sysmon *source)
|
||||
{
|
||||
struct ssctl_subsys_event_resp resp;
|
||||
struct ssctl_subsys_event_req req;
|
||||
struct ssctl_subsys_event_with_tid_resp resp;
|
||||
struct ssctl_subsys_event_with_tid_req req;
|
||||
struct qmi_txn txn;
|
||||
int ret;
|
||||
|
||||
if (sysmon->ssctl_instance == -EINVAL)
|
||||
return;
|
||||
|
||||
memset(&resp, 0, sizeof(resp));
|
||||
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_subsys_event_resp_ei, &resp);
|
||||
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_subsys_event_with_tid_resp_ei, &resp);
|
||||
if (ret < 0) {
|
||||
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
|
||||
return;
|
||||
}
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
strlcpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
|
||||
strscpy(req.subsys_name, source->name, sizeof(req.subsys_name));
|
||||
req.subsys_name_len = strlen(req.subsys_name);
|
||||
req.event = event->ssr_event;
|
||||
req.event = source->state;
|
||||
req.evt_driven_valid = true;
|
||||
req.evt_driven = SSCTL_SSR_EVENT_FORCED;
|
||||
req.transaction_id = sysmon->transaction_id;
|
||||
|
||||
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
|
||||
SSCTL_SUBSYS_EVENT_REQ, 40,
|
||||
ssctl_subsys_event_req_ei, &req);
|
||||
SSCTL_SUBSYS_EVENT_WITH_TID_REQ, 40,
|
||||
ssctl_subsys_event_with_tid_req_ei, &req);
|
||||
if (ret < 0) {
|
||||
dev_err(sysmon->dev, "failed to send subsystem event\n");
|
||||
dev_err(sysmon->dev, "failed to send shutdown request\n");
|
||||
qmi_txn_cancel(&txn);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = qmi_txn_wait(&txn, 5 * HZ);
|
||||
if (ret < 0)
|
||||
dev_err(sysmon->dev, "timeout waiting for subsystem event response\n");
|
||||
dev_err(sysmon->dev, "failed receiving QMI response\n");
|
||||
else if (resp.resp.result)
|
||||
dev_err(sysmon->dev, "subsystem event rejected\n");
|
||||
dev_err(sysmon->dev, "failed to receive %s ssr %s event. response result: %d\n",
|
||||
source->name, subdevice_state_string[source->state],
|
||||
resp.resp.result);
|
||||
else
|
||||
dev_dbg(sysmon->dev, "subsystem event accepted\n");
|
||||
dev_dbg(sysmon->dev, "ssr event send completed\n");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -465,18 +491,66 @@ static const struct qmi_ops ssctl_ops = {
|
||||
.del_server = ssctl_del_server,
|
||||
};
|
||||
|
||||
static void sysmon_notif_timeout_handler(struct timer_list *t)
{
struct notif_timeout_data *td = from_timer(td, t, timer);
struct qcom_sysmon *sysmon = container_of(td, struct qcom_sysmon, timeout_data);

if (IS_ENABLED(CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT) &&
system_state != SYSTEM_RESTART &&
system_state != SYSTEM_POWER_OFF &&
system_state != SYSTEM_HALT &&
!qcom_device_shutdown_in_progress)
panic(notif_timeout_msg, sysmon->name, td->dest->name,
subdevice_state_string[sysmon->state]);
else
WARN(1, notif_timeout_msg, sysmon->name, td->dest->name,
subdevice_state_string[sysmon->state]);
}

static void sysmon_shutdown_notif_timeout_handler(struct timer_list *t)
{
struct notif_timeout_data *td = from_timer(td, t, timer);
struct qcom_sysmon *sysmon = container_of(td, struct qcom_sysmon, timeout_data);

if (IS_ENABLED(CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT) &&
system_state != SYSTEM_RESTART &&
system_state != SYSTEM_POWER_OFF &&
system_state != SYSTEM_HALT &&
!qcom_device_shutdown_in_progress)
panic(shutdown_timeout_msg, sysmon->name);
else
WARN(1, shutdown_timeout_msg, sysmon->name);
}

static inline void send_event(struct qcom_sysmon *sysmon, struct qcom_sysmon *source)
{
unsigned long timeout;

source->timeout_data.timer.function = sysmon_notif_timeout_handler;
source->timeout_data.dest = sysmon;
timeout = jiffies + msecs_to_jiffies(SYSMON_NOTIF_TIMEOUT);
mod_timer(&source->timeout_data.timer, timeout);

/* Only SSCTL version 2 supports SSR events */
if (sysmon->ssctl_version == 2)
ssctl_send_event(sysmon, source);
else if (sysmon->ept)
sysmon_send_event(sysmon, source);

del_timer_sync(&source->timeout_data.timer);
}
|
||||
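The helpers above all follow the same guard pattern: arm timeout_data.timer before the potentially blocking notification, then delete it once the call returns, so a stuck listener trips the panic/WARN handler instead of hanging silently. A condensed sketch of that pattern, with the blocking work left as a placeholder comment (the example function itself is hypothetical):

/* Illustration of the timeout guard used by send_event() and sysmon_stop(). */
static void example_guarded_notify(struct qcom_sysmon *sysmon)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(SYSMON_NOTIF_TIMEOUT);

	sysmon->timeout_data.timer.function = sysmon_shutdown_notif_timeout_handler;
	mod_timer(&sysmon->timeout_data.timer, timeout);

	/* ... the blocking QMI/rpmsg notification would run here ... */

	del_timer_sync(&sysmon->timeout_data.timer);
}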
static int sysmon_prepare(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
|
||||
subdev);
|
||||
struct sysmon_event event = {
|
||||
.subsys_name = sysmon->name,
|
||||
.ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
|
||||
};
|
||||
|
||||
trace_rproc_qcom_event(dev_name(sysmon->rproc->dev.parent), SYSMON_SUBDEV_NAME, "prepare");
|
||||
|
||||
mutex_lock(&sysmon->state_lock);
|
||||
sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
|
||||
sysmon->state = QCOM_SSR_BEFORE_POWERUP;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon);
|
||||
mutex_unlock(&sysmon->state_lock);
|
||||
|
||||
return 0;
|
||||
@ -496,29 +570,23 @@ static int sysmon_start(struct rproc_subdev *subdev)
|
||||
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
|
||||
subdev);
|
||||
struct qcom_sysmon *target;
|
||||
struct sysmon_event event = {
|
||||
.subsys_name = sysmon->name,
|
||||
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
|
||||
};
|
||||
|
||||
trace_rproc_qcom_event(dev_name(sysmon->rproc->dev.parent), SYSMON_SUBDEV_NAME, "start");
|
||||
|
||||
mutex_lock(&sysmon->state_lock);
|
||||
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
|
||||
sysmon->state = QCOM_SSR_AFTER_POWERUP;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon);
|
||||
mutex_unlock(&sysmon->state_lock);
|
||||
|
||||
mutex_lock(&sysmon_lock);
|
||||
list_for_each_entry(target, &sysmon_list, node) {
|
||||
if (target == sysmon)
|
||||
continue;
|
||||
|
||||
mutex_lock(&target->state_lock);
|
||||
event.subsys_name = target->name;
|
||||
event.ssr_event = target->state;
|
||||
if (target == sysmon || target->state != QCOM_SSR_AFTER_POWERUP) {
|
||||
mutex_unlock(&target->state_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (sysmon->ssctl_version == 2)
|
||||
ssctl_send_event(sysmon, &event);
|
||||
else if (sysmon->ept)
|
||||
sysmon_send_event(sysmon, &event);
|
||||
send_event(sysmon, target);
|
||||
mutex_unlock(&target->state_lock);
|
||||
}
|
||||
mutex_unlock(&sysmon_lock);
|
||||
@ -528,41 +596,51 @@ static int sysmon_start(struct rproc_subdev *subdev)
|
||||
|
||||
static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
|
||||
{
|
||||
unsigned long timeout;
|
||||
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev);
|
||||
struct sysmon_event event = {
|
||||
.subsys_name = sysmon->name,
|
||||
.ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
|
||||
};
|
||||
|
||||
trace_rproc_qcom_event(dev_name(sysmon->rproc->dev.parent), SYSMON_SUBDEV_NAME,
|
||||
crashed ? "crash stop" : "stop");
|
||||
|
||||
sysmon->shutdown_acked = false;
|
||||
|
||||
mutex_lock(&sysmon->state_lock);
|
||||
sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
|
||||
sysmon->state = QCOM_SSR_BEFORE_SHUTDOWN;
|
||||
|
||||
sysmon->transaction_id++;
|
||||
dev_info(sysmon->dev, "Incrementing tid for %s to %d\n", sysmon->name,
|
||||
sysmon->transaction_id);
|
||||
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon);
|
||||
mutex_unlock(&sysmon->state_lock);
|
||||
|
||||
/* Don't request graceful shutdown if we've crashed */
|
||||
if (crashed)
|
||||
return;
|
||||
|
||||
sysmon->timeout_data.timer.function = sysmon_shutdown_notif_timeout_handler;
|
||||
timeout = jiffies + msecs_to_jiffies(SYSMON_NOTIF_TIMEOUT);
|
||||
mod_timer(&sysmon->timeout_data.timer, timeout);
|
||||
|
||||
if (sysmon->ssctl_version)
|
||||
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
|
||||
else if (sysmon->ept)
|
||||
sysmon->shutdown_acked = sysmon_request_shutdown(sysmon);
|
||||
|
||||
del_timer_sync(&sysmon->timeout_data.timer);
|
||||
}
|
||||
|
||||
static void sysmon_unprepare(struct rproc_subdev *subdev)
|
||||
{
|
||||
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
|
||||
subdev);
|
||||
struct sysmon_event event = {
|
||||
.subsys_name = sysmon->name,
|
||||
.ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
|
||||
};
|
||||
|
||||
trace_rproc_qcom_event(dev_name(sysmon->rproc->dev.parent), SYSMON_SUBDEV_NAME,
|
||||
"unprepare");
|
||||
|
||||
mutex_lock(&sysmon->state_lock);
|
||||
sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
|
||||
sysmon->state = QCOM_SSR_AFTER_SHUTDOWN;
|
||||
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon);
|
||||
mutex_unlock(&sysmon->state_lock);
|
||||
}
|
||||
|
||||
@ -576,20 +654,16 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
|
||||
void *data)
|
||||
{
|
||||
struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
|
||||
struct sysmon_event *sysmon_event = data;
|
||||
struct qcom_sysmon *source = data;
|
||||
|
||||
/* Skip non-running rprocs and the originating instance */
|
||||
if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP ||
|
||||
!strcmp(sysmon_event->subsys_name, sysmon->name)) {
|
||||
if (sysmon->state != QCOM_SSR_AFTER_POWERUP ||
|
||||
!strcmp(source->name, sysmon->name)) {
|
||||
dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
/* Only SSCTL version 2 supports SSR events */
|
||||
if (sysmon->ssctl_version == 2)
|
||||
ssctl_send_event(sysmon, sysmon_event);
|
||||
else if (sysmon->ept)
|
||||
sysmon_send_event(sysmon, sysmon_event);
|
||||
send_event(sysmon, source);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
@ -603,6 +677,140 @@ static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
#define QMI_SSCTL_GET_FAILURE_REASON_REQ 0x0022
|
||||
#define QMI_SSCTL_EMPTY_MSG_LENGTH 0
|
||||
#define QMI_SSCTL_ERROR_MSG_LENGTH 90
|
||||
#define QMI_EOTI_DATA_TYPE \
|
||||
{ \
|
||||
.data_type = QMI_EOTI, \
|
||||
.elem_len = 0, \
|
||||
.elem_size = 0, \
|
||||
.array_type = NO_ARRAY,\
|
||||
.tlv_type = 0x00, \
|
||||
.offset = 0, \
|
||||
.ei_array = NULL, \
|
||||
},
|
||||
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg {
|
||||
struct qmi_response_type_v01 resp;
|
||||
uint8_t error_message_valid;
|
||||
uint32_t error_message_len;
|
||||
char error_message[QMI_SSCTL_ERROR_MSG_LENGTH];
|
||||
};
|
||||
|
||||
static struct qmi_elem_info qmi_ssctl_get_failure_reason_req_msg_ei[] = {
|
||||
QMI_EOTI_DATA_TYPE
|
||||
};
|
||||
|
||||
static struct qmi_elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
|
||||
{
|
||||
.data_type = QMI_STRUCT,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(struct qmi_response_type_v01),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x02,
|
||||
.offset = offsetof(
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg,
|
||||
resp),
|
||||
.ei_array = qmi_response_type_v01_ei,
|
||||
},
|
||||
{
|
||||
.data_type = QMI_OPT_FLAG,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(uint8_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x10,
|
||||
.offset = offsetof(
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg,
|
||||
error_message_valid),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
{
|
||||
.data_type = QMI_DATA_LEN,
|
||||
.elem_len = 1,
|
||||
.elem_size = sizeof(uint8_t),
|
||||
.array_type = NO_ARRAY,
|
||||
.tlv_type = 0x10,
|
||||
.offset = offsetof(
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg,
|
||||
error_message_len),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
{
|
||||
.data_type = QMI_UNSIGNED_1_BYTE,
|
||||
.elem_len = QMI_SSCTL_ERROR_MSG_LENGTH,
|
||||
.elem_size = sizeof(char),
|
||||
.array_type = VAR_LEN_ARRAY,
|
||||
.tlv_type = 0x10,
|
||||
.offset = offsetof(
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg,
|
||||
error_message),
|
||||
.ei_array = NULL,
|
||||
},
|
||||
QMI_EOTI_DATA_TYPE
|
||||
};
|
||||
|
||||
/**
|
||||
* qcom_sysmon_get_reason() - Retrieve failure reason from a subsystem.
|
||||
* @sysmon: Sysmon context of the subsystem to query
|
||||
* @buf: Caller-allocated buffer for the returned NUL-terminated reason
|
||||
* @len: Length of @buf
|
||||
*
|
||||
* Reverts to using legacy sysmon API (sysmon_get_reason_no_qmi()) if client
|
||||
* handle is not set.
|
||||
*
|
||||
* Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
|
||||
* the SMD transport channel is not open, -ETIMEDOUT if the destination
|
||||
* subsystem does not respond, and -EPROTO if the destination subsystem
|
||||
* responds with something unexpected.
|
||||
*
|
||||
*/
|
||||
int qcom_sysmon_get_reason(struct qcom_sysmon *sysmon, char *buf, size_t len)
|
||||
{
|
||||
char req = 0;
|
||||
struct qmi_ssctl_get_failure_reason_resp_msg resp;
|
||||
struct qmi_txn txn;
|
||||
const char *dest_ss;
|
||||
int ret;
|
||||
|
||||
if (sysmon == NULL || buf == NULL || len == 0)
|
||||
return -EINVAL;
|
||||
|
||||
dest_ss = sysmon->name;
|
||||
|
||||
ret = qmi_txn_init(&sysmon->qmi, &txn, qmi_ssctl_get_failure_reason_resp_msg_ei,
|
||||
&resp);
|
||||
if (ret < 0) {
|
||||
pr_err("SYSMON QMI tx init failed to dest %s, ret - %d\n", dest_ss, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
|
||||
QMI_SSCTL_GET_FAILURE_REASON_REQ,
|
||||
QMI_SSCTL_EMPTY_MSG_LENGTH,
|
||||
qmi_ssctl_get_failure_reason_req_msg_ei,
|
||||
&req);
|
||||
if (ret < 0) {
|
||||
pr_err("SYSMON QMI send req failed to dest %s, ret - %d\n", dest_ss, ret);
|
||||
qmi_txn_cancel(&txn);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = qmi_txn_wait(&txn, 5 * HZ);
|
||||
if (ret < 0) {
|
||||
pr_err("SYSMON QMI qmi txn wait failed to dest %s, ret - %d\n", dest_ss, ret);
|
||||
goto out;
|
||||
} else if (resp.resp.result) {
|
||||
dev_err(sysmon->dev, "failed to receive req. response result: %d\n",
|
||||
resp.resp.result);
|
||||
goto out;
|
||||
}
|
||||
strscpy(buf, resp.error_message, len);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(qcom_sysmon_get_reason);
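A minimal sketch of how an SSR client might use qcom_sysmon_get_reason() after a subsystem crash to record the peer-reported failure reason; the buffer size reuse of QMI_SSCTL_ERROR_MSG_LENGTH and the point at which this runs are assumptions, not part of this patch.

/* Hypothetical helper: query and log the failure reason reported over QMI. */
static void example_report_crash_reason(struct qcom_sysmon *sysmon)
{
	char reason[QMI_SSCTL_ERROR_MSG_LENGTH];
	int ret;

	ret = qcom_sysmon_get_reason(sysmon, reason, sizeof(reason));
	if (ret)
		pr_err("could not fetch failure reason: %d\n", ret);
	else
		pr_err("subsystem failure reason: %s\n", reason);
}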
|
||||
|
||||
/**
|
||||
* qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc
|
||||
* @rproc: rproc context to associate the subdev with
|
||||
@ -631,9 +839,13 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
|
||||
init_completion(&sysmon->comp);
|
||||
init_completion(&sysmon->ind_comp);
|
||||
init_completion(&sysmon->shutdown_comp);
|
||||
timer_setup(&sysmon->timeout_data.timer, sysmon_notif_timeout_handler, 0);
|
||||
mutex_init(&sysmon->lock);
|
||||
mutex_init(&sysmon->state_lock);
|
||||
|
||||
if (sysmon->ssctl_instance == -EINVAL)
|
||||
goto add_subdev_callbacks;
|
||||
|
||||
sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
|
||||
"shutdown-ack");
|
||||
if (sysmon->shutdown_irq < 0) {
|
||||
@ -665,6 +877,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
|
||||
|
||||
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
|
||||
|
||||
add_subdev_callbacks:
|
||||
sysmon->subdev.prepare = sysmon_prepare;
|
||||
sysmon->subdev.start = sysmon_start;
|
||||
sysmon->subdev.stop = sysmon_stop;
|
||||
@ -700,7 +913,8 @@ void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
|
||||
|
||||
rproc_remove_subdev(sysmon->rproc, &sysmon->subdev);
|
||||
|
||||
qmi_handle_release(&sysmon->qmi);
|
||||
if (sysmon->ssctl_instance != -EINVAL)
|
||||
qmi_handle_release(&sysmon->qmi);
|
||||
|
||||
kfree(sysmon);
|
||||
}
|
||||
|
@ -4,7 +4,7 @@
|
||||
*
|
||||
* Copyright (C) 2016 Linaro Ltd
|
||||
* Copyright (C) 2015 Sony Mobile Communications Inc
|
||||
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2013, 2021 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
@ -15,6 +15,7 @@
|
||||
#include <linux/qcom_scm.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/soc/qcom/mdt_loader.h>
|
||||
|
||||
static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
|
||||
@ -31,42 +32,24 @@ static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
|
||||
return true;
|
||||
}
|
||||
|
||||
static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
|
||||
unsigned int segment, const char *fw_name,
|
||||
struct device *dev)
|
||||
static bool qcom_mdt_bins_are_split(const struct firmware *fw)
|
||||
{
|
||||
const struct elf32_phdr *phdr = &phdrs[segment];
|
||||
const struct firmware *seg_fw;
|
||||
char *seg_name;
|
||||
ssize_t ret;
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_hdr *ehdr;
|
||||
uint64_t seg_start, seg_end;
|
||||
int i;
|
||||
|
||||
if (strlen(fw_name) < 4)
|
||||
return -EINVAL;
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
|
||||
seg_name = kstrdup(fw_name, GFP_KERNEL);
|
||||
if (!seg_name)
|
||||
return -ENOMEM;
|
||||
|
||||
sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);
|
||||
ret = request_firmware_into_buf(&seg_fw, seg_name, dev,
|
||||
ptr, phdr->p_filesz);
|
||||
if (ret) {
|
||||
dev_err(dev, "error %zd loading %s\n", ret, seg_name);
|
||||
kfree(seg_name);
|
||||
return ret;
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
seg_start = phdrs[i].p_offset;
|
||||
seg_end = phdrs[i].p_offset + phdrs[i].p_filesz;
|
||||
if (seg_start > fw->size || seg_end > fw->size)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (seg_fw->size != phdr->p_filesz) {
|
||||
dev_err(dev,
|
||||
"failed to load segment %d from truncated file %s\n",
|
||||
segment, seg_name);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
release_firmware(seg_fw);
|
||||
kfree(seg_name);
|
||||
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -106,8 +89,9 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
|
||||
|
||||
/**
|
||||
* qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
|
||||
* @fw: firmware of mdt header or mbn
|
||||
* @data_len: length of the read metadata blob
|
||||
* @fw: firmware of mdt header or mbn
|
||||
* @data_len: length of the read metadata blob
|
||||
* @metadata_phys: phys address for the assigned metadata buffer
|
||||
*
|
||||
* The mechanism that performs the authentication of the loading firmware
|
||||
* expects an ELF header directly followed by the segment of hashes, with no
|
||||
@ -121,18 +105,19 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
|
||||
*
|
||||
* Return: pointer to data, or ERR_PTR()
|
||||
*/
|
||||
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
|
||||
const char *fw_name, struct device *dev)
|
||||
void *qcom_mdt_read_metadata(struct device *dev, const struct firmware *fw, const char *firmware,
|
||||
size_t *data_len, bool dma_phys_below_32b, dma_addr_t *metadata_phys)
|
||||
{
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_hdr *ehdr;
|
||||
unsigned int hash_segment = 0;
|
||||
size_t hash_offset;
|
||||
const struct firmware *seg_fw;
|
||||
struct device *scm_dev = NULL;
|
||||
size_t hash_index;
|
||||
size_t hash_size;
|
||||
size_t ehdr_size;
|
||||
unsigned int i;
|
||||
ssize_t ret;
|
||||
char *fw_name;
|
||||
void *data;
|
||||
int ret;
|
||||
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
@ -143,131 +128,99 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
|
||||
if (phdrs[0].p_type == PT_LOAD)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
for (i = 1; i < ehdr->e_phnum; i++) {
|
||||
if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) {
|
||||
hash_segment = i;
|
||||
for (hash_index = 1; hash_index < ehdr->e_phnum; hash_index++) {
|
||||
if (phdrs[hash_index].p_type != PT_LOAD &&
|
||||
(phdrs[hash_index].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!hash_segment) {
|
||||
dev_err(dev, "no hash segment found in %s\n", fw_name);
|
||||
if (hash_index >= ehdr->e_phnum)
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
ehdr_size = phdrs[0].p_filesz;
|
||||
hash_size = phdrs[hash_segment].p_filesz;
|
||||
hash_size = phdrs[hash_index].p_filesz;
|
||||
|
||||
/*
|
||||
* During the scm call memory protection will be enabled for the metadata
|
||||
* blob, so make sure it's physically contiguous, 4K aligned and
|
||||
* non-cachable to avoid XPU violations.
|
||||
*/
|
||||
if (metadata_phys) {
|
||||
if (!dma_phys_below_32b) {
|
||||
scm_dev = qcom_get_scm_device();
|
||||
if (!scm_dev)
|
||||
return ERR_PTR(-EPROBE_DEFER);
|
||||
data = dma_alloc_coherent(scm_dev, ehdr_size + hash_size,
|
||||
metadata_phys, GFP_KERNEL);
|
||||
} else {
|
||||
data = dma_alloc_coherent(dev, ehdr_size + hash_size,
|
||||
metadata_phys, GFP_KERNEL);
|
||||
}
|
||||
} else {
|
||||
data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
|
||||
if (!data)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Copy ELF header */
|
||||
/* copy elf header */
|
||||
memcpy(data, fw->data, ehdr_size);
|
||||
|
||||
if (ehdr_size + hash_size == fw->size) {
|
||||
/* Firmware is split and hash is packed following the ELF header */
|
||||
hash_offset = phdrs[0].p_filesz;
|
||||
memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
|
||||
} else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) {
|
||||
/* Hash is in its own segment, but within the loaded file */
|
||||
hash_offset = phdrs[hash_segment].p_offset;
|
||||
memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
|
||||
} else {
|
||||
/* Hash is in its own segment, beyond the loaded file */
|
||||
ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev);
|
||||
if (ret) {
|
||||
kfree(data);
|
||||
return ERR_PTR(ret);
|
||||
if (qcom_mdt_bins_are_split(fw)) {
|
||||
fw_name = kstrdup(firmware, GFP_KERNEL);
|
||||
if (!fw_name) {
|
||||
ret = -ENOMEM;
|
||||
goto free_metadata;
|
||||
|
||||
}
|
||||
snprintf(fw_name + strlen(fw_name) - 3, 4, "b%02d", hash_index);
|
||||
|
||||
ret = request_firmware_into_buf(&seg_fw, fw_name, dev, data + ehdr_size, hash_size);
|
||||
kfree(fw_name);
|
||||
|
||||
if (ret)
|
||||
goto free_metadata;
|
||||
|
||||
release_firmware(seg_fw);
|
||||
} else {
|
||||
memcpy(data + ehdr_size, fw->data + phdrs[hash_index].p_offset, hash_size);
|
||||
}
|
||||
|
||||
*data_len = ehdr_size + hash_size;
|
||||
|
||||
return data;
|
||||
free_metadata:
|
||||
if (metadata_phys) {
|
||||
if (!dma_phys_below_32b)
|
||||
dma_free_coherent(scm_dev, ehdr_size + hash_size, data, *metadata_phys);
|
||||
else
|
||||
dma_free_coherent(dev, ehdr_size + hash_size, data, *metadata_phys);
|
||||
} else
|
||||
kfree(data);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
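With the reworked signature above, a caller that only needs a kernel-virtual copy of the metadata (no DMA-visible buffer) can pass a NULL metadata_phys, in which case the buffer is kmalloc'd and freed with kfree(). A hedged sketch following the signature as declared in this file; the firmware name and the example function are assumptions.

/* Hypothetical caller: read the ELF header + hash segment for "modem.mdt". */
static int example_read_mdt_metadata(struct device *dev, const struct firmware *fw)
{
	size_t meta_len;
	void *meta;

	meta = qcom_mdt_read_metadata(dev, fw, "modem.mdt", &meta_len, false, NULL);
	if (IS_ERR(meta))
		return PTR_ERR(meta);

	/* ... hand meta/meta_len to the authenticating entity ... */

	kfree(meta);
	return 0;
}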
|
||||
|
||||
/**
|
||||
* qcom_mdt_pas_init() - initialize PAS region for firmware loading
|
||||
* @dev: device handle to associate resources with
|
||||
* @fw: firmware object for the mdt file
|
||||
* @firmware: name of the firmware, for construction of segment file names
|
||||
* @pas_id: PAS identifier
|
||||
* @mem_phys: physical address of allocated memory region
|
||||
* @ctx: PAS metadata context, to be released by caller
|
||||
*
|
||||
* Returns 0 on success, negative errno otherwise.
|
||||
*/
|
||||
int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
|
||||
const char *fw_name, int pas_id, phys_addr_t mem_phys,
|
||||
struct qcom_scm_pas_metadata *ctx)
|
||||
{
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_phdr *phdr;
|
||||
const struct elf32_hdr *ehdr;
|
||||
phys_addr_t min_addr = PHYS_ADDR_MAX;
|
||||
phys_addr_t max_addr = 0;
|
||||
size_t metadata_len;
|
||||
void *metadata;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!mdt_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
if (phdr->p_paddr < min_addr)
|
||||
min_addr = phdr->p_paddr;
|
||||
|
||||
if (phdr->p_paddr + phdr->p_memsz > max_addr)
|
||||
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
|
||||
}
|
||||
|
||||
metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev);
|
||||
if (IS_ERR(metadata)) {
|
||||
ret = PTR_ERR(metadata);
|
||||
dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx);
|
||||
kfree(metadata);
|
||||
if (ret) {
|
||||
/* Invalid firmware metadata */
|
||||
dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
|
||||
if (ret) {
|
||||
/* Unable to set up relocation */
|
||||
dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);

static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool pas_init)
static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *firmware,
int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool pas_init, bool dma_phys_below_32b,
struct qcom_mdt_metadata *mdata)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct elf32_hdr *ehdr;
const struct firmware *seg_fw;
phys_addr_t mem_reloc;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
dma_addr_t metadata_phys = 0;
struct device *scm_dev = NULL;
size_t metadata_len = 0;
size_t fw_name_len;
ssize_t offset;
void *metadata = NULL;
char *fw_name;
bool relocate = false;
bool is_split;
void *ptr;
int ret = 0;
int i;
@ -275,9 +228,41 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (!fw || !mem_region || !mem_phys || !mem_size)
return -EINVAL;

is_split = qcom_mdt_bins_are_split(fw);
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);

fw_name_len = strlen(firmware);
if (fw_name_len <= 4)
return -EINVAL;

fw_name = kstrdup(firmware, GFP_KERNEL);
if (!fw_name)
return -ENOMEM;

if (pas_init) {
metadata = qcom_mdt_read_metadata(dev, fw, firmware, &metadata_len,
dma_phys_below_32b, &metadata_phys);
if (IS_ERR(metadata)) {
ret = PTR_ERR(metadata);
dev_err(dev, "error %d reading firmware %s metadata\n",
ret, fw_name);
goto out;
}

if (mdata) {
mdata->buf = metadata;
mdata->buf_phys = metadata_phys;
mdata->size = metadata_len;
}

ret = qcom_scm_pas_init_image(pas_id, metadata_phys);
if (ret) {
dev_err(dev, "invalid firmware metadata\n");
goto deinit;
}
}

for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];

@ -289,9 +274,21 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,

if (phdr->p_paddr < min_addr)
min_addr = phdr->p_paddr;

if (phdr->p_paddr + phdr->p_memsz > max_addr)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}

if (relocate) {
if (pas_init) {
ret = qcom_scm_pas_mem_setup(pas_id, mem_phys,
max_addr - min_addr);
if (ret) {
dev_err(dev, "unable to setup relocation\n");
goto deinit;
}
}

/*
* The image is relocatable, so offset each segment based on
* the lowest segment address.
@ -328,22 +325,31 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,

ptr = mem_region + offset;

if (phdr->p_filesz && phdr->p_offset < fw->size &&
phdr->p_offset + phdr->p_filesz <= fw->size) {
/* Firmware is large enough to be non-split */
if (phdr->p_offset + phdr->p_filesz > fw->size) {
dev_err(dev, "file %s segment %d would be truncated\n",
fw_name, i);
ret = -EINVAL;
break;
}
if (phdr->p_filesz) {
if (!is_split) {
/* Firmware is large enough to be non-split */
memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
} else {
/* Firmware not large enough, load split-out segments */
snprintf(fw_name + fw_name_len - 3, 4, "b%02d", i);
ret = request_firmware_into_buf(&seg_fw, fw_name, dev,
ptr, phdr->p_filesz);
if (ret) {
dev_err(dev, "failed to load %s\n", fw_name);
break;
}

memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
} else if (phdr->p_filesz) {
/* Firmware not large enough, load split-out segments */
ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev);
if (ret)
break;
if (seg_fw->size != phdr->p_filesz) {
dev_err(dev,
"failed to load segment %d from truncated file %s\n",
i, fw_name);
release_firmware(seg_fw);
ret = -EINVAL;
break;
}

release_firmware(seg_fw);
}
}

if (phdr->p_memsz > phdr->p_filesz)
@ -352,7 +358,24 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,

if (reloc_base)
*reloc_base = mem_reloc;
deinit:
if (ret)
qcom_scm_pas_shutdown(pas_id);

if (!mdata && pas_init) {
if (dma_phys_below_32b) {
dma_free_coherent(dev, metadata_len, metadata, metadata_phys);
} else {
scm_dev = qcom_get_scm_device();
if (!scm_dev)
goto out;

dma_free_coherent(scm_dev, metadata_len, metadata, metadata_phys);
}
}

out:
kfree(fw_name);
return ret;
}

@ -374,14 +397,8 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
int ret;

ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
if (ret)
return ret;

return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
mem_size, reloc_base, true);
mem_size, reloc_base, true, false, NULL);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
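/*
 * Illustrative sketch (not part of this snapshot): minimal end-to-end use of
 * qcom_mdt_load(). The firmware name, PAS id and the pre-mapped memory
 * region are assumptions for illustration only.
 */
static int example_mdt_load(struct device *dev, void *mem_region,
			    phys_addr_t mem_phys, size_t mem_size)
{
	const struct firmware *fw;
	phys_addr_t reloc_base;
	int ret;

	ret = request_firmware(&fw, "adsp.mdt", dev);
	if (ret)
		return ret;

	ret = qcom_mdt_load(dev, fw, "adsp.mdt", 1 /* pas_id, assumed */, mem_region,
			    mem_phys, mem_size, &reloc_base);
	release_firmware(fw);
	return ret;
}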

@ -404,9 +421,68 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
size_t mem_size, phys_addr_t *reloc_base)
{
return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
mem_size, reloc_base, false);
mem_size, reloc_base, false, false, NULL);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);

/**
* qcom_mdt_load_no_free() - load firmware whose header has already been loaded as @fw
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
* @firmware: name of the firmware, for construction of segment file names
* @pas_id: PAS identifier
* @mem_region: allocated memory region to load firmware into
* @mem_phys: physical address of allocated memory region
* @mem_size: size of the allocated memory region
* @reloc_base: adjusted physical address after relocation
*
* This function is essentially the same as qcom_mdt_load. The only difference
* between the two is that the metadata is not freed at the end of this call.
* The client must call qcom_mdt_free_metadata for cleanup.
*
* Returns 0 on success, negative errno otherwise.
*/
int qcom_mdt_load_no_free(struct device *dev, const struct firmware *fw, const char *firmware,
int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool dma_phys_below_32b,
struct qcom_mdt_metadata *metadata)
{
return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
mem_size, reloc_base, true, dma_phys_below_32b, metadata);
}
EXPORT_SYMBOL(qcom_mdt_load_no_free);
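/*
 * Illustrative sketch (not part of this snapshot): pairing
 * qcom_mdt_load_no_free() with qcom_mdt_free_metadata(), for callers that
 * need the metadata to stay mapped until authentication completes. The
 * firmware name and PAS id are assumptions for illustration.
 */
static int example_load_no_free(struct device *dev, const struct firmware *fw,
				void *mem_region, phys_addr_t mem_phys,
				size_t mem_size)
{
	struct qcom_mdt_metadata mdata = {};
	phys_addr_t reloc_base;
	int ret;

	ret = qcom_mdt_load_no_free(dev, fw, "cdsp.mdt", 18 /* pas_id, assumed */,
				    mem_region, mem_phys, mem_size,
				    &reloc_base, false, &mdata);

	/* ... authentication (e.g. qcom_scm_pas_auth_and_reset()) would run here ... */

	qcom_mdt_free_metadata(dev, 18, &mdata, false, ret);
	return ret;
}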

/**
* qcom_mdt_free_metadata() - free the firmware metadata
* @dev: device handle to associate resources with
* @pas_id: PAS identifier
* @mdata: reference to metadata region to be freed
* @err: whether this call was made after an error occurred
*
* Free the metadata that was allocated by mdt loader.
*
*/
void qcom_mdt_free_metadata(struct device *dev, int pas_id, struct qcom_mdt_metadata *mdata,
bool dma_phys_below_32b, int err)
{
struct device *scm_dev;

if (err && qcom_scm_pas_shutdown_retry(pas_id))
panic("Panicking, failed to shutdown peripheral %d\n", pas_id);
if (mdata) {
if (!dma_phys_below_32b) {
scm_dev = qcom_get_scm_device();
if (!scm_dev) {
pr_err("%s: scm_dev has not been created!\n", __func__);
return;
}
dma_free_coherent(scm_dev, mdata->size, mdata->buf, mdata->buf_phys);
} else {
dma_free_coherent(dev, mdata->size, mdata->buf, mdata->buf_phys);
}
}
}
EXPORT_SYMBOL(qcom_mdt_free_metadata);

MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
MODULE_LICENSE("GPL v2");

@ -1,3 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef __QCOM_RPROC_H__
#define __QCOM_RPROC_H__

@ -27,6 +32,8 @@ struct qcom_ssr_notify_data {
#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)

void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
void *qcom_register_early_ssr_notifier(const char *name, struct notifier_block *nb);
int qcom_unregister_early_ssr_notifier(void *notify, struct notifier_block *nb);
int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
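/*
 * Illustrative sketch (not part of this header): how a client driver might
 * register an early SSR notifier using the API declared above. The subsystem
 * name, the callback body and the qcom_ssr_notify_data field usage are
 * assumptions for illustration.
 */
static int example_ssr_cb(struct notifier_block *nb, unsigned long code, void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	pr_info("early SSR notification for %s (crashed: %d)\n",
		notify_data->name, notify_data->crashed);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_ssr_cb };

static void example_register(void)
{
	void *handle = qcom_register_early_ssr_notifier("lpass", &example_nb);

	if (IS_ERR_OR_NULL(handle))
		return;
	/* ... later: qcom_unregister_early_ssr_notifier(handle, &example_nb); */
}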

#else
@ -37,6 +44,17 @@ static inline void *qcom_register_ssr_notifier(const char *name,
return NULL;
}

static inline void *qcom_register_early_ssr_notifier(const char *name, struct notifier_block *nb)
{
return NULL;
}

static inline int qcom_unregister_early_ssr_notifier(void *notify,
struct notifier_block *nb)
{
return 0;
}

static inline int qcom_unregister_ssr_notifier(void *notify,
struct notifier_block *nb)
{
@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_MDT_LOADER_H__
#define __QCOM_MDT_LOADER_H__

@ -10,14 +13,16 @@

struct device;
struct firmware;
struct qcom_scm_pas_metadata;

struct qcom_mdt_metadata {
void *buf;
dma_addr_t buf_phys;
size_t size;
};

#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)

ssize_t qcom_mdt_get_size(const struct firmware *fw);
int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, phys_addr_t mem_phys,
struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
@ -27,8 +32,15 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
const char *fw_name, struct device *dev);
void *qcom_mdt_read_metadata(struct device *dev, const struct firmware *fw,
const char *firmware, size_t *data_len, bool dma_phys_below_32b,
dma_addr_t *metadata_phys);
int qcom_mdt_load_no_free(struct device *dev, const struct firmware *fw, const char *firmware,
int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool dma_phys_below_32b,
struct qcom_mdt_metadata *metadata);
void qcom_mdt_free_metadata(struct device *dev, int pas_id, struct qcom_mdt_metadata *mdata,
bool dma_phys_below_32b, int err);

#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */

@ -37,13 +49,6 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
return -ENODEV;
}

static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, phys_addr_t mem_phys,
struct qcom_scm_pas_metadata *pas_metadata_ctx)
{
return -ENODEV;
}

static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id,
void *mem_region, phys_addr_t mem_phys,
@ -62,11 +67,23 @@ static inline int qcom_mdt_load_no_init(struct device *dev,
return -ENODEV;
}

static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
size_t *data_len, const char *fw_name,
struct device *dev)
void *qcom_mdt_read_metadata(struct device *dev, const struct firmware *fw,
const char *firmware, size_t *data_len, bool dma_phys_below_32b,
dma_addr_t *metadata_phys)
{
return ERR_PTR(-ENODEV);
return NULL;
}
int qcom_mdt_load_no_free(struct device *dev, const struct firmware *fw, const char *firmware,
int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base, bool dma_phys_below_32b,
struct qcom_mdt_metadata *metadata)
{
return -ENODEV;
}
void qcom_mdt_free_metadata(struct device *dev, int pas_id, struct qcom_mdt_metadata *mdata,
bool dma_phys_below_32b, int err)
{
return;
}

#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
include/trace/events/rproc_qcom.h (new file, 37 lines)
@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rproc_qcom

#if !defined(_TRACE_RPROC_QCOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPROC_QCOM_H
#include <linux/tracepoint.h>

TRACE_EVENT(rproc_qcom_event,

TP_PROTO(const char *name, const char *event, const char *subevent),

TP_ARGS(name, event, subevent),

TP_STRUCT__entry(
__string(name, name)
__string(event, event)
__string(subevent, subevent)
),

TP_fast_assign(
__assign_str(name, name);
__assign_str(event, event);
__assign_str(subevent, subevent);
),

TP_printk("%s: %s: %s", __get_str(name), __get_str(event), __get_str(subevent))
);
#endif /* _TRACE_RPROC_QCOM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
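/*
 * Illustrative sketch (not part of this header): emitting the tracepoint
 * defined above from a driver. Including this header with CREATE_TRACE_POINTS
 * defined in exactly one .c file generates trace_rproc_qcom_event(); the
 * event and subevent strings below are assumptions for illustration.
 */
static void example_trace(const char *rproc_name)
{
	trace_rproc_qcom_event(rproc_name, "start", "enter");
	/* ... boot work for the remote processor would happen here ... */
	trace_rproc_qcom_event(rproc_name, "start", "exit");
}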