remoteproc: esoc: Add drivers to support external socs with APQ

Add drivers to support an external soc on Waipio. These drivers
enable a gpio-based interface between the APQ and the external soc,
and provide a userspace interface so that the external soc can be
controlled from userspace. The external soc drivers are placed in
the remoteproc folder because, in upcoming patches, PIL/SSR support
will be replaced by remoteproc.

Change-Id: Ie53de42e0a501da372d0d9f7d38e2cb9c7f59f69
Signed-off-by: Rishabh Bhatnagar <rishabhb@codeaurora.org>
Authored by Rishabh Bhatnagar on 2020-11-19 18:58:51 -08:00, committed by Siddharth Gupta
parent 1a9572f435
commit 2ed7c32419
14 changed files with 4025 additions and 0 deletions


@@ -205,6 +205,72 @@ config QCOM_SPSS
remote processor. This also supports remote processors that
are booted before kernel comes up.
config QCOM_ESOC
tristate "External SOCs Control"
help
External SOCs can be powered on and monitored by user space or
kernel drivers, and can be directed to respond to control
commands. This framework provides an interface for tracking
events related to the external slave SOCs.
config QCOM_ESOC_DEV
tristate "ESOC userspace interface"
help
Say yes here to enable a userspace representation of the control
link. Userspace can register a request engine or a command engine
for the external soc. It can receive event notifications from the
control link.
config QCOM_ESOC_CLIENT
tristate "ESOC client interface"
depends on OF
help
Say yes here to enable the client interface for external socs.
Clients can specify the external soc they are interested in
by using device tree phandles and, based on this, can register
for notifications from a specific soc.
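For illustration, a hedged sketch of how a client driver could use this interface via the devm_register_esoc_client() helper added by this patch; the probe function and the "mdm" entry in esoc-names are hypothetical, and error handling is abbreviated:

/* Needs <linux/esoc_client.h> and <linux/platform_device.h>. */
static int example_client_probe(struct platform_device *pdev)
{
	struct esoc_desc *desc;

	/* Resolve the esoc listed under esoc-names = "mdm" in this node */
	desc = devm_register_esoc_client(&pdev->dev, "mdm");
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -ENODEV;
	dev_info(&pdev->dev, "bound to %s over %s\n", desc->name, desc->link);
	return 0;
}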
config QCOM_ESOC_DEBUG
tristate "ESOC debug support"
help
Say yes here to enable debugging support in the ESOC framework
and individual esoc drivers. This option adds extra logging
that, in the event of a bug, can help trace the problem.
config QCOM_ESOC_MDM_4X
tristate "Add support for external modems"
help
On some Qualcomm Technologies, Inc. boards, an external modem such
as mdm9x55 or sdx50m is connected to a primary msm. The primary soc
can control and monitor the modem via gpios. Data communication
with such modems can occur over PCIe or HSIC.
config QCOM_ESOC_MDM_DRV
tristate "Command engine for 4x series external modems"
help
Provides a command engine to control the behavior of an external
modem (such as mdm9x55 or sdx50m). That is, it extends the SSR
framework to power off, power on, or handle crash scenarios. It
also listens for events from the external modem.
config QCOM_ESOC_DBG_ENG
tristate "debug engine for 4x series external modems"
depends on QCOM_ESOC_MDM_DRV
help
Mainly used as a debug interface to probe the modem against various
scenarios. It provides a user interface to mask out certain commands
sent by the command engine to the external modem, and also allows
masking of certain notifications sent to the external modem.
config QCOM_ESOC_DBG_REQ_ENG
bool "manual request engine for 4x series external modems"
depends on QCOM_ESOC_DBG_ENG
help
Provides a user interface to handle incoming requests from
the external modem. It allows debugging of the IPC mechanism
between the external modem and the primary soc.
config QCOM_Q6V5_WCSS
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
depends on OF && ARCH_QCOM


@@ -25,6 +25,14 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_SPSS) += qcom_spss.o
obj-$(CONFIG_QCOM_ESOC) += qcom_esoc.o
qcom_esoc-y += esoc_bus.o
qcom_esoc-$(CONFIG_QCOM_ESOC_DEV) += esoc_dev.o
qcom_esoc-$(CONFIG_QCOM_ESOC_CLIENT) += esoc_client.o
qcom_esoc-$(CONFIG_QCOM_ESOC_MDM_4X) += esoc-mdm-pon.o esoc-mdm-4x.o
qcom_esoc-$(CONFIG_QCOM_ESOC_MDM_DRV) += esoc-mdm-drv.o
qcom_esoc-$(CONFIG_QCOM_ESOC_DBG_ENG) += esoc-mdm-dbg-eng.o
ccflags-$(CONFIG_QCOM_ESOC_DEBUG) := -DDEBUG
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o

File diff suppressed because it is too large.


@@ -0,0 +1,354 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, 2017-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/atomic.h>
#include <linux/device.h>
#include "esoc.h"
/*
* cmd_mask : Specifies if a command/notifier is masked, and
* whats the trigger value for mask to take effect.
* @mask_trigger: trigger value for mask.
* @mask: boolean to determine if command should be masked.
*/
struct esoc_mask {
atomic_t mask_trigger;
bool mask;
};
/*
* manual_to_esoc_cmd: Converts a user provided command
* to a corresponding esoc command.
* @cmd: ESOC command
* @manual_cmd: user specified command string.
*/
struct manual_to_esoc_cmd {
unsigned int cmd;
char manual_cmd[20];
};
/*
* manual_to_esoc_notify: Converts a user provided notification
* to corresponding esoc notification for Primary SOC.
* @notfication: ESOC notification.
* @manual_notifier: user specified notification string.
*/
struct manual_to_esoc_notify {
unsigned int notify;
char manual_notify[20];
};
static const struct manual_to_esoc_cmd cmd_map[] = {
{
.cmd = ESOC_PWR_ON,
.manual_cmd = "PON",
},
{
.cmd = ESOC_PREPARE_DEBUG,
.manual_cmd = "ENTER_DLOAD",
},
{ .cmd = ESOC_PWR_OFF,
.manual_cmd = "POFF",
},
{
.cmd = ESOC_FORCE_PWR_OFF,
.manual_cmd = "FORCE_POFF",
},
};
static struct esoc_mask cmd_mask[] = {
[ESOC_PWR_ON] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(1),
},
[ESOC_PREPARE_DEBUG] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_PWR_OFF] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_FORCE_PWR_OFF] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
};
static const struct manual_to_esoc_notify notify_map[] = {
{
.notify = ESOC_PRIMARY_REBOOT,
.manual_notify = "REBOOT",
},
{
.notify = ESOC_PRIMARY_CRASH,
.manual_notify = "PANIC",
},
};
static struct esoc_mask notify_mask[] = {
[ESOC_PRIMARY_REBOOT] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_PRIMARY_CRASH] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
};
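/*
 * Masking semantics: once a mask flag is set via the sysfs attributes
 * below, each dbg_check_cmd_mask()/dbg_check_notify_mask() call decrements
 * the corresponding mask_trigger, and the command/notification is only
 * stalled once the counter goes negative. ESOC_PWR_ON starts with a
 * trigger of 1, so one power-on is still allowed after its mask is set.
 */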
bool dbg_check_cmd_mask(unsigned int cmd)
{
pr_debug("command to mask %d\n", cmd);
if (cmd_mask[cmd].mask)
return atomic_add_negative(-1, &cmd_mask[cmd].mask_trigger);
else
return false;
}
EXPORT_SYMBOL(dbg_check_cmd_mask);
bool dbg_check_notify_mask(unsigned int notify)
{
pr_debug("notifier to mask %d\n", notify);
if (notify_mask[notify].mask)
return atomic_add_negative(-1, &notify_mask[notify].mask_trigger);
else
return false;
}
EXPORT_SYMBOL(dbg_check_notify_mask);
/*
* Create driver attributes that let you mask
* specific commands.
*/
static ssize_t command_mask_store(struct device_driver *drv, const char *buf, size_t count)
{
unsigned int cmd, i;
pr_debug("user input command %s\n", buf);
for (i = 0; i < ARRAY_SIZE(cmd_map); i++) {
if (!strcmp(cmd_map[i].manual_cmd, buf)) {
/*
* Map manual command string to ESOC command
* set mask for ESOC command
*/
cmd = cmd_map[i].cmd;
cmd_mask[cmd].mask = true;
pr_debug("Setting mask for manual command %s\n", buf);
break;
}
}
if (i >= ARRAY_SIZE(cmd_map))
pr_err("invalid command specified\n");
return count;
}
static DRIVER_ATTR_WO(command_mask);
static ssize_t notifier_mask_store(struct device_driver *drv, const char *buf, size_t count)
{
unsigned int notify, i;
pr_debug("user input notifier %s\n", buf);
for (i = 0; i < ARRAY_SIZE(notify_map); i++) {
if (!strcmp(buf, notify_map[i].manual_notify)) {
/*
* Map manual notifier string to primary soc
* notifier. Also set mask for the notifier.
*/
notify = notify_map[i].notify;
notify_mask[notify].mask = true;
pr_debug("Setting mask for manual notification %s\n", buf);
break;
}
}
if (i >= ARRAY_SIZE(notify_map))
pr_err("invalid notifier specified\n");
return count;
}
static DRIVER_ATTR_WO(notifier_mask);
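/*
 * Usage note: these are driver attributes, so they appear under the esoc
 * bus; for the "mdm-4x" driver registered later in this patch the paths
 * would be (illustrative):
 *   echo -n "PON" > /sys/bus/esoc/drivers/mdm-4x/command_mask
 *   echo -n "PANIC" > /sys/bus/esoc/drivers/mdm-4x/notifier_mask
 * The stores compare the raw buffer with strcmp(), so a trailing newline
 * prevents a match.
 */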
#ifdef CONFIG_QCOM_ESOC_DBG_REQ_ENG
static struct esoc_clink *dbg_clink;
/* Last recorded request from esoc */
static enum esoc_req last_req;
static DEFINE_SPINLOCK(req_lock);
/*
* esoc_to_user: Conversion of esoc ids to user visible strings
* id: esoc request, command, notifier, event id
* str: string equivalent of the above
*/
struct esoc_to_user {
unsigned int id;
char str[20];
};
static struct esoc_to_user in_to_resp[] = {
{
.id = ESOC_IMG_XFER_DONE,
.str = "XFER_DONE",
},
{
.id = ESOC_BOOT_DONE,
.str = "BOOT_DONE",
},
{
.id = ESOC_BOOT_FAIL,
.str = "BOOT_FAIL",
},
{
.id = ESOC_IMG_XFER_RETRY,
.str = "XFER_RETRY",
},
{ .id = ESOC_IMG_XFER_FAIL,
.str = "XFER_FAIL",
},
{
.id = ESOC_UPGRADE_AVAILABLE,
.str = "UPGRADE",
},
{ .id = ESOC_DEBUG_DONE,
.str = "DEBUG_DONE",
},
{
.id = ESOC_DEBUG_FAIL,
.str = "DEBUG_FAIL",
},
};
static struct esoc_to_user req_to_str[] = {
{
.id = ESOC_REQ_IMG,
.str = "REQ_IMG",
},
{
.id = ESOC_REQ_DEBUG,
.str = "REQ_DEBUG",
},
{
.id = ESOC_REQ_SHUTDOWN,
.str = "REQ_SHUTDOWN",
},
};
static ssize_t req_eng_resp_store(struct device_driver *drv, const char *buf,
size_t count)
{
unsigned int i;
const struct esoc_clink_ops *const clink_ops = dbg_clink->clink_ops;
dev_dbg(&dbg_clink->dev, "user input req eng response %s\n", buf);
for (i = 0; i < ARRAY_SIZE(in_to_resp); i++) {
size_t len1 = strlen(buf);
size_t len2 = strlen(in_to_resp[i].str);
if (len1 == len2 && !strcmp(buf, in_to_resp[i].str)) {
clink_ops->notify(in_to_resp[i].id, dbg_clink);
break;
}
}
if (i >= ARRAY_SIZE(in_to_resp))
dev_err(&dbg_clink->dev, "Invalid resp %s, specified\n", buf);
return count;
}
static DRIVER_ATTR_WO(req_eng_resp);
static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
{
unsigned int i;
unsigned long flags;
size_t count = 0;
spin_lock_irqsave(&req_lock, flags);
for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
if (last_req == req_to_str[i].id) {
count = scnprintf(buf, PAGE_SIZE, "%s\n", req_to_str[i].str);
break;
}
}
spin_unlock_irqrestore(&req_lock, flags);
return count;
}
static DRIVER_ATTR_RO(last_esoc_req);
static void esoc_handle_req(enum esoc_req req, struct esoc_eng *eng)
{
unsigned long flags;
spin_lock_irqsave(&req_lock, flags);
last_req = req;
spin_unlock_irqrestore(&req_lock, flags);
}
static void esoc_handle_evt(enum esoc_evt evt, struct esoc_eng *eng)
{
}
static struct esoc_eng dbg_req_eng = {
.handle_clink_req = esoc_handle_req,
.handle_clink_evt = esoc_handle_evt,
};
int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *drv)
{
int ret;
dbg_clink = clink;
ret = driver_create_file(drv, &driver_attr_req_eng_resp);
if (ret)
return ret;
ret = driver_create_file(drv, &driver_attr_last_esoc_req);
if (ret) {
dev_err(&clink->dev, "Unable to create last esoc req\n");
goto last_req_err;
}
ret = esoc_clink_register_req_eng(clink, &dbg_req_eng);
if (ret) {
pr_err("Unable to register req eng\n");
goto req_eng_fail;
}
spin_lock_init(&req_lock);
return 0;
req_eng_fail:
driver_remove_file(drv, &driver_attr_last_esoc_req);
last_req_err:
driver_remove_file(drv, &driver_attr_req_eng_resp);
return ret;
}
#else
int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *d)
{
return 0;
}
#endif
int mdm_dbg_eng_init(struct esoc_drv *esoc_drv, struct esoc_clink *clink)
{
int ret;
struct device_driver *drv = &esoc_drv->driver;
ret = driver_create_file(drv, &driver_attr_command_mask);
if (ret) {
pr_err("Unable to create command mask file\n");
goto cmd_mask_err;
}
ret = driver_create_file(drv, &driver_attr_notifier_mask);
if (ret) {
pr_err("Unable to create notify mask file\n");
goto notify_mask_err;
}
ret = register_dbg_req_eng(clink, drv);
if (ret) {
pr_err("Failed to register esoc dbg req eng\n");
goto dbg_req_fail;
}
return 0;
dbg_req_fail:
driver_remove_file(drv, &driver_attr_notifier_mask);
notify_mask_err:
driver_remove_file(drv, &driver_attr_command_mask);
cmd_mask_err:
return ret;
}
EXPORT_SYMBOL(mdm_dbg_eng_init);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,600 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2015, 2017-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/esoc_client.h>
#include "esoc-mdm.h"
#include "mdm-dbg.h"
/* Default number of powerup trial requests per session */
#define ESOC_DEF_PON_REQ 3
#define ESOC_MAX_PON_TRIES 5
#define BOOT_FAIL_ACTION_DEF BOOT_FAIL_ACTION_PANIC
#define S3_RESET_DELAY_MS 1000
enum esoc_pon_state {
PON_INIT,
PON_SUCCESS,
PON_RETRY,
PON_FAIL
};
enum {
PWR_OFF = 0x1,
SHUTDOWN,
RESET,
PEER_CRASH,
IN_DEBUG,
CRASH,
PWR_ON,
BOOT,
RUN,
};
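/*
 * The ordering of these states matters: mdm_handle_clink_evt() drops
 * errfatal/unexpected-reset events while mode <= CRASH, i.e. before the
 * modem has reached the PWR_ON/BOOT/RUN states.
 */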
struct mdm_drv {
unsigned int mode;
struct esoc_eng cmd_eng;
struct completion pon_done;
struct completion ssr_ready;
struct completion req_eng_wait;
struct esoc_clink *esoc_clink;
enum esoc_pon_state pon_state;
struct workqueue_struct *mdm_queue;
struct work_struct ssr_work;
struct notifier_block esoc_restart;
struct mutex poff_lock;
atomic_t boot_fail_action;
atomic_t n_pon_tries;
};
#define to_mdm_drv(d) container_of(d, struct mdm_drv, cmd_eng)
static void esoc_client_link_power_off(struct esoc_clink *esoc_clink, unsigned int flags);
static void esoc_client_link_mdm_crash(struct esoc_clink *esoc_clink);
int esoc_set_boot_fail_action(struct esoc_clink *esoc_clink, u32 action)
{
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
if (action >= BOOT_FAIL_ACTION_LAST) {
esoc_mdm_log("Unknown boot fail action requested: %u\n", action);
return -EINVAL;
}
if (!mdm_drv) {
esoc_mdm_log("esoc-mdm driver not present\n");
return -EAGAIN;
}
atomic_set(&mdm_drv->boot_fail_action, action);
esoc_mdm_log("Boot fail action configured to %u\n", action);
return 0;
}
int esoc_set_n_pon_tries(struct esoc_clink *esoc_clink, u32 n_tries)
{
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
if (n_tries > ESOC_MAX_PON_TRIES) {
esoc_mdm_log("Num PON tries requested (%u) is over the limit: %u\n", n_tries,
ESOC_MAX_PON_TRIES);
}
if (!mdm_drv) {
esoc_mdm_log("esoc-mdm driver not present\n");
return -EAGAIN;
}
atomic_set(&mdm_drv->n_pon_tries, n_tries);
esoc_mdm_log("Num PON tries configured to %u\n", n_tries);
return 0;
}
static int esoc_msm_restart_handler(struct notifier_block *nb, unsigned long action, void *data)
{
struct mdm_drv *mdm_drv = container_of(nb, struct mdm_drv, esoc_restart);
struct esoc_clink *esoc_clink = mdm_drv->esoc_clink;
const struct esoc_clink_ops *const clink_ops = esoc_clink->clink_ops;
if (action == SYS_RESTART) {
if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT))
return NOTIFY_OK;
mutex_lock(&mdm_drv->poff_lock);
if (mdm_drv->mode == PWR_OFF) {
esoc_mdm_log("Reboot notifier: mdm already powered-off\n");
mutex_unlock(&mdm_drv->poff_lock);
return NOTIFY_OK;
}
esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_DOWN);
esoc_mdm_log("Reboot notifier: Notifying esoc of cold reboot\n");
dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
mdm_drv->mode = PWR_OFF;
mutex_unlock(&mdm_drv->poff_lock);
}
return NOTIFY_OK;
}
static void mdm_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
{
struct mdm_drv *mdm_drv = to_mdm_drv(eng);
bool unexpected_state = false;
switch (evt) {
case ESOC_INVALID_STATE:
esoc_mdm_log("ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n");
mdm_drv->pon_state = PON_FAIL;
complete(&mdm_drv->pon_done);
complete(&mdm_drv->ssr_ready);
break;
case ESOC_BOOT_STATE:
if (mdm_drv->mode == PWR_OFF) {
esoc_mdm_log("ESOC_BOOT_STATE: Observed status high from modem.\n");
mdm_drv->mode = BOOT;
}
break;
case ESOC_RUN_STATE:
esoc_mdm_log("ESOC_RUN_STATE: Calling complete with state: PON_SUCCESS\n");
mdm_drv->pon_state = PON_SUCCESS;
mdm_drv->mode = RUN;
complete(&mdm_drv->pon_done);
complete(&mdm_drv->ssr_ready);
break;
case ESOC_RETRY_PON_EVT:
esoc_mdm_log("ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n");
mdm_drv->pon_state = PON_RETRY;
complete(&mdm_drv->pon_done);
complete(&mdm_drv->ssr_ready);
break;
case ESOC_UNEXPECTED_RESET:
esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n");
unexpected_state = true;
fallthrough;
case ESOC_ERR_FATAL:
if (!unexpected_state)
esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n");
/*
* Ignore all modem errfatals if the status is not up
* or modem in run state.
*/
if (mdm_drv->mode <= CRASH) {
esoc_mdm_log("Modem in crash state or not booted. Ignoring.\n");
return;
}
esoc_mdm_log("Setting crash flag\n");
mdm_drv->mode = CRASH;
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
break;
case ESOC_REQ_ENG_ON:
esoc_mdm_log("evt_state: ESOC_REQ_ENG_ON; Registered a req engine\n");
complete(&mdm_drv->req_eng_wait);
break;
default:
break;
}
}
static void mdm_ssr_fn(struct work_struct *work)
{
struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
/* Wait for pon to complete. Start SSR only if pon is success */
wait_for_completion(&mdm_drv->ssr_ready);
if (mdm_drv->pon_state != PON_SUCCESS) {
esoc_mdm_log("Got errfatal but ignoring as boot failed\n");
return;
}
esoc_client_link_mdm_crash(mdm_drv->esoc_clink);
mdm_wait_for_status_low(mdm, false);
esoc_mdm_log("Starting SSR work\n");
/*
* If restarting esoc fails, the SSR framework triggers a kernel panic
*/
esoc_clink_request_ssr(mdm_drv->esoc_clink);
}
static void esoc_client_link_power_on(struct esoc_clink *esoc_clink, unsigned int flags)
{
int i;
struct esoc_client_hook *client_hook;
dev_dbg(&esoc_clink->dev, "Calling power_on hooks\n");
esoc_mdm_log("Calling power_on hooks with flags: 0x%x\n", flags);
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
client_hook = esoc_clink->client_hook[i];
if (client_hook && client_hook->esoc_link_power_on)
client_hook->esoc_link_power_on(client_hook->priv, flags);
}
}
static void esoc_client_link_power_off(struct esoc_clink *esoc_clink, unsigned int flags)
{
int i;
struct esoc_client_hook *client_hook;
dev_dbg(&esoc_clink->dev, "Calling power_off hooks\n");
esoc_mdm_log("Calling power_off hooks with flags: 0x%x\n", flags);
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
client_hook = esoc_clink->client_hook[i];
if (client_hook && client_hook->esoc_link_power_off)
client_hook->esoc_link_power_off(client_hook->priv, flags);
}
}
static void esoc_client_link_mdm_crash(struct esoc_clink *esoc_clink)
{
int i;
struct esoc_client_hook *client_hook;
dev_dbg(&esoc_clink->dev, "Calling mdm_crash hooks\n");
esoc_mdm_log("Calling mdm_crash hooks\n");
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
client_hook = esoc_clink->client_hook[i];
if (client_hook && client_hook->esoc_link_mdm_crash)
client_hook->esoc_link_mdm_crash(client_hook->priv);
}
}
static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
{
struct esoc_clink *esoc_clink = container_of(mdm_subsys, struct esoc_clink, subsys);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
esoc_mdm_log("MDM crashed notification from SSR\n");
if (mdm_dbg_stall_notify(ESOC_PRIMARY_CRASH))
return;
clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink);
}
static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
bool force_stop)
{
int ret = 0;
struct esoc_clink *esoc_clink = container_of(crashed_subsys, struct esoc_clink, subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
esoc_mdm_log("Shutdown request from SSR\n");
mutex_lock(&mdm_drv->poff_lock);
if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
esoc_mdm_log("Shutdown in crash mode\n");
mdm_wait_for_status_low(mdm, false);
if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) {
/* We want to mask debug command.
* In this case return success
* to move to next stage
*/
goto unlock;
}
esoc_clink_queue_request(ESOC_REQ_CRASH_SHUTDOWN, esoc_clink);
esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_CRASH);
esoc_mdm_log("Executing the ESOC_PREPARE_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG, esoc_clink);
if (ret) {
esoc_mdm_log("ESOC_PREPARE_DEBUG command failed\n");
dev_err(&esoc_clink->dev, "failed to enter debug\n");
goto unlock;
}
mdm_drv->mode = IN_DEBUG;
} else {
esoc_mdm_log("Graceful shutdown mode\n");
if (mdm_drv->mode == PWR_OFF) {
esoc_mdm_log("mdm already powered-off\n");
goto unlock;
}
if (esoc_clink->subsys.sysmon_shutdown_ret) {
esoc_mdm_log(
"Executing the ESOC_FORCE_PWR_OFF command\n");
ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
esoc_clink);
} else {
if (mdm_dbg_stall_cmd(ESOC_PWR_OFF)) {
/* Since power off command is masked
* we return success, and leave the state
* of the command engine as is.
*/
goto unlock;
}
dev_dbg(&esoc_clink->dev, "Sending sysmon-shutdown\n");
esoc_mdm_log("Executing the ESOC_PWR_OFF command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
}
if (ret) {
esoc_mdm_log(
"Executing the ESOC_PWR_OFF command failed\n");
dev_err(&esoc_clink->dev, "failed to exe power off\n");
goto unlock;
}
esoc_client_link_power_off(esoc_clink, ESOC_HOOK_MDM_DOWN);
/* Pull the reset line low to turn off the device */
clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF, esoc_clink);
mdm_drv->mode = PWR_OFF;
}
esoc_mdm_log("Shutdown completed\n");
unlock:
mutex_unlock(&mdm_drv->poff_lock);
return ret;
}
static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink, unsigned int poff_flags)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
esoc_mdm_log("Doing cleanup\n");
esoc_client_link_power_off(esoc_clink, poff_flags);
mdm_disable_irqs(mdm);
mdm_drv->pon_state = PON_INIT;
mdm_drv->mode = PWR_OFF;
reinit_completion(&mdm_drv->pon_done);
reinit_completion(&mdm_drv->ssr_ready);
reinit_completion(&mdm_drv->req_eng_wait);
}
/* Returns 0 to proceed towards another retry, or an error code to quit */
static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
if (*pon_trial == atomic_read(&mdm_drv->n_pon_tries)) {
esoc_mdm_log("Reached max. number of boot trials\n");
atomic_set(&mdm_drv->boot_fail_action, BOOT_FAIL_ACTION_PANIC);
}
switch (atomic_read(&mdm_drv->boot_fail_action)) {
case BOOT_FAIL_ACTION_RETRY:
mdm_subsys_retry_powerup_cleanup(esoc_clink, 0);
esoc_mdm_log("Request to retry a warm reset\n");
(*pon_trial)++;
break;
/*
* Issue a shutdown here and rerun the powerup again.
* This way it becomes a cold reset. Else, we end up
* issuing a cold reset & a warm reset back to back.
*/
case BOOT_FAIL_ACTION_COLD_RESET:
mdm_subsys_retry_powerup_cleanup(esoc_clink, ESOC_HOOK_MDM_DOWN);
esoc_mdm_log("Doing cold reset by power-down and warm reset\n");
(*pon_trial)++;
mdm_power_down(mdm);
break;
case BOOT_FAIL_ACTION_S3_RESET:
mdm_subsys_retry_powerup_cleanup(esoc_clink, ESOC_HOOK_MDM_DOWN);
esoc_mdm_log("Doing an S3 reset\n");
(*pon_trial)++;
mdm_power_down(mdm);
msleep(S3_RESET_DELAY_MS);
break;
case BOOT_FAIL_ACTION_PANIC:
esoc_mdm_log("Calling panic!!\n");
panic("Panic requested on external modem boot failure\n");
break;
case BOOT_FAIL_ACTION_NOP:
esoc_mdm_log("Leaving the modem in its curent state\n");
mdm_drv->mode = PWR_OFF;
return -EIO;
case BOOT_FAIL_ACTION_SHUTDOWN:
default:
mdm_subsys_retry_powerup_cleanup(esoc_clink, ESOC_HOOK_MDM_DOWN);
esoc_mdm_log("Shutdown the modem and quit\n");
mdm_power_down(mdm);
return -EIO;
}
return 0;
}
static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
int ret;
struct esoc_clink *esoc_clink = container_of(crashed_subsys, struct esoc_clink, subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
int timeout = INT_MAX;
u8 pon_trial = 0;
esoc_mdm_log("Powerup request from SSR\n");
do {
esoc_mdm_log("Boot trial: %d\n", pon_trial);
if (!esoc_clink->auto_boot &&
!esoc_req_eng_enabled(esoc_clink)) {
esoc_mdm_log("Wait for req eng registration\n");
dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
wait_for_completion(&mdm_drv->req_eng_wait);
}
esoc_mdm_log("Req eng available\n");
if (mdm_drv->mode == PWR_OFF) {
esoc_mdm_log("In normal power-on mode\n");
if (mdm_dbg_stall_cmd(ESOC_PWR_ON))
return -EBUSY;
esoc_mdm_log("Executing the ESOC_PWR_ON command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
if (ret) {
esoc_mdm_log("ESOC_PWR_ON command failed\n");
dev_err(&esoc_clink->dev, "pwr on fail\n");
return ret;
}
esoc_client_link_power_on(esoc_clink, 0);
} else if (mdm_drv->mode == IN_DEBUG) {
esoc_mdm_log("In SSR power-on mode\n");
esoc_mdm_log("Executing the ESOC_EXIT_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink);
if (ret) {
esoc_mdm_log(
"ESOC_EXIT_DEBUG command failed\n");
dev_err(&esoc_clink->dev, "cannot exit debug mode\n");
return ret;
}
mdm_drv->mode = PWR_OFF;
esoc_mdm_log("Executing the ESOC_PWR_ON command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
if (ret) {
dev_err(&esoc_clink->dev, "pwr on fail\n");
return ret;
}
esoc_client_link_power_on(esoc_clink, ESOC_HOOK_MDM_CRASH);
}
/*
* In autoboot case, it is possible that we can forever wait for
* boot completion, when esoc fails to boot. This is because
* there is no helper application which can alert esoc driver
* about boot failure. Prevent going to wait forever in such
* case.
*/
if (esoc_clink->auto_boot)
timeout = 10000; /* 10 seconds; converted to jiffies below */
esoc_mdm_log("Modem turned-on. Waiting for pon_done notification..\n");
ret = wait_for_completion_timeout(&mdm_drv->pon_done, msecs_to_jiffies(timeout));
if (mdm_drv->pon_state == PON_FAIL || ret <= 0) {
dev_err(&esoc_clink->dev, "booting failed\n");
esoc_mdm_log("booting failed\n");
ret = mdm_handle_boot_fail(esoc_clink, &pon_trial);
if (ret)
return ret;
} else if (mdm_drv->pon_state == PON_RETRY) {
esoc_mdm_log("Boot failed. Doing cleanup and attempting to retry\n");
mdm_subsys_retry_powerup_cleanup(esoc_clink, 0);
} else if (mdm_drv->pon_state == PON_SUCCESS) {
break;
}
} while (pon_trial <= atomic_read(&mdm_drv->n_pon_tries));
return 0;
}
static int mdm_subsys_ramdumps(int want_dumps,
const struct subsys_desc *crashed_subsys)
{
int ret;
struct esoc_clink *esoc_clink =
container_of(crashed_subsys, struct esoc_clink,
subsys);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
esoc_mdm_log("Ramdumps called from SSR\n");
if (want_dumps) {
esoc_mdm_log("Executing the ESOC_EXE_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink);
if (ret) {
esoc_mdm_log(
"Failed executing the ESOC_EXE_DEBUG command\n");
dev_err(&esoc_clink->dev, "debugging failed\n");
return ret;
}
}
return 0;
}
static int mdm_register_ssr(struct esoc_clink *esoc_clink)
{
struct subsys_desc *subsys = &esoc_clink->subsys;
subsys->shutdown = mdm_subsys_shutdown;
subsys->ramdump = mdm_subsys_ramdumps;
subsys->powerup = mdm_subsys_powerup;
subsys->crash_shutdown = mdm_crash_shutdown;
return esoc_clink_register_ssr(esoc_clink);
}
int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
{
int ret;
struct mdm_drv *mdm_drv;
struct esoc_eng *esoc_eng;
mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
if (!mdm_drv)
return -ENOMEM;
esoc_eng = &mdm_drv->cmd_eng;
esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
ret = esoc_clink_register_cmd_eng(esoc_clink, esoc_eng);
if (ret) {
dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
return ret;
}
mutex_init(&mdm_drv->poff_lock);
ret = mdm_register_ssr(esoc_clink);
if (ret)
goto ssr_err;
mdm_drv->mdm_queue = alloc_workqueue("mdm_drv_queue", 0, 0);
if (!mdm_drv->mdm_queue) {
dev_err(&esoc_clink->dev, "could not create mdm_queue\n");
ret = -ENOMEM;
goto queue_err;
}
esoc_set_drv_data(esoc_clink, mdm_drv);
init_completion(&mdm_drv->pon_done);
init_completion(&mdm_drv->ssr_ready);
init_completion(&mdm_drv->req_eng_wait);
INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
mdm_drv->esoc_clink = esoc_clink;
mdm_drv->mode = PWR_OFF;
mdm_drv->pon_state = PON_INIT;
atomic_set(&mdm_drv->boot_fail_action, BOOT_FAIL_ACTION_DEF);
atomic_set(&mdm_drv->n_pon_tries, ESOC_DEF_PON_REQ);
mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
ret = register_reboot_notifier(&mdm_drv->esoc_restart);
if (ret)
dev_err(&esoc_clink->dev, "register for reboot failed\n");
ret = mdm_dbg_eng_init(drv, esoc_clink);
if (ret) {
debug_init_done = false;
dev_err(&esoc_clink->dev, "dbg engine failure\n");
} else {
dev_dbg(&esoc_clink->dev, "dbg engine initialized\n");
debug_init_done = true;
}
return 0;
queue_err:
esoc_clink_unregister_ssr(esoc_clink);
ssr_err:
esoc_clink_unregister_cmd_eng(esoc_clink, esoc_eng);
return ret;
}
struct esoc_compat compat_table[] = {
{
.name = "MDM9x55",
.data = NULL,
},
{
.name = "SDX50M",
.data = NULL,
},
{
.name = "SDX55M",
.data = NULL,
},
};
struct esoc_drv esoc_ssr_drv = {
.owner = THIS_MODULE,
.probe = esoc_ssr_probe,
.compat_table = compat_table,
.compat_entries = ARRAY_SIZE(compat_table),
.driver = {
.name = "mdm-4x",
},
};
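/*
 * esoc_ssr_drv is registered from esoc_bus_init() via esoc_drv_register().
 * esoc_bus_match() compares the control link's name against compat_table[]
 * case-insensitively, so this driver binds to MDM9x55, SDX50M and SDX55M
 * links.
 */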


@@ -0,0 +1,271 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, 2017-2021, The Linux Foundation. All rights reserved.
*/
#include "esoc-mdm.h"
#include <linux/input/qpnp-power-on.h>
/* This function can be called from atomic context. */
static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int soft_reset_direction_assert = 0, soft_reset_direction_de_assert = 1;
uint32_t reset_time_us = mdm->reset_time_ms * 1000;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_assert);
/*
* Allow PS hold assert to be detected
*/
if (!atomic)
usleep_range(reset_time_us, reset_time_us + 100000);
else
/*
* The flow goes through this path as a part of the
* panic handler, which has to executed atomically.
*/
mdelay(mdm->reset_time_ms);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert);
return 0;
}
/* This function can be called from atomic context. */
static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int soft_reset_direction_assert = 0, soft_reset_direction_de_assert = 1;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
esoc_mdm_log("RESET GPIO value (before doing a reset): %d\n",
gpio_get_value(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)));
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", soft_reset_direction_assert);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_assert);
/*
* Allow PS hold assert to be detected
*/
if (!atomic)
usleep_range(80000, 180000);
else
/*
* The flow goes through this path as a part of the
* panic handler, which has to executed atomically.
*/
mdelay(100);
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", soft_reset_direction_de_assert);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert);
return 0;
}
/* This function can be called from atomic context. */
static int sdx55m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int rc;
esoc_mdm_log("Doing a Warm reset using SPMI\n");
rc = qpnp_pon_modem_pwr_off(PON_POWER_OFF_WARM_RESET);
if (rc) {
dev_err(mdm->dev, "SPMI warm reset failed\n");
esoc_mdm_log("SPMI warm reset failed\n");
return rc;
}
esoc_mdm_log("Warm reset done using SPMI\n");
return 0;
}
static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
{
int i;
int pblrdy;
struct device *dev = mdm->dev;
esoc_mdm_log("Powering on modem for the first time\n");
dev_dbg(dev, "Powering on modem for the first time\n");
if (mdm->esoc->auto_boot)
return 0;
mdm_toggle_soft_reset(mdm, false);
/* Add a delay to allow PON sequence to complete*/
msleep(150);
esoc_mdm_log("Setting AP2MDM_STATUS = 1\n");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
for (i = 0; i < MDM_PBLRDY_CNT; i++) {
pblrdy = gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY));
if (pblrdy)
break;
usleep_range(5000, 6000);
}
dev_dbg(dev, "pblrdy i:%d\n", i);
msleep(200);
}
/*
* No PBLRDY gpio associated with this modem
* Send request for image. Let userspace confirm establishment of
* link to external modem.
*/
else {
esoc_mdm_log("Queueing the request: ESOC_REQ_IMG\n");
esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc);
}
return 0;
}
static int mdm9x55_power_down(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
int soft_reset_direction_assert = 0, soft_reset_direction_de_assert = 1;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
/* Assert the soft reset line whether mdm2ap_status went low or not */
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_assert);
dev_dbg(dev, "Doing a hard reset\n");
/*
* Currently, there is a debounce timer on the charm PMIC. It is
* necessary to hold the PMIC RESET low for 406ms
* for the reset to fully take place. Sleep here to ensure the
* reset has occurred before the function exits.
*/
msleep(406);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert);
return 0;
}
static int sdx50m_power_down(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
/* Assert the soft reset line whether mdm2ap_status went low or not */
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", soft_reset_direction);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction);
dev_dbg(dev, "Doing a hard reset\n");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction);
/*
* Currently, there is a debounce timer on the charm PMIC. It is
* necessary to hold the PMIC RESET low for 406ms
* for the reset to fully take place. Sleep here to ensure the
* reset has occurred before the function exits.
*/
msleep(300);
return 0;
}
static int sdx55m_power_down(struct mdm_ctrl *mdm)
{
esoc_mdm_log("Performing warm reset as cold reset is not supported\n");
return sdx55m_toggle_soft_reset(mdm, false);
}
static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
{
dev_dbg(mdm->dev, "Triggering mdm cold reset");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !!mdm->soft_reset_inverted);
/*
* The function is executed as a part of the atomic reboot handler.
* Hence, go with a busy loop instead of sleep.
*/
mdelay(334);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !mdm->soft_reset_inverted);
}
static void sdx50m_cold_reset(struct mdm_ctrl *mdm)
{
dev_dbg(mdm->dev, "Triggering mdm cold reset");
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", !!mdm->soft_reset_inverted);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !!mdm->soft_reset_inverted);
/*
* The function is executed as a part of the atomic reboot handler.
* Hence, go with a busy loop instead of sleep.
*/
mdelay(600);
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", !!mdm->soft_reset_inverted);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !mdm->soft_reset_inverted);
}
static int mdm9x55_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
struct device_node *node = mdm->dev->of_node;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
val = of_property_read_u32(node, "qcom,reset-time-ms", &mdm->reset_time_ms);
if (val)
mdm->reset_time_ms = DEF_MDM9X55_RESET_TIME;
val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio", 0, &flags);
if (val >= 0) {
MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
if (flags & OF_GPIO_ACTIVE_LOW)
mdm->soft_reset_inverted = 1;
return 0;
} else
return -EIO;
}
static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
struct device_node *node = mdm->dev->of_node;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio", 0, &flags);
if (val >= 0) {
MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
if (flags & OF_GPIO_ACTIVE_LOW)
mdm->soft_reset_inverted = 1;
return 0;
} else
return -EIO;
}
static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET))) {
if (gpio_request(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), "AP2MDM_SOFT_RESET")) {
dev_err(dev, "Cannot config AP2MDM_SOFT_RESET gpio\n");
return -EIO;
}
}
return 0;
}
struct mdm_pon_ops mdm9x55_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = mdm9x55_toggle_soft_reset,
.poff_force = mdm9x55_power_down,
.cold_reset = mdm9x55_cold_reset,
.dt_init = mdm9x55_pon_dt_init,
.setup = mdm4x_pon_setup,
};
struct mdm_pon_ops sdx50m_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = sdx50m_toggle_soft_reset,
.poff_force = sdx50m_power_down,
.cold_reset = sdx50m_cold_reset,
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
};
struct mdm_pon_ops sdx55m_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = sdx55m_toggle_soft_reset,
.poff_force = sdx55m_power_down,
};
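/*
 * sdx55m_pon_ops leaves cold_reset, dt_init and setup unset; the mdm_*
 * inline wrappers in esoc-mdm.h check for NULL hooks and return -ENOENT
 * (or do nothing), since SDX55M resets are driven over SPMI rather than
 * through the AP2MDM_SOFT_RESET gpio.
 */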


@@ -0,0 +1,163 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2015, 2017-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_MDM_H__
#define __ESOC_MDM_H__
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include "esoc.h"
#define MDM_PBLRDY_CNT 20
#define INVALID_GPIO (-1)
#define MDM_GPIO(mdm, i) (mdm->gpios[i])
#define MDM9x55_LABEL "MDM9x55"
#define MDM9x55_PCIE "PCIe"
#define SDX50M_LABEL "SDX50M"
#define SDX50M_PCIE "PCIe"
#define SDX55M_LABEL "SDX55M"
#define SDX55M_PCIE "PCIe"
#define MDM2AP_STATUS_TIMEOUT_MS 120000L
#define MDM_MODEM_TIMEOUT 3000
#define DEF_RAMDUMP_TIMEOUT 120000
#define DEF_RAMDUMP_DELAY 2000
#define DEF_SHUTDOWN_TIMEOUT 10000
#define DEF_MDM9X55_RESET_TIME 203
#define RD_BUF_SIZE 100
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
#define MDM_DBG_OFFSET 0x934
#define MDM_DBG_MODE 0x53444247
#define MDM_CTI_NAME "coresight-cti-rpm-cpu0"
#define MDM_CTI_TRIG 0
#define MDM_CTI_CH 0
enum mdm_gpio {
AP2MDM_WAKEUP = 0,
AP2MDM_STATUS,
AP2MDM_SOFT_RESET,
AP2MDM_VDD_MIN,
AP2MDM_CHNLRDY,
AP2MDM_ERRFATAL,
AP2MDM_VDDMIN,
AP2MDM_PMIC_PWR_EN,
MDM2AP_WAKEUP,
MDM2AP_ERRFATAL,
MDM2AP_PBLRDY,
MDM2AP_STATUS,
MDM2AP_VDDMIN,
MDM_LINK_DETECT,
NUM_GPIOS,
};
struct mdm_pon_ops;
struct mdm_ctrl {
unsigned int gpios[NUM_GPIOS];
spinlock_t status_lock;
struct workqueue_struct *mdm_queue;
struct delayed_work mdm2ap_status_check_work;
struct work_struct mdm_status_work;
struct work_struct restart_reason_work;
struct completion debug_done;
struct device *dev;
struct pinctrl *pinctrl;
struct pinctrl_state *gpio_state_booting;
struct pinctrl_state *gpio_state_running;
struct pinctrl_state *gpio_state_active;
struct pinctrl_state *gpio_state_suspend;
int mdm2ap_status_valid_old_config;
int soft_reset_inverted;
int errfatal_irq;
int status_irq;
int pblrdy_irq;
int debug;
int init;
bool debug_fail;
unsigned int dump_timeout_ms;
unsigned int ramdump_delay_ms;
unsigned int shutdown_timeout_ms;
unsigned int reset_time_ms;
struct esoc_clink *esoc;
bool get_restart_reason;
unsigned long irq_mask;
bool ready;
bool dual_interface;
u32 status;
void __iomem *dbg_addr;
bool dbg_mode;
struct coresight_cti *cti;
int trig_cnt;
const struct mdm_pon_ops *pon_ops;
bool skip_restart_for_mdm_crash;
};
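/*
 * struct mdm_pon_ops: per-target power-on/reset hooks.
 * @pon: first power-on of the modem.
 * @soft_reset: toggle the soft-reset line; must be safe in atomic context
 *              when @atomic is true.
 * @poff_force: forced power-down.
 * @poff_cleanup: optional cleanup after power-off.
 * @cold_reset: cold reset, callable from the atomic reboot path.
 * @dt_init: parse target-specific device tree properties.
 * @setup: request the gpios needed by the hooks above.
 */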
struct mdm_pon_ops {
int (*pon)(struct mdm_ctrl *mdm);
int (*soft_reset)(struct mdm_ctrl *mdm, bool atomic);
int (*poff_force)(struct mdm_ctrl *mdm);
int (*poff_cleanup)(struct mdm_ctrl *mdm);
void (*cold_reset)(struct mdm_ctrl *mdm);
int (*dt_init)(struct mdm_ctrl *mdm);
int (*setup)(struct mdm_ctrl *mdm);
};
struct mdm_ops {
struct esoc_clink_ops *clink_ops;
struct mdm_pon_ops *pon_ops;
int (*config_hw)(struct mdm_ctrl *mdm, const struct mdm_ops *ops,
struct platform_device *pdev);
};
void mdm_disable_irqs(struct mdm_ctrl *mdm);
void mdm_wait_for_status_low(struct mdm_ctrl *mdm, bool atomic);
static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
if (mdm->pon_ops->soft_reset)
return mdm->pon_ops->soft_reset(mdm, atomic);
return -ENOENT;
}
static inline int mdm_do_first_power_on(struct mdm_ctrl *mdm)
{
if (mdm->pon_ops->pon)
return mdm->pon_ops->pon(mdm);
return -ENOENT;
}
static inline int mdm_power_down(struct mdm_ctrl *mdm)
{
if (mdm->pon_ops->poff_force)
return mdm->pon_ops->poff_force(mdm);
return -ENOENT;
}
static inline void mdm_cold_reset(struct mdm_ctrl *mdm)
{
if (mdm->pon_ops->cold_reset)
mdm->pon_ops->cold_reset(mdm);
}
static inline int mdm_pon_dt_init(struct mdm_ctrl *mdm)
{
if (mdm->pon_ops->dt_init)
return mdm->pon_ops->dt_init(mdm);
return -ENOENT;
}
static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
{
if (mdm->pon_ops->setup)
return mdm->pon_ops->setup(mdm);
return -ENOENT;
}
extern struct mdm_pon_ops mdm9x55_pon_ops;
extern struct mdm_pon_ops sdx50m_pon_ops;
extern struct mdm_pon_ops sdx55m_pon_ops;
#endif

drivers/remoteproc/esoc.h (new file, 177 lines)

@@ -0,0 +1,177 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2015, 2017-2018, 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_H__
#define __ESOC_H__
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/esoc_ctrl.h>
#include <linux/esoc_client.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include <linux/ipc_logging.h>
#define ESOC_MDM_IPC_PAGES 10
extern void *ipc_log;
#define esoc_mdm_log(__msg, ...) ipc_log_string(ipc_log, "[%s]: "__msg, __func__, ##__VA_ARGS__)
#define ESOC_DEV_MAX 4
#define ESOC_NAME_LEN 20
#define ESOC_LINK_LEN 20
struct esoc_clink;
/**
* struct esoc_eng: Engine of the esoc control link
* @handle_clink_req: handle incoming esoc requests.
* @handle_clink_evt: handle for esoc events.
* @esoc_clink: pointer to esoc control link.
*/
struct esoc_eng {
void (*handle_clink_req)(enum esoc_req req, struct esoc_eng *eng);
void (*handle_clink_evt)(enum esoc_evt evt, struct esoc_eng *eng);
struct esoc_clink *esoc_clink;
};
/**
* struct esoc_clink: Representation of external esoc device
* @name: Name of the external esoc.
* @link_name: name of the physical link.
* @link_info: additional info about the physical link.
* @parent: parent device.
* @dev: device for userspace interface.
* @pdev: platform device to interface with SSR driver.
* @id: id of the external device.
* @owner: owner of the device.
* @clink_ops: control operations for the control link
* @req_eng: handle for request engine.
* @cmd_eng: handle for command engine.
* @clink_data: private data of esoc control link.
* @compat_data: compat data of esoc driver.
* @subsys_desc: descriptor for subsystem restart
* @subsys_dev: ssr device handle.
* @np: device tree node for esoc_clink.
* @auto_boot: boots independently.
* @primary: primary esoc controls(reset/poweroff) all secondary
* esocs, but not otherway around.
* @statusline_not_a_powersource: True if status line to esoc is not a
* power source.
* @userspace_handle_shutdown: True if user space handles shutdown requests.
*/
struct esoc_clink {
const char *name;
const char *link_name;
const char *link_info;
struct device *parent;
struct device dev;
struct platform_device *pdev;
unsigned int id;
struct module *owner;
const struct esoc_clink_ops *clink_ops;
struct esoc_eng *req_eng;
struct esoc_eng *cmd_eng;
spinlock_t notify_lock;
void *clink_data;
void *compat_data;
struct subsys_desc subsys;
struct subsys_device *subsys_dev;
struct device_node *np;
bool auto_boot;
bool primary;
bool statusline_not_a_powersource;
bool userspace_handle_shutdown;
struct esoc_client_hook *client_hook[ESOC_MAX_HOOKS];
};
/**
* struct esoc_clink_ops: Operations to control external soc
* @cmd_exe: Execute control command
* @get_status: Get current status, or response to previous command
* @get_err_fatal: Get status of err fatal signal
* @notify_esoc: notify external soc of events
*/
struct esoc_clink_ops {
int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
void (*get_status)(u32 *status, struct esoc_clink *dev);
void (*get_err_fatal)(u32 *status, struct esoc_clink *dev);
void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
};
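To make the flow concrete, below is a minimal, hypothetical sketch of how a transport driver (such as the gpio-based esoc-mdm-4x driver built by the Makefile above) publishes a control link on the esoc bus; example_clink_ops and the probe function are placeholders, and error handling is trimmed:

static int example_link_probe(struct platform_device *pdev)
{
	struct esoc_clink *clink;

	/* Not devm-allocated: esoc_clink_release() kfree()s it on unregister */
	clink = kzalloc(sizeof(*clink), GFP_KERNEL);
	if (!clink)
		return -ENOMEM;
	clink->name = "SDX55M";			/* must match an esoc_compat entry */
	clink->link_name = "PCIe";
	clink->clink_ops = &example_clink_ops;	/* placeholder ops table */
	clink->parent = &pdev->dev;
	clink->pdev = pdev;
	clink->np = pdev->dev.of_node;
	clink->owner = THIS_MODULE;
	return esoc_clink_register(clink);
}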
/**
* struct esoc_compat: Compatibility of esoc drivers.
* @name: esoc link that driver is compatible with.
* @data: driver data associated with esoc clink.
*/
struct esoc_compat {
const char *name;
void *data;
};
/**
* struct esoc_drv: Driver for an esoc clink
* @driver: drivers for esoc.
* @owner: module owner of esoc driver.
* @compat_table: compatible table for driver.
* @compat_entries: number of entries in @compat_table.
* @probe: probe function for esoc driver.
*/
struct esoc_drv {
struct device_driver driver;
struct module *owner;
struct esoc_compat *compat_table;
unsigned int compat_entries;
int (*probe)(struct esoc_clink *esoc_clink, struct esoc_drv *drv);
};
extern struct esoc_drv esoc_ssr_drv;
#define to_esoc_clink(d) container_of(d, struct esoc_clink, dev)
#define to_esoc_drv(d) container_of(d, struct esoc_drv, driver)
typedef int (*esoc_func_t)(struct device *dev, void *data);
extern struct bus_type esoc_bus_type;
/* Exported apis */
void esoc_dev_exit(void);
int esoc_dev_init(void);
int esoc_bus_init(void);
void esoc_clink_unregister(struct esoc_clink *esoc_dev);
int esoc_clink_register(struct esoc_clink *esoc_dev);
struct esoc_clink *get_esoc_clink(int id);
struct esoc_clink *get_esoc_clink_by_node(struct device_node *node);
void put_esoc_clink(struct esoc_clink *esoc_clink);
void *get_esoc_clink_data(struct esoc_clink *esoc);
void set_esoc_clink_data(struct esoc_clink *esoc, void *data);
void esoc_clink_evt_notify(enum esoc_evt, struct esoc_clink *esoc_dev);
void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_dev);
void esoc_for_each_dev(void *data, esoc_func_t fn);
int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink, struct esoc_eng *eng);
void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink, struct esoc_eng *eng);
int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink, struct esoc_eng *eng);
void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink, struct esoc_eng *eng);
int esoc_drv_register(struct esoc_drv *driver);
void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data);
void *esoc_get_drv_data(struct esoc_clink *esoc_clink);
/* ssr operations */
int esoc_clink_register_ssr(struct esoc_clink *esoc_clink);
int esoc_clink_request_ssr(struct esoc_clink *esoc_clink);
void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink);
/* client notification */
void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt);
bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
/* Modem boot fail actions */
int esoc_set_boot_fail_action(struct esoc_clink *esoc_clink, u32 action);
int esoc_set_n_pon_tries(struct esoc_clink *esoc_clink, u32 n_tries);
#endif


@@ -0,0 +1,374 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2015, 2017-2018, 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/idr.h>
#include <linux/slab.h>
#include "esoc.h"
static DEFINE_IDA(esoc_ida);
/* SYSFS */
static ssize_t esoc_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return scnprintf(buf, ESOC_NAME_LEN, "%s", to_esoc_clink(dev)->name);
}
static ssize_t esoc_link_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return scnprintf(buf, ESOC_LINK_LEN, "%s", to_esoc_clink(dev)->link_name);
}
static ssize_t esoc_link_info_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return scnprintf(buf, ESOC_LINK_LEN, "%s", to_esoc_clink(dev)->link_info);
}
static DEVICE_ATTR_RO(esoc_name);
static DEVICE_ATTR_RO(esoc_link);
static DEVICE_ATTR_RO(esoc_link_info);
static struct attribute *esoc_clink_attrs[] = {
&dev_attr_esoc_name.attr,
&dev_attr_esoc_link.attr,
&dev_attr_esoc_link_info.attr,
NULL
};
static struct attribute_group esoc_clink_attr_group = {
.attrs = esoc_clink_attrs,
};
const struct attribute_group *esoc_clink_attr_groups[] = {
&esoc_clink_attr_group,
NULL,
};
static int esoc_bus_match(struct device *dev, struct device_driver *drv)
{
int i = 0, match = 1;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
struct esoc_drv *esoc_drv = to_esoc_drv(drv);
int entries = esoc_drv->compat_entries;
struct esoc_compat *table = esoc_drv->compat_table;
for (i = 0; i < entries; i++) {
if (strcasecmp(esoc_clink->name, table[i].name) == 0)
return match;
}
return 0;
}
static int esoc_bus_probe(struct device *dev)
{
int ret;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
struct esoc_drv *esoc_drv = to_esoc_drv(dev->driver);
ret = esoc_drv->probe(esoc_clink, esoc_drv);
if (ret) {
pr_err("failed to probe %s dev\n", esoc_clink->name);
return ret;
}
return 0;
}
struct bus_type esoc_bus_type = {
.name = "esoc",
.match = esoc_bus_match,
.dev_groups = esoc_clink_attr_groups,
};
struct device esoc_bus = {
.init_name = "esoc-bus"
};
/* bus accessor */
static void esoc_clink_release(struct device *dev)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
ida_simple_remove(&esoc_ida, esoc_clink->id);
kfree(esoc_clink);
}
static int esoc_clink_match_id(struct device *dev, const void *id)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
int *esoc_id = (int *)id;
if (esoc_clink->id == *esoc_id) {
if (!try_module_get(esoc_clink->owner))
return 0;
return 1;
}
return 0;
}
static int esoc_clink_match_node(struct device *dev, const void *id)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
if (esoc_clink->np == id) {
if (!try_module_get(esoc_clink->owner))
return 0;
return 1;
}
return 0;
}
void esoc_for_each_dev(void *data, esoc_func_t fn)
{
bus_for_each_dev(&esoc_bus_type, NULL, data, fn);
}
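/*
 * get_esoc_clink() and get_esoc_clink_by_node() take a reference on the
 * owning module via their match callbacks; callers must drop it with
 * put_esoc_clink() when done.
 */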
struct esoc_clink *get_esoc_clink(int id)
{
struct esoc_clink *esoc_clink;
struct device *dev;
dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
if (IS_ERR_OR_NULL(dev))
return NULL;
esoc_clink = to_esoc_clink(dev);
return esoc_clink;
}
struct esoc_clink *get_esoc_clink_by_node(struct device_node *node)
{
struct esoc_clink *esoc_clink;
struct device *dev;
dev = bus_find_device(&esoc_bus_type, NULL, node, esoc_clink_match_node);
if (IS_ERR_OR_NULL(dev))
return NULL;
esoc_clink = to_esoc_clink(dev);
return esoc_clink;
}
void put_esoc_clink(struct esoc_clink *esoc_clink)
{
module_put(esoc_clink->owner);
}
bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink)
{
return !esoc_clink->req_eng ? false : true;
}
bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink)
{
return !esoc_clink->cmd_eng ? false : true;
}
/* ssr operations */
int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
{
int ret;
int len;
char *subsys_name;
len = strlen("esoc") + sizeof(esoc_clink->id);
subsys_name = kzalloc(len, GFP_KERNEL);
if (!subsys_name)
return -ENOMEM;
snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
esoc_clink->dev.of_node = esoc_clink->np;
esoc_clink->subsys.name = subsys_name;
esoc_clink->subsys.dev = &esoc_clink->pdev->dev;
esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
dev_err(&esoc_clink->dev, "failed to register ssr node\n");
ret = PTR_ERR(esoc_clink->subsys_dev);
goto subsys_err;
}
return 0;
subsys_err:
kfree(subsys_name);
return ret;
}
void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink)
{
subsys_unregister(esoc_clink->subsys_dev);
kfree(esoc_clink->subsys.name);
}
int esoc_clink_request_ssr(struct esoc_clink *esoc_clink)
{
subsystem_restart_dev(esoc_clink->subsys_dev);
return 0;
}
/* bus operations */
void esoc_clink_evt_notify(enum esoc_evt evt, struct esoc_clink *esoc_clink)
{
unsigned long flags;
spin_lock_irqsave(&esoc_clink->notify_lock, flags);
notify_esoc_clients(esoc_clink, evt);
if (esoc_clink->req_eng && esoc_clink->req_eng->handle_clink_evt)
esoc_clink->req_eng->handle_clink_evt(evt, esoc_clink->req_eng);
if (esoc_clink->cmd_eng && esoc_clink->cmd_eng->handle_clink_evt)
esoc_clink->cmd_eng->handle_clink_evt(evt, esoc_clink->cmd_eng);
spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
}
void *get_esoc_clink_data(struct esoc_clink *esoc)
{
return esoc->clink_data;
}
void set_esoc_clink_data(struct esoc_clink *esoc, void *data)
{
esoc->clink_data = data;
}
void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_clink)
{
unsigned long flags;
struct esoc_eng *req_eng;
spin_lock_irqsave(&esoc_clink->notify_lock, flags);
if (esoc_clink->req_eng != NULL) {
req_eng = esoc_clink->req_eng;
req_eng->handle_clink_req(req, req_eng);
}
spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
}
void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data)
{
dev_set_drvdata(&esoc_clink->dev, data);
}
void *esoc_get_drv_data(struct esoc_clink *esoc_clink)
{
return dev_get_drvdata(&esoc_clink->dev);
}
/* bus registration functions */
void esoc_clink_unregister(struct esoc_clink *esoc_clink)
{
if (get_device(&esoc_clink->dev) != NULL) {
device_unregister(&esoc_clink->dev);
put_device(&esoc_clink->dev);
}
}
int esoc_clink_register(struct esoc_clink *esoc_clink)
{
int id, err;
struct device *dev;
if (!esoc_clink->name || !esoc_clink->link_name || !esoc_clink->clink_ops) {
dev_err(esoc_clink->parent, "invalid esoc arguments\n");
return -EINVAL;
}
id = ida_simple_get(&esoc_ida, 0, ESOC_DEV_MAX, GFP_KERNEL);
if (id < 0) {
pr_err("unable to allocate id for %s\n", esoc_clink->name);
return id;
}
esoc_clink->id = id;
dev = &esoc_clink->dev;
dev->bus = &esoc_bus_type;
dev->release = esoc_clink_release;
if (!esoc_clink->parent)
dev->parent = &esoc_bus;
else
dev->parent = esoc_clink->parent;
dev_set_name(dev, "esoc%d", id);
err = device_register(dev);
if (err) {
dev_err(esoc_clink->parent, "esoc device register failed\n");
goto exit_ida;
}
spin_lock_init(&esoc_clink->notify_lock);
return 0;
exit_ida:
ida_simple_remove(&esoc_ida, id);
pr_err("unable to register %s, err = %d\n", esoc_clink->name, err);
return err;
}
int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
if (esoc_clink->req_eng)
return -EBUSY;
if (!eng->handle_clink_req)
return -EINVAL;
esoc_clink->req_eng = eng;
eng->esoc_clink = esoc_clink;
esoc_clink_evt_notify(ESOC_REQ_ENG_ON, esoc_clink);
return 0;
}
int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
if (esoc_clink->cmd_eng)
return -EBUSY;
esoc_clink->cmd_eng = eng;
eng->esoc_clink = esoc_clink;
esoc_clink_evt_notify(ESOC_CMD_ENG_ON, esoc_clink);
return 0;
}
void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
esoc_clink->req_eng = NULL;
esoc_clink_evt_notify(ESOC_REQ_ENG_OFF, esoc_clink);
}
void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
esoc_clink->cmd_eng = NULL;
esoc_clink_evt_notify(ESOC_CMD_ENG_OFF, esoc_clink);
}
int esoc_drv_register(struct esoc_drv *driver)
{
int ret;
driver->driver.bus = &esoc_bus_type;
driver->driver.probe = esoc_bus_probe;
ret = driver_register(&driver->driver);
if (ret)
return ret;
return 0;
}
int esoc_bus_init(void)
{
int ret;
ret = device_register(&esoc_bus);
if (ret) {
pr_err("esoc bus device register fail\n");
return ret;
}
ret = bus_register(&esoc_bus_type);
if (ret) {
pr_err("esoc bus register fail\n");
return ret;
}
pr_debug("esoc bus registration done\n");
//TODO: add cleanup path
ret = esoc_dev_init();
if (ret) {
pr_err("esoc userspace driver registration failed\n");
return ret;
}
ret = esoc_drv_register(&esoc_ssr_drv);
if (ret) {
pr_err("esoc ssr driver registration failed\n");
return ret;
}
return 0;
}


@@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2018, 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/esoc_client.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include "esoc.h"
static DEFINE_SPINLOCK(notify_lock);
static ATOMIC_NOTIFIER_HEAD(client_notify);
static void devm_esoc_desc_release(struct device *dev, void *res)
{
struct esoc_desc *esoc_desc = res;
kfree(esoc_desc->name);
kfree(esoc_desc->link);
put_esoc_clink(esoc_desc->priv);
}
static int devm_esoc_desc_match(struct device *dev, void *res, void *data)
{
struct esoc_desc *esoc_desc = res;
return esoc_desc == data;
}
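/*
 * devm_register_esoc_client() walks the "esoc-N"/"esoc-names" properties of
 * the client's device tree node, e.g. (illustrative binding, matching the
 * parsing below):
 *
 *	esoc-0 = <&mdm0>;
 *	esoc-names = "mdm";
 *
 * and returns an esoc_desc once the phandle resolves to a registered
 * control link, or ERR_PTR(-EPROBE_DEFER) if the link is not yet up.
 */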
struct esoc_desc *devm_register_esoc_client(struct device *dev,
const char *name)
{
int ret, index;
const char *client_desc;
char *esoc_prop;
const __be32 *parp;
struct device_node *esoc_node;
struct device_node *np = dev->of_node;
struct esoc_clink *esoc_clink;
struct esoc_desc *desc;
char *esoc_name, *esoc_link, *esoc_link_info;
for (index = 0;; index++) {
esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
if (IS_ERR_OR_NULL(esoc_prop))
return ERR_PTR(-ENOMEM);
parp = of_get_property(np, esoc_prop, NULL);
if (parp == NULL) {
dev_err(dev, "esoc device not present\n");
kfree(esoc_prop);
return NULL;
}
ret = of_property_read_string_index(np, "esoc-names", index, &client_desc);
if (ret) {
dev_err(dev, "cannot find matching string\n");
kfree(esoc_prop);
return NULL;
}
if (strcmp(client_desc, name)) {
kfree(esoc_prop);
continue;
}
kfree(esoc_prop);
esoc_node = of_find_node_by_phandle(be32_to_cpup(parp));
esoc_clink = get_esoc_clink_by_node(esoc_node);
if (IS_ERR_OR_NULL(esoc_clink)) {
dev_err(dev, "matching esoc clink not present\n");
return ERR_PTR(-EPROBE_DEFER);
}
esoc_name = kasprintf(GFP_KERNEL, "esoc%d", esoc_clink->id);
if (IS_ERR_OR_NULL(esoc_name)) {
dev_err(dev, "unable to allocate esoc name\n");
return ERR_PTR(-ENOMEM);
}
esoc_link = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_name);
if (IS_ERR_OR_NULL(esoc_link)) {
dev_err(dev, "unable to allocate esoc link name\n");
kfree(esoc_name);
return ERR_PTR(-ENOMEM);
}
esoc_link_info = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_info);
if (IS_ERR_OR_NULL(esoc_link_info)) {
dev_err(dev, "unable to alloc link info name\n");
kfree(esoc_name);
kfree(esoc_link);
return ERR_PTR(-ENOMEM);
}
desc = devres_alloc(devm_esoc_desc_release, sizeof(*desc), GFP_KERNEL);
if (IS_ERR_OR_NULL(desc)) {
kfree(esoc_name);
kfree(esoc_link);
kfree(esoc_link_info);
dev_err(dev, "unable to allocate esoc descriptor\n");
return ERR_PTR(-ENOMEM);
}
desc->name = esoc_name;
desc->link = esoc_link;
desc->link_info = esoc_link_info;
desc->priv = esoc_clink;
devres_add(dev, desc);
return desc;
}
return NULL;
}
EXPORT_SYMBOL(devm_register_esoc_client);
void devm_unregister_esoc_client(struct device *dev, struct esoc_desc *esoc_desc)
{
int ret;
ret = devres_release(dev, devm_esoc_desc_release, devm_esoc_desc_match, esoc_desc);
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_unregister_esoc_client);
int esoc_register_client_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&client_notify, nb);
}
EXPORT_SYMBOL(esoc_register_client_notifier);
void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt)
{
unsigned int id;
unsigned long flags;
spin_lock_irqsave(&notify_lock, flags);
id = esoc_clink->id;
atomic_notifier_call_chain(&client_notify, evt, &id);
spin_unlock_irqrestore(&notify_lock, flags);
}
EXPORT_SYMBOL(notify_esoc_clients);
int esoc_register_client_hook(struct esoc_desc *desc, struct esoc_client_hook *client_hook)
{
int i;
struct esoc_clink *esoc_clink;
if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(client_hook)) {
pr_debug("%s: Invalid parameters\n", __func__);
return -EINVAL;
}
esoc_clink = desc->priv;
if (IS_ERR_OR_NULL(esoc_clink)) {
pr_debug("%s: Invalid esoc link\n", __func__);
return -EINVAL;
}
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
if (i == client_hook->prio &&
esoc_clink->client_hook[i] == NULL) {
esoc_clink->client_hook[i] = client_hook;
dev_dbg(&esoc_clink->dev, "Client hook registration successful\n");
return 0;
}
}
dev_dbg(&esoc_clink->dev, "Client hook registration failed!\n");
return -EINVAL;
}
EXPORT_SYMBOL(esoc_register_client_hook);
int esoc_unregister_client_hook(struct esoc_desc *desc, struct esoc_client_hook *client_hook)
{
int i;
struct esoc_clink *esoc_clink;
if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(client_hook)) {
pr_debug("%s: Invalid parameters\n", __func__);
return -EINVAL;
}
esoc_clink = desc->priv;
if (IS_ERR_OR_NULL(esoc_clink)) {
pr_debug("%s: Invalid esoc link\n", __func__);
return -EINVAL;
}
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
if (i == client_hook->prio && esoc_clink->client_hook[i] != NULL) {
esoc_clink->client_hook[i] = NULL;
dev_dbg(&esoc_clink->dev, "Client hook unregistration successful\n");
return 0;
}
}
dev_dbg(&esoc_clink->dev, "Client hook unregistration failed!\n");
return -EINVAL;
}
EXPORT_SYMBOL(esoc_unregister_client_hook);
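
A client driver (for example a PCIe or MHI host driver) consumes this file through devm_register_esoc_client() and the hook/notifier calls declared in esoc_client.h. The sketch below is illustrative only and not part of this patch; the device-tree property names esoc-0/esoc-names match what the parser above reads, while the "mdm" entry, the demo_* identifiers and the probe signature are placeholders.

/* Illustrative only, not part of the patch.
 * Assumed client DT node:  esoc-0 = <&mdm0>;  esoc-names = "mdm";
 */
static int demo_link_power_on(void *priv, unsigned int flags)
{
	if (flags & ESOC_HOOK_MDM_CRASH)
		pr_debug("demo: powering on after a crash\n");
	return 0;
}

static u64 demo_link_get_id(void *priv)
{
	return 0;	/* report the link id, e.g. a PCIe BDF */
}

static struct esoc_client_hook demo_hook = {
	.name = "demo",
	.prio = ESOC_MHI_HOOK,
	.esoc_link_power_on = demo_link_power_on,
	.esoc_link_get_id = demo_link_get_id,
};

static int demo_client_probe(struct device *dev)
{
	struct esoc_desc *desc;

	desc = devm_register_esoc_client(dev, "mdm");
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -ENODEV;

	return esoc_register_client_hook(desc, &demo_hook);
}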

View File

@ -0,0 +1,466 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2014, 2017-2018, 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/esoc_client.h>
#include "esoc.h"
/**
* struct esoc_udev: Userspace char interface
* @dev: interface device.
* @req_fifo: fifo for clink requests.
* @req_wait: signal availability of request from clink
* @req_fifo_lock: serialize access to req fifo
* @evt_fifo: fifo for clink events
* @evt_wait: signal availability of clink event
* @evt_fifo_lock: serialize access to event fifo
* @list: entry in esoc dev list.
* @clink: reference to control link
*/
struct esoc_udev {
struct device *dev;
struct kfifo req_fifo;
wait_queue_head_t req_wait;
spinlock_t req_fifo_lock;
struct kfifo evt_fifo;
wait_queue_head_t evt_wait;
spinlock_t evt_fifo_lock;
struct list_head list;
struct esoc_clink *clink;
};
/**
* struct esoc_uhandle: Userspace handle of esoc
* @esoc_clink: esoc control link.
* @eng: esoc engine for commands/requests.
* @esoc_udev: user interface device.
* @req_eng_reg: indicates if engine is registered as request eng
* @cmd_eng_reg: indicates if engine is registered as cmd eng
*/
struct esoc_uhandle {
struct esoc_clink *esoc_clink;
struct esoc_eng eng;
struct esoc_udev *esoc_udev;
bool req_eng_reg;
bool cmd_eng_reg;
};
#define ESOC_MAX_MINOR 256
#define ESOC_MAX_REQ 8
#define ESOC_MAX_EVT 4
static LIST_HEAD(esoc_udev_list);
static DEFINE_SPINLOCK(esoc_udev_list_lock);
struct class *esoc_class;
static int esoc_major;
static struct esoc_udev *get_free_esoc_udev(struct esoc_clink *esoc_clink)
{
struct esoc_udev *esoc_udev;
int err;
if (esoc_clink->id >= ESOC_MAX_MINOR) {
pr_err("too many esoc devices\n");
return ERR_PTR(-ENODEV);
}
esoc_udev = kzalloc(sizeof(*esoc_udev), GFP_KERNEL);
if (!esoc_udev)
return ERR_PTR(-ENOMEM);
err = kfifo_alloc(&esoc_udev->req_fifo, (sizeof(u32)) * ESOC_MAX_REQ, GFP_KERNEL);
if (err) {
pr_err("unable to allocate request fifo for %s\n", esoc_clink->name);
goto req_fifo_fail;
}
err = kfifo_alloc(&esoc_udev->evt_fifo, (sizeof(u32)) * ESOC_MAX_EVT, GFP_KERNEL);
if (err) {
pr_err("unable to allocate evt fifo for %s\n", esoc_clink->name);
goto evt_fifo_fail;
}
init_waitqueue_head(&esoc_udev->req_wait);
init_waitqueue_head(&esoc_udev->evt_wait);
spin_lock_init(&esoc_udev->req_fifo_lock);
spin_lock_init(&esoc_udev->evt_fifo_lock);
esoc_udev->clink = esoc_clink;
spin_lock(&esoc_udev_list_lock);
list_add_tail(&esoc_udev->list, &esoc_udev_list);
spin_unlock(&esoc_udev_list_lock);
return esoc_udev;
evt_fifo_fail:
kfifo_free(&esoc_udev->req_fifo);
req_fifo_fail:
kfree(esoc_udev);
return ERR_PTR(-ENODEV);
}
static void return_esoc_udev(struct esoc_udev *esoc_udev)
{
spin_lock(&esoc_udev_list_lock);
list_del(&esoc_udev->list);
spin_unlock(&esoc_udev_list_lock);
kfifo_free(&esoc_udev->req_fifo);
kfifo_free(&esoc_udev->evt_fifo);
kfree(esoc_udev);
}
static struct esoc_udev *esoc_udev_get_by_minor(unsigned int index)
{
struct esoc_udev *esoc_udev;
spin_lock(&esoc_udev_list_lock);
list_for_each_entry(esoc_udev, &esoc_udev_list, list) {
if (esoc_udev->clink->id == index)
goto found;
}
esoc_udev = NULL;
found:
spin_unlock(&esoc_udev_list_lock);
return esoc_udev;
}
void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng)
{
int err;
u32 clink_req;
struct esoc_clink *esoc_clink = eng->esoc_clink;
struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev) {
esoc_mdm_log("esoc_udev not found\n");
return;
}
clink_req = (u32)req;
err = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req, sizeof(clink_req),
&esoc_udev->req_fifo_lock);
if (err != sizeof(clink_req)) {
esoc_mdm_log("Unable to queue request %d; err: %d\n", req, err);
pr_err("unable to queue request for %s\n", esoc_clink->name);
return;
}
wake_up_interruptible(&esoc_udev->req_wait);
}
void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
{
int err;
u32 clink_evt;
struct esoc_clink *esoc_clink = eng->esoc_clink;
struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev) {
esoc_mdm_log("esoc_udev not found\n");
return;
}
clink_evt = (u32)evt;
err = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt, sizeof(clink_evt),
&esoc_udev->evt_fifo_lock);
if (err != sizeof(clink_evt)) {
esoc_mdm_log("Unable to queue event %d; err: %d\n", evt, err);
pr_err("unable to queue event for %s\n", esoc_clink->name);
return;
}
wake_up_interruptible(&esoc_udev->evt_wait);
}
static int esoc_get_link_id(struct esoc_clink *esoc_clink, unsigned long arg)
{
struct esoc_link_data link_data;
struct esoc_client_hook *client_hook;
struct esoc_link_data __user *user_arg;
user_arg = (struct esoc_link_data __user *) arg;
if (!user_arg) {
dev_err(&esoc_clink->dev, "Missing argument for link id\n");
return -EINVAL;
}
if (copy_from_user((void *) &link_data, user_arg, sizeof(*user_arg))) {
dev_err(&esoc_clink->dev, "Unable to copy the data from the user\n");
return -EFAULT;
}
if (link_data.prio < 0 || link_data.prio >= ESOC_MAX_HOOKS) {
dev_err(&esoc_clink->dev, "Invalid client identifier passed\n");
return -EINVAL;
}
client_hook = esoc_clink->client_hook[link_data.prio];
if (client_hook && client_hook->esoc_link_get_id) {
link_data.link_id = client_hook->esoc_link_get_id(client_hook->priv);
if (copy_to_user((void *) user_arg, &link_data, sizeof(*user_arg))) {
dev_err(&esoc_clink->dev, "Failed to send the data to the user\n");
return -EFAULT;
}
return 0;
}
dev_err(&esoc_clink->dev, "Client hooks not registered for the device\n");
return -EINVAL;
}
static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err;
u32 esoc_cmd, status, req, evt;
struct esoc_uhandle *uhandle = file->private_data;
struct esoc_udev *esoc_udev = uhandle->esoc_udev;
struct esoc_clink *esoc_clink = uhandle->esoc_clink;
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
void __user *uarg = (void __user *)arg;
switch (cmd) {
case ESOC_REG_REQ_ENG:
esoc_mdm_log("ESOC_REG_REQ_ENG\n");
err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng);
if (err) {
esoc_mdm_log("ESOC_REG_REQ_ENG failed: %d\n", err);
return err;
}
uhandle->req_eng_reg = true;
break;
case ESOC_REG_CMD_ENG:
esoc_mdm_log("ESOC_REG_CMD_ENG\n");
err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng);
if (err) {
esoc_mdm_log("ESOC_REG_CMD_ENG failed: %d\n", err);
return err;
}
uhandle->cmd_eng_reg = true;
break;
case ESOC_CMD_EXE:
if (esoc_clink->cmd_eng != &uhandle->eng) {
esoc_mdm_log("ESOC_CMD_EXE failed to access\n");
return -EACCES;
}
get_user(esoc_cmd, (u32 __user *)arg);
esoc_mdm_log("ESOC_CMD_EXE: Executing esoc command: %u\n", esoc_cmd);
return clink_ops->cmd_exe(esoc_cmd, esoc_clink);
case ESOC_WAIT_FOR_REQ:
if (esoc_clink->req_eng != &uhandle->eng) {
esoc_mdm_log("ESOC_WAIT_FOR_REQ: Failed to access\n");
return -EACCES;
}
esoc_mdm_log("ESOC_WAIT_FOR_REQ: Waiting for req event to arrive.\n");
err = wait_event_interruptible(esoc_udev->req_wait,
!kfifo_is_empty(&esoc_udev->req_fifo));
if (!err) {
err = kfifo_out_spinlocked(&esoc_udev->req_fifo, &req, sizeof(req),
&esoc_udev->req_fifo_lock);
if (err != sizeof(req)) {
esoc_mdm_log("ESOC_WAIT_FOR_REQ: Failed to read the event\n");
pr_err("read from clink %s req q failed\n", esoc_clink->name);
return -EIO;
}
put_user(req, (unsigned int __user *)uarg);
esoc_mdm_log("ESOC_WAIT_FOR_REQ: Event arrived: %u\n", req);
}
return err;
case ESOC_NOTIFY:
get_user(esoc_cmd, (u32 __user *)arg);
esoc_mdm_log("ESOC_NOTIFY: Notifying esoc about cmd: %u\n", esoc_cmd);
clink_ops->notify(esoc_cmd, esoc_clink);
break;
case ESOC_GET_STATUS:
clink_ops->get_status(&status, esoc_clink);
esoc_mdm_log("ESOC_GET_STATUS: Sending the status from esoc: %u\n", status);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_GET_ERR_FATAL:
clink_ops->get_err_fatal(&status, esoc_clink);
esoc_mdm_log("ESOC_GET_ERR_FATAL: Sending err_fatal status from esoc: %u\n",
status);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
esoc_mdm_log("ESOC_WAIT_FOR_CRASH: Waiting for evt to arrive..\n");
err = wait_event_interruptible(esoc_udev->evt_wait,
!kfifo_is_empty(&esoc_udev->evt_fifo));
if (!err) {
err = kfifo_out_spinlocked(&esoc_udev->evt_fifo, &evt, sizeof(evt),
&esoc_udev->evt_fifo_lock);
if (err != sizeof(evt)) {
esoc_mdm_log("ESOC_WAIT_FOR_CRASH: Failed to read event\n");
pr_err("read from clink %s evt q failed\n", esoc_clink->name);
return -EIO;
}
put_user(evt, (unsigned int __user *)uarg);
esoc_mdm_log("ESOC_WAIT_FOR_CRASH: Event arrived: %u\n", evt);
}
return err;
case ESOC_GET_LINK_ID:
return esoc_get_link_id(esoc_clink, arg);
case ESOC_SET_BOOT_FAIL_ACT:
get_user(esoc_cmd, (u32 __user *) arg);
return esoc_set_boot_fail_action(esoc_clink, esoc_cmd);
case ESOC_SET_N_PON_TRIES:
get_user(esoc_cmd, (u32 __user *) arg);
return esoc_set_n_pon_tries(esoc_clink, esoc_cmd);
default:
return -EINVAL;
}
return 0;
}
static int esoc_dev_open(struct inode *inode, struct file *file)
{
struct esoc_uhandle *uhandle;
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink;
struct esoc_eng *eng;
unsigned int minor = iminor(inode);
esoc_udev = esoc_udev_get_by_minor(minor);
if (!esoc_udev) {
esoc_mdm_log("failed to get udev\n");
pr_err("failed to get udev\n");
return -ENOMEM;
}
esoc_clink = get_esoc_clink(esoc_udev->clink->id);
if (!esoc_clink) {
esoc_mdm_log("failed to get clink\n");
pr_err("failed to get clink\n");
return -ENOMEM;
}
uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
if (!uhandle) {
put_esoc_clink(esoc_clink);
return -ENOMEM;
}
uhandle->esoc_udev = esoc_udev;
uhandle->esoc_clink = esoc_clink;
eng = &uhandle->eng;
eng->handle_clink_req = esoc_udev_handle_clink_req;
eng->handle_clink_evt = esoc_udev_handle_clink_evt;
file->private_data = uhandle;
esoc_mdm_log("%s successfully attached to esoc driver\n", current->comm);
return 0;
}
static int esoc_dev_release(struct inode *inode, struct file *file)
{
struct esoc_clink *esoc_clink;
struct esoc_uhandle *uhandle = file->private_data;
esoc_clink = uhandle->esoc_clink;
if (uhandle->req_eng_reg) {
esoc_mdm_log("Unregistering req_eng\n");
esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng);
} else {
esoc_mdm_log("No req_eng to unregister\n");
}
if (uhandle->cmd_eng_reg) {
esoc_mdm_log("Unregistering cmd_eng\n");
esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng);
} else {
esoc_mdm_log("No cmd_eng to unregister\n");
}
uhandle->req_eng_reg = false;
uhandle->cmd_eng_reg = false;
put_esoc_clink(esoc_clink);
kfree(uhandle);
esoc_mdm_log("%s Unregistered with esoc\n", current->comm);
return 0;
}
static const struct file_operations esoc_dev_fops = {
.owner = THIS_MODULE,
.open = esoc_dev_open,
.unlocked_ioctl = esoc_dev_ioctl,
.release = esoc_dev_release,
};
int esoc_clink_add_device(struct device *dev, void *dummy)
{
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
esoc_udev = get_free_esoc_udev(esoc_clink);
if (IS_ERR_OR_NULL(esoc_udev))
return PTR_ERR(esoc_udev);
esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
MKDEV(esoc_major, esoc_clink->id),
esoc_clink, "esoc-%d", esoc_clink->id);
if (IS_ERR_OR_NULL(esoc_udev->dev)) {
pr_err("failed to create user device\n");
goto dev_err;
}
return 0;
dev_err:
return_esoc_udev(esoc_udev);
return -ENODEV;
}
int esoc_clink_del_device(struct device *dev, void *dummy)
{
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev)
return 0;
device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
return_esoc_udev(esoc_udev);
return 0;
}
static int esoc_dev_notifier_call(struct notifier_block *nb, unsigned long action, void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
return esoc_clink_add_device(dev, NULL);
case BUS_NOTIFY_DEL_DEVICE:
return esoc_clink_del_device(dev, NULL);
}
return 0;
}
static struct notifier_block esoc_dev_notifier = {
.notifier_call = esoc_dev_notifier_call,
};
int esoc_dev_init(void)
{
int ret = 0;
esoc_class = class_create(THIS_MODULE, "esoc-dev");
if (IS_ERR_OR_NULL(esoc_class)) {
pr_err("coudn't create class\n");
return PTR_ERR(esoc_class);
}
esoc_major = register_chrdev(0, "esoc", &esoc_dev_fops);
if (esoc_major < 0) {
pr_err("failed to allocate char dev\n");
ret = esoc_major;
goto class_unreg;
}
ret = bus_register_notifier(&esoc_bus_type, &esoc_dev_notifier);
if (ret)
goto chrdev_unreg;
esoc_for_each_dev(NULL, esoc_clink_add_device);
return ret;
chrdev_unreg:
unregister_chrdev(esoc_major, "esoc");
class_unreg:
class_destroy(esoc_class);
return ret;
}
EXPORT_SYMBOL(esoc_dev_init);
void esoc_dev_exit(void)
{
bus_unregister_notifier(&esoc_bus_type, &esoc_dev_notifier);
class_destroy(esoc_class);
unregister_chrdev(esoc_major, "esoc");
}
EXPORT_SYMBOL(esoc_dev_exit);

View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, 2018, 2020-2021, The Linux Foundation. All rights reserved.
*/
static bool debug_init_done;
#ifndef CONFIG_QCOM_ESOC_DBG_ENG
static inline bool dbg_check_cmd_mask(unsigned int cmd)
{
return false;
}
static inline bool dbg_check_notify_mask(unsigned int notify)
{
return false;
}
static inline int mdm_dbg_eng_init(struct esoc_drv *drv,
struct esoc_clink *clink)
{
return 0;
}
#else
extern bool dbg_check_cmd_mask(unsigned int cmd);
extern bool dbg_check_notify_mask(unsigned int notify);
extern int mdm_dbg_eng_init(struct esoc_drv *drv,
struct esoc_clink *clink);
#endif
static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
{
if (debug_init_done)
return dbg_check_cmd_mask(cmd);
else
return false;
}
static inline bool mdm_dbg_stall_notify(unsigned int notify)
{
if (debug_init_done)
return dbg_check_notify_mask(notify);
else
return false;
}
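
These helpers are meant to be consulted by the command engine before it acts on a command or forwards a notification: when the debug engine has masked the value, the stall helper returns true and the engine skips it. A minimal, hypothetical use is sketched below (demo_send_cmd is a placeholder; the clink_ops->cmd_exe() call mirrors the ioctl path in the userspace interface above).

/* Illustrative only, not part of the patch. */
static int demo_send_cmd(struct esoc_clink *clink, unsigned int cmd)
{
	if (mdm_dbg_stall_cmd(cmd))
		return 0;	/* command masked by the debug engine, skip it */

	return clink->clink_ops->cmd_exe(cmd, clink);
}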

View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014, 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_CLIENT_H_
#define __ESOC_CLIENT_H_
#include <linux/device.h>
#include <linux/esoc_ctrl.h>
#include <linux/notifier.h>
/* Flag values used with the power_on and power_off hooks */
#define ESOC_HOOK_MDM_CRASH 0x0001 /* In crash handling path */
#define ESOC_HOOK_MDM_DOWN 0x0002 /* MDM about to go down */
struct esoc_client_hook {
char *name;
void *priv;
enum esoc_client_hook_prio prio;
int (*esoc_link_power_on)(void *priv, unsigned int flags);
void (*esoc_link_power_off)(void *priv, unsigned int flags);
u64 (*esoc_link_get_id)(void *priv);
void (*esoc_link_mdm_crash)(void *priv);
};
/*
* struct esoc_desc: Describes an external soc
* @name: external soc name
* @link: name of the control link to the external soc
* @link_info: additional information about the control link
* @priv: private data for external soc
*/
struct esoc_desc {
const char *name;
const char *link;
const char *link_info;
void *priv;
};
/* Can return probe deferral */
struct esoc_desc *devm_register_esoc_client(struct device *dev,
const char *name);
void devm_unregister_esoc_client(struct device *dev,
struct esoc_desc *esoc_desc);
int esoc_register_client_notifier(struct notifier_block *nb);
int esoc_register_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook);
int esoc_unregister_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook);
#endif

View File

@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_ESOC_CTRL_H_
#define _UAPI_ESOC_CTRL_H_
#include <linux/types.h>
enum esoc_client_hook_prio {
ESOC_MHI_HOOK,
ESOC_MAX_HOOKS
};
struct esoc_link_data {
enum esoc_client_hook_prio prio;
__u64 link_id;
};
#define ESOC_CODE 0xCC
#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, unsigned int)
#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
#define ESOC_GET_ERR_FATAL _IOR(ESOC_CODE, 5, unsigned int)
#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
#define ESOC_GET_LINK_ID _IOWR(ESOC_CODE, 9, struct esoc_link_data)
#define ESOC_SET_BOOT_FAIL_ACT _IOW(ESOC_CODE, 10, unsigned int)
#define ESOC_SET_N_PON_TRIES _IOW(ESOC_CODE, 11, unsigned int)
#define ESOC_REQ_SEND_SHUTDOWN ESOC_REQ_SEND_SHUTDOWN
#define ESOC_REQ_CRASH_SHUTDOWN ESOC_REQ_CRASH_SHUTDOWN
#define ESOC_PON_RETRY ESOC_PON_RETRY
#define ESOC_BOOT_FAIL_ACTION
enum esoc_boot_fail_action {
BOOT_FAIL_ACTION_RETRY,
BOOT_FAIL_ACTION_COLD_RESET,
BOOT_FAIL_ACTION_SHUTDOWN,
BOOT_FAIL_ACTION_PANIC,
BOOT_FAIL_ACTION_NOP,
BOOT_FAIL_ACTION_S3_RESET,
BOOT_FAIL_ACTION_LAST,
};
enum esoc_evt {
ESOC_RUN_STATE = 0x1,
ESOC_UNEXPECTED_RESET,
ESOC_ERR_FATAL,
ESOC_IN_DEBUG,
ESOC_REQ_ENG_ON,
ESOC_REQ_ENG_OFF,
ESOC_CMD_ENG_ON,
ESOC_CMD_ENG_OFF,
ESOC_INVALID_STATE,
ESOC_RETRY_PON_EVT,
ESOC_BOOT_STATE,
};
enum esoc_cmd {
ESOC_PWR_ON = 1,
ESOC_PWR_OFF,
ESOC_FORCE_PWR_OFF,
ESOC_RESET,
ESOC_PREPARE_DEBUG,
ESOC_EXE_DEBUG,
ESOC_EXIT_DEBUG,
};
enum esoc_notify {
ESOC_IMG_XFER_DONE = 1,
ESOC_BOOT_DONE,
ESOC_BOOT_FAIL,
ESOC_IMG_XFER_RETRY,
ESOC_IMG_XFER_FAIL,
ESOC_UPGRADE_AVAILABLE,
ESOC_DEBUG_DONE,
ESOC_DEBUG_FAIL,
ESOC_PRIMARY_CRASH,
ESOC_PRIMARY_REBOOT,
ESOC_PON_RETRY,
};
enum esoc_req {
ESOC_REQ_IMG = 1,
ESOC_REQ_DEBUG,
ESOC_REQ_SHUTDOWN,
ESOC_REQ_SEND_SHUTDOWN,
ESOC_REQ_CRASH_SHUTDOWN,
};
#endif
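
For completeness, a userspace sketch of the ioctl flow implemented by the char interface above. It is illustrative only and not part of this patch: the node path /dev/esoc-0 is an assumption derived from the "esoc-%d" device name created by the driver (udev naming may differ), and <linux/esoc_ctrl.h> refers to the uapi header added here once installed.

/* Illustrative only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/esoc_ctrl.h>

int main(void)
{
	unsigned int status, req;
	int fd = open("/dev/esoc-0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* become the request engine for this control link */
	if (ioctl(fd, ESOC_REG_REQ_ENG))
		goto out;

	if (!ioctl(fd, ESOC_GET_STATUS, &status))
		printf("esoc status: %u\n", status);

	/* block until the link posts a request (ESOC_REQ_IMG, ...) */
	if (!ioctl(fd, ESOC_WAIT_FOR_REQ, &req))
		printf("esoc request: %u\n", req);
out:
	close(fd);
	return 0;
}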