drivers: soc: qcom: snapshot of sleep drivers for Lahaina

This is a snapshot of the Sleep driver and related functionality as of
'commit b252df6e0200 ("cpuidle: lpm-levels: log enabled regulators
before entering suspend")' on the msm-4.19 branch.

Updates:
        - Remove event timers
        - Remove per-cpu PM QoS
        - Remove rpm stats (use the new soc_sleep_stats driver instead)
        - Update __arch_counter_get_cntvct (new)
        - Update to get next wakeup from cpuidle device
        - Remove pending_ipi and is_IPI_pending() (defined in smp.c)
        - Update copyright
        - Update Kconfig menu items
        - Update hotplug enum to use the already upstreamed one
        - Add ifdefs around scheduler functions that are not upstreamed

Change-Id: I75336ec927932d93c0c193a20973fc84e3b05ff5
Signed-off-by: Lina Iyer <ilina@codeaurora.org>
commit 6d5e000248 (parent 52c2866900)
15 changed files with 4081 additions and 1 deletion

@@ -86,3 +86,16 @@ config ARM_MVEBU_V7_CPUIDLE
depends on ARCH_MVEBU && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
config ARM_QCOM_LPM_CPUIDLE
depends on ARCH_QCOM || COMPILE_TEST
select CPU_IDLE_MULTIPLE_DRIVERS
select MSM_PM if QGKI
select QGKI_LPM_IPI_CHECK if QGKI
tristate "Qualcomm Technologies, Inc. (QTI) Power Management Drivers"
help
Platform specific power driver to manage cores and cluster low power
modes. It interfaces with various system drivers, puts the cores
into low power modes, implements the OS-initiated scheme, and
determines the last CPU to call into PSCI for cluster low power
modes.
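
As a rough illustration of the OS-initiated scheme mentioned above (a
sketch only, not the driver's actual code), the last CPU of a cluster to
go idle is the one allowed to request a cluster low power mode:

	/* Sketch: cluster state rides on the last idle CPU's PSCI request. */
	static bool last_cpu_in_cluster(const struct cpumask *cluster_cpus,
					struct cpumask *idle_cpus, int cpu)
	{
		cpumask_set_cpu(cpu, idle_cpus);
		/*
		 * Only when every CPU of the cluster is idle may the caller
		 * fold a cluster mode into its PSCI power_state parameter.
		 */
		return cpumask_equal(idle_cpus, cluster_cpus);
	}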

@@ -22,6 +22,8 @@ obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
obj-$(CONFIG_ARM_QCOM_LPM_CPUIDLE) += lpm-levels.o
obj-$(CONFIG_ARM_QCOM_LPM_CPUIDLE) += lpm-levels-of.o
###############################################################################
# MIPS drivers

@@ -0,0 +1,696 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include "lpm-levels.h"
enum lpm_type {
IDLE = 0,
SUSPEND,
LATENCY,
LPM_TYPE_NR,
};
struct lpm_type_str {
enum lpm_type type;
char *str;
};
static const struct lpm_type_str lpm_types[] = {
{IDLE, "idle_enabled"},
{SUSPEND, "suspend_enabled"},
{LATENCY, "exit_latency_us"},
};
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;
static int lpm_of_read_u32(struct device_node *dn, const char *key,
u32 *val, bool is_err)
{
int ret;
ret = of_property_read_u32(dn, key, val);
if (is_err && ret)
pr_err("%s:failed to read key:%s ret:%d\n", dn->name, key, ret);
return ret;
}
static void *get_enabled_ptr(struct kobj_attribute *attr,
struct lpm_level_avail *avail)
{
void *arg = NULL;
if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
arg = (void *) &avail->idle_enabled;
else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
arg = (void *) &avail->suspend_enabled;
return arg;
}
static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
struct kobj_attribute *attr)
{
struct lpm_level_avail *avail = NULL;
if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
avail = container_of(attr, struct lpm_level_avail,
idle_enabled_attr);
else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
avail = container_of(attr, struct lpm_level_avail,
suspend_enabled_attr);
else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
avail = container_of(attr, struct lpm_level_avail,
latency_attr);
return avail;
}
static ssize_t lpm_latency_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int ret = 0;
struct kernel_param kp;
struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
if (WARN_ON(!avail))
return -EINVAL;
kp.arg = &avail->exit_latency;
ret = param_get_uint(buf, &kp);
if (ret > 0) {
strlcat(buf, "\n", PAGE_SIZE);
ret++;
}
return ret;
}
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int ret = 0;
struct kernel_param kp;
struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
if (WARN_ON(!avail))
return -EINVAL;
kp.arg = get_enabled_ptr(attr, avail);
if (WARN_ON(!kp.arg))
return -EINVAL;
ret = param_get_bool(buf, &kp);
if (ret > 0) {
strlcat(buf, "\n", PAGE_SIZE);
ret++;
}
return ret;
}
ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
int ret = 0;
struct kernel_param kp;
struct lpm_level_avail *avail;
avail = get_avail_ptr(kobj, attr);
if (WARN_ON(!avail))
return -EINVAL;
kp.arg = get_enabled_ptr(attr, avail);
ret = param_set_bool(buf, &kp);
return ret ? ret : len;
}
static int create_lvl_avail_nodes(const char *name,
struct kobject *parent, struct lpm_level_avail *avail,
void *data, int index, bool cpu_node)
{
struct attribute_group *attr_group = NULL;
struct attribute **attr = NULL;
struct kobject *kobj = NULL;
int ret = 0;
kobj = kobject_create_and_add(name, parent);
if (!kobj)
return -ENOMEM;
attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
GFP_KERNEL);
if (!attr_group) {
ret = -ENOMEM;
goto failed;
}
attr = devm_kcalloc(&lpm_pdev->dev, LPM_TYPE_NR + 1, sizeof(*attr),
GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto failed;
}
sysfs_attr_init(&avail->idle_enabled_attr.attr);
avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
avail->idle_enabled_attr.attr.mode = 0644;
avail->idle_enabled_attr.show = lpm_enable_show;
avail->idle_enabled_attr.store = lpm_enable_store;
sysfs_attr_init(&avail->suspend_enabled_attr.attr);
avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
avail->suspend_enabled_attr.attr.mode = 0644;
avail->suspend_enabled_attr.show = lpm_enable_show;
avail->suspend_enabled_attr.store = lpm_enable_store;
sysfs_attr_init(&avail->latency_attr.attr);
avail->latency_attr.attr.name = lpm_types[LATENCY].str;
avail->latency_attr.attr.mode = 0444;
avail->latency_attr.show = lpm_latency_show;
avail->latency_attr.store = NULL;
attr[0] = &avail->idle_enabled_attr.attr;
attr[1] = &avail->suspend_enabled_attr.attr;
attr[2] = &avail->latency_attr.attr;
attr[3] = NULL;
attr_group->attrs = attr;
ret = sysfs_create_group(kobj, attr_group);
if (ret)
goto failed;
avail->idle_enabled = true;
avail->suspend_enabled = true;
avail->kobj = kobj;
avail->data = data;
avail->idx = index;
avail->cpu_node = cpu_node;
return 0;
failed:
kobject_put(kobj);
return ret;
}
static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
int cpu;
int i, cpu_idx;
struct kobject **cpu_kobj = NULL;
struct lpm_level_avail *level_list = NULL;
char cpu_name[20] = {0};
int ret = 0;
struct list_head *pos;
cpu_kobj = devm_kcalloc(&lpm_pdev->dev, cpumask_weight(&p->child_cpus),
sizeof(*cpu_kobj), GFP_KERNEL);
if (!cpu_kobj)
return -ENOMEM;
cpu_idx = 0;
list_for_each(pos, &p->cpu) {
struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);
for_each_cpu(cpu, &lpm_cpu->related_cpus) {
snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
parent);
if (!cpu_kobj[cpu_idx]) {
ret = -ENOMEM;
goto release_kobj;
}
level_list = devm_kcalloc(&lpm_pdev->dev,
lpm_cpu->nlevels,
sizeof(*level_list),
GFP_KERNEL);
if (!level_list) {
ret = -ENOMEM;
goto release_kobj;
}
/*
* Skip enable/disable for WFI. cpuidle expects WFI to
* be available at all times.
*/
for (i = 1; i < lpm_cpu->nlevels; i++) {
level_list[i].exit_latency =
p->levels[i].pwr.exit_latency;
ret = create_lvl_avail_nodes(
lpm_cpu->levels[i].name,
cpu_kobj[cpu_idx],
&level_list[i],
(void *)lpm_cpu, cpu, true);
if (ret)
goto release_kobj;
}
cpu_level_available[cpu] = level_list;
cpu_idx++;
}
}
return ret;
release_kobj:
for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
kobject_put(cpu_kobj[i]);
return ret;
}
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
int ret = 0;
struct lpm_cluster *child = NULL;
int i;
struct kobject *cluster_kobj = NULL;
if (!p)
return -ENODEV;
cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
if (!cluster_kobj)
return -ENOMEM;
for (i = 0; i < p->nlevels; i++) {
p->levels[i].available.exit_latency =
p->levels[i].pwr.exit_latency;
ret = create_lvl_avail_nodes(p->levels[i].level_name,
cluster_kobj, &p->levels[i].available,
(void *)p, 0, false);
if (ret)
return ret;
}
list_for_each_entry(child, &p->child, list) {
ret = create_cluster_lvl_nodes(child, cluster_kobj);
if (ret)
return ret;
}
if (!list_empty(&p->cpu))
ret = create_cpu_lvl_nodes(p, cluster_kobj);
return ret;
}
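/*
 * Illustrative sysfs layout produced by the two helpers above (level 0,
 * WFI, is intentionally skipped for CPUs, as noted in
 * create_cpu_lvl_nodes()):
 *
 *   <parent>/<cluster_name>/
 *       <cluster level name>/{idle_enabled,suspend_enabled,exit_latency_us}
 *       <child cluster name>/...
 *       cpuN/<cpu level name>/{idle_enabled,suspend_enabled,exit_latency_us}
 */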
int lpm_cpu_mode_allow(unsigned int cpu,
unsigned int index, bool from_idle)
{
struct lpm_level_avail *avail = cpu_level_available[cpu];
if (lpm_pdev && !index)
return 1;
if (!lpm_pdev || !avail)
return !from_idle;
return !!(from_idle ? avail[index].idle_enabled :
avail[index].suspend_enabled);
}
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
unsigned int mode, bool from_idle)
{
struct lpm_level_avail *avail = &cluster->levels[mode].available;
if (!lpm_pdev || !avail)
return false;
return !!(from_idle ? avail->idle_enabled :
avail->suspend_enabled);
}
static int parse_cluster_params(struct device_node *dn, struct lpm_cluster *c)
{
int ret;
ret = of_property_read_string(dn, "label", &c->cluster_name);
if (ret) {
pr_err("Failed to read label ret: %d\n", ret);
return ret;
}
ret = lpm_of_read_u32(dn, "qcom,psci-mode-shift",
&c->psci_mode_shift, true);
if (ret)
return ret;
ret = lpm_of_read_u32(dn, "qcom,psci-mode-mask",
&c->psci_mode_mask, true);
if (ret)
return ret;
c->lpm_prediction = !(of_property_read_bool(dn,
"qcom,disable-prediction"));
if (c->lpm_prediction) {
ret = lpm_of_read_u32(dn, "qcom,clstr-tmr-add", &c->tmr_add,
false);
if (ret || c->tmr_add < TIMER_ADD_LOW ||
c->tmr_add > TIMER_ADD_HIGH) {
c->tmr_add = DEFAULT_TIMER_ADD;
ret = 0;
}
}
/* Set default_level to 0 as default */
c->default_level = 0;
return ret;
}
static int parse_power_params(struct device_node *dn, struct power_params *pwr)
{
int ret;
ret = lpm_of_read_u32(dn, "qcom,entry-latency-us",
&pwr->entry_latency, true);
if (ret)
return ret;
ret = lpm_of_read_u32(dn, "qcom,exit-latency-us",
&pwr->exit_latency, true);
if (ret)
return ret;
ret = lpm_of_read_u32(dn, "qcom,min-residency-us",
&pwr->min_residency, true);
return ret;
}
static int parse_cluster_level(struct device_node *dn,
struct lpm_cluster *cluster)
{
struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
int ret = -ENOMEM;
ret = of_property_read_string(dn, "label", &level->level_name);
if (ret) {
pr_err("Failed to read label ret: %d\n", ret);
return ret;
}
ret = lpm_of_read_u32(dn, "qcom,psci-mode", &level->psci_id, true);
if (ret)
return ret;
level->is_reset = of_property_read_bool(dn, "qcom,is-reset");
if (cluster->nlevels != cluster->default_level) {
ret = lpm_of_read_u32(dn, "qcom,min-child-idx",
&level->min_child_level, true);
if (ret)
return ret;
if (cluster->min_child_level > level->min_child_level)
cluster->min_child_level = level->min_child_level;
}
level->notify_rpm = of_property_read_bool(dn, "qcom,notify-rpm");
ret = parse_power_params(dn, &level->pwr);
if (ret) {
pr_err("Failed to parse power params ret:%d\n", ret);
return ret;
}
cluster->nlevels++;
return 0;
}
static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
int ret;
ret = of_property_read_string(n, "label", &l->name);
if (ret) {
pr_err("Failed to read label level: %s\n", l->name);
return ret;
}
return lpm_of_read_u32(n, "qcom,psci-cpu-mode", &l->psci_id, true);
}
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
struct device_node *cpu_node;
int cpu;
int idx = 0;
cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
if (!cpu_node) {
pr_info("%s: No CPU phandle, assuming single cluster\n",
node->full_name);
/*
* Not all targets have the cpu node populated in the device
* tree. If the cpu node is not populated, assume all possible
* CPUs belong to this cluster.
*/
cpumask_copy(mask, cpu_possible_mask);
return 0;
}
while (cpu_node) {
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
cpumask_set_cpu(cpu, mask);
break;
}
}
of_node_put(cpu_node);
cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
}
return 0;
}
static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
struct device_node *n;
int ret, i;
for_each_child_of_node(node, n) {
struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];
cpu->nlevels++;
ret = parse_cpu_mode(n, l);
if (ret) {
of_node_put(n);
return ret;
}
ret = parse_power_params(n, &l->pwr);
if (ret) {
of_node_put(n);
return ret;
}
l->use_bc_timer = of_property_read_bool(n,
"qcom,use-broadcast-timer");
l->is_reset = of_property_read_bool(n, "qcom,is-reset");
}
for (i = 1; i < cpu->nlevels; i++)
cpu->levels[i-1].pwr.max_residency =
cpu->levels[i].pwr.min_residency - 1;
cpu->levels[i-1].pwr.max_residency = UINT_MAX;
return 0;
}
static int parse_cpu_levels(struct device_node *dn, struct lpm_cluster *c)
{
int ret;
struct lpm_cpu *cpu;
cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
if (!cpu)
return -ENOMEM;
if (get_cpumask_for_node(dn, &cpu->related_cpus))
return -EINVAL;
cpu->parent = c;
ret = lpm_of_read_u32(dn, "qcom,psci-mode-shift",
&cpu->psci_mode_shift, true);
if (ret)
return ret;
ret = lpm_of_read_u32(dn, "qcom,psci-mode-mask",
&cpu->psci_mode_mask, true);
if (ret)
return ret;
cpu->lpm_prediction = !(of_property_read_bool(dn,
"qcom,disable-prediction"));
if (cpu->lpm_prediction) {
ret = lpm_of_read_u32(dn, "qcom,ref-stddev",
&cpu->ref_stddev, false);
if (ret || cpu->ref_stddev < STDDEV_LOW ||
cpu->ref_stddev > STDDEV_HIGH)
cpu->ref_stddev = DEFAULT_STDDEV;
ret = lpm_of_read_u32(dn, "qcom,tmr-add",
&cpu->tmr_add, false);
if (ret || cpu->tmr_add < TIMER_ADD_LOW ||
cpu->tmr_add > TIMER_ADD_HIGH)
cpu->tmr_add = DEFAULT_TIMER_ADD;
ret = lpm_of_read_u32(dn, "qcom,ref-premature-cnt",
&cpu->ref_premature_cnt, false);
if (ret || cpu->ref_premature_cnt < PREMATURE_CNT_LOW ||
cpu->ref_premature_cnt > PREMATURE_CNT_HIGH)
cpu->ref_premature_cnt = DEFAULT_PREMATURE_CNT;
}
ret = parse_cpu(dn, cpu);
if (ret) {
pr_err("Failed to parse cpu %s\n", dn->name);
return ret;
}
cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
list_add(&cpu->list, &c->cpu);
return ret;
}
void free_cluster_node(struct lpm_cluster *cluster)
{
struct lpm_cpu *cpu, *n;
struct lpm_cluster *cl, *m;
list_for_each_entry_safe(cl, m, &cluster->child, list) {
list_del(&cl->list);
free_cluster_node(cl);
}
list_for_each_entry_safe(cpu, n, &cluster->cpu, list)
list_del(&cpu->list);
}
/*
* TODO:
* Expects a CPU or a cluster only. This ensures that affinity
* level of a cluster is consistent with reference to its
* child nodes.
*/
struct lpm_cluster *parse_cluster(struct device_node *node,
struct lpm_cluster *parent)
{
struct lpm_cluster *c;
struct device_node *n;
int ret = 0, i;
c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
ret = parse_cluster_params(node, c);
if (ret)
return NULL;
INIT_LIST_HEAD(&c->child);
INIT_LIST_HEAD(&c->cpu);
c->parent = parent;
spin_lock_init(&c->sync_lock);
c->min_child_level = NR_LPM_LEVELS;
for_each_child_of_node(node, n) {
if (!n->name)
continue;
if (!of_node_cmp(n->name, "qcom,pm-cluster-level")) {
if (parse_cluster_level(n, c)) {
pr_err("Failed parse pm-cluster-level\n");
goto failed_parse_cluster;
}
} else if (!of_node_cmp(n->name, "qcom,pm-cluster")) {
struct lpm_cluster *child;
child = parse_cluster(n, c);
if (!child) {
pr_err("Failed parse pm-cluster\n");
goto failed_parse_cluster;
}
list_add(&child->list, &c->child);
cpumask_or(&c->child_cpus, &c->child_cpus,
&child->child_cpus);
c->aff_level = child->aff_level + 1;
} else if (!of_node_cmp(n->name, "qcom,pm-cpu")) {
if (parse_cpu_levels(n, c)) {
pr_err("Failed parse pm-cpu\n");
goto failed_parse_cluster;
}
c->aff_level = 1;
}
}
if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
c->last_level = c->default_level;
else
c->last_level = c->nlevels-1;
for (i = 1; i < c->nlevels; i++)
c->levels[i-1].pwr.max_residency =
c->levels[i].pwr.min_residency - 1;
c->levels[i-1].pwr.max_residency = UINT_MAX;
return c;
failed_parse_cluster:
of_node_put(n);
if (parent)
list_del(&c->list);
free_cluster_node(c);
return NULL;
}
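/*
 * Illustrative shape of the device tree hierarchy consumed by
 * parse_cluster() above (node names and values are examples; the
 * qcom,* properties are the ones actually parsed here):
 *
 *   qcom,pm-cluster {
 *       label = "L3";
 *       qcom,psci-mode-shift = <4>;
 *       qcom,psci-mode-mask = <0xfff>;
 *       qcom,pm-cluster-level {
 *           label = "l3-pc";
 *           qcom,psci-mode = <0x4>;
 *           qcom,min-child-idx = <1>;
 *           latency/residency properties, see parse_power_params()
 *       };
 *       qcom,pm-cpu {
 *           qcom,psci-mode-shift = <0>;
 *           qcom,psci-mode-mask = <0xf>;
 *           qcom,cpu = <&CPU0 &CPU1 ...>;
 *           one child node per cpu level, see parse_cpu_mode()
 *       };
 *   };
 */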
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
struct device_node *top = NULL;
struct lpm_cluster *c;
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top) {
pr_err("Failed to find root node\n");
return ERR_PTR(-ENODEV);
}
lpm_pdev = pdev;
c = parse_cluster(top, NULL);
of_node_put(top);
return c;
}
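
The consumer of these helpers is lpm-levels.c, whose diff is suppressed
below. A minimal sketch of the assumed probe-time flow (illustrative
only, since that diff is not shown here):

	static int lpm_probe_sketch(struct platform_device *pdev)
	{
		struct lpm_cluster *root;

		root = lpm_of_parse_cluster(pdev);
		if (IS_ERR_OR_NULL(root))
			return root ? PTR_ERR(root) : -ENOMEM;

		/* expose the per-level enable/latency knobs in sysfs */
		return create_cluster_lvl_nodes(root, &pdev->dev.kobj);
	}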

drivers/cpuidle/lpm-levels.c (new file, 1593 lines): diff suppressed because it is too large.

@@ -0,0 +1,130 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __LPM_LEVELS_H__
#define __LPM_LEVELS_H__
#include <soc/qcom/pm.h>
#define NR_LPM_LEVELS 8
#define MAXSAMPLES 5
#define CLUST_SMPL_INVLD_TIME 40000
#define DEFAULT_PREMATURE_CNT 3
#define DEFAULT_STDDEV 100
#define DEFAULT_TIMER_ADD 100
#define TIMER_ADD_LOW 100
#define TIMER_ADD_HIGH 1500
#define STDDEV_LOW 100
#define STDDEV_HIGH 1000
#define PREMATURE_CNT_LOW 1
#define PREMATURE_CNT_HIGH 5
struct power_params {
uint32_t entry_latency; /* Entry latency */
uint32_t exit_latency; /* Exit latency */
uint32_t min_residency;
uint32_t max_residency;
};
struct lpm_cpu_level {
const char *name;
bool use_bc_timer;
struct power_params pwr;
unsigned int psci_id;
bool is_reset;
};
struct lpm_cpu {
struct list_head list;
struct cpumask related_cpus;
struct lpm_cpu_level levels[NR_LPM_LEVELS];
int nlevels;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
uint32_t ref_stddev;
uint32_t ref_premature_cnt;
uint32_t tmr_add;
bool lpm_prediction;
struct cpuidle_driver *drv;
struct lpm_cluster *parent;
ktime_t next_hrtimer;
};
struct lpm_level_avail {
bool idle_enabled;
bool suspend_enabled;
uint32_t exit_latency;
struct kobject *kobj;
struct kobj_attribute idle_enabled_attr;
struct kobj_attribute suspend_enabled_attr;
struct kobj_attribute latency_attr;
void *data;
int idx;
bool cpu_node;
};
struct lpm_cluster_level {
const char *level_name;
int min_child_level;
struct cpumask num_cpu_votes;
struct power_params pwr;
bool notify_rpm;
bool sync_level;
struct lpm_level_avail available;
unsigned int psci_id;
bool is_reset;
};
struct cluster_history {
uint32_t resi[MAXSAMPLES];
int mode[MAXSAMPLES];
int64_t stime[MAXSAMPLES];
uint32_t hptr;
uint32_t hinvalid;
uint32_t htmr_wkup;
uint64_t entry_time;
int entry_idx;
int nsamp;
int flag;
};
struct lpm_cluster {
struct list_head list;
struct list_head child;
const char *cluster_name;
unsigned long aff_level; /* Affinity level of the node */
struct lpm_cluster_level levels[NR_LPM_LEVELS];
int nlevels;
int min_child_level;
int default_level;
int last_level;
uint32_t tmr_add;
bool lpm_prediction;
struct list_head cpu;
spinlock_t sync_lock;
struct cpumask child_cpus;
struct cpumask num_children_in_sync;
struct lpm_cluster *parent;
struct lpm_stats *stats;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
struct cluster_history history;
struct hrtimer histtimer;
};
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
void free_cluster_node(struct lpm_cluster *cluster);
void cluster_dt_walkthrough(struct lpm_cluster *cluster);
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
int lpm_cpu_mode_allow(unsigned int cpu,
unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
unsigned int mode, bool from_idle);
uint32_t *get_per_cpu_max_residency(int cpu);
uint32_t *get_per_cpu_min_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;
#endif /* __LPM_LEVELS_H__ */

@@ -439,6 +439,58 @@ config QGKI
This flag is absent if the build is a GKI build. If this flag is
enabled, all the ABI compatibilities are not applicable.
config MSM_PM
bool
select MSM_IDLE_STATS if DEBUG_FS
select QTI_SYSTEM_PM if QCOM_RPMH
config QTI_SYSTEM_PM
bool
if MSM_PM
menuconfig MSM_IDLE_STATS
bool "Collect idle statistics"
help
Collect various low power mode idle statistics for the cores
and export them via debugfs (lpm_stats). Users can read this
data to determine which low power modes the cores entered and
how many times each mode was entered.
if MSM_IDLE_STATS
config MSM_IDLE_STATS_FIRST_BUCKET
int "First bucket time"
default 62500
help
Upper time limit in nanoseconds of first bucket.
config MSM_IDLE_STATS_BUCKET_SHIFT
int "Bucket shift"
default 2
config MSM_IDLE_STATS_BUCKET_COUNT
int "Bucket count"
default 10
config MSM_SUSPEND_STATS_FIRST_BUCKET
int "First bucket time for suspend"
default 1000000000
help
Upper time limit in nanoseconds of first bucket of the
histogram. This is for collecting statistics on suspend.
endif # MSM_IDLE_STATS
endif # MSM_PM
config QTI_RPM_STATS_LOG
tristate "Qualcomm Technologies RPM Stats Driver"
depends on QCOM_RPMH
help
This option enables a driver which reads RPM messages from a shared
memory location. These messages provide statistical information about
the low power modes that RPM enters. The driver outputs the messages
via a sysfs node.
config QTI_DDR_STATS_LOG
tristate "Qualcomm Technologies Inc (QTI) DDR Stats Driver"
depends on QCOM_RPMH
@@ -457,5 +509,4 @@ config QCOM_SOC_SLEEP_STATS
the shared memory exported by the remote processor related to
various SoC level low power modes statistics and export to sysfs
interface.
endmenu

@@ -49,3 +49,6 @@ obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
obj-$(CONFIG_MSM_GLINK_SSR) += msm_glink_ssr.o
obj-$(CONFIG_QTI_DDR_STATS_LOG) += ddr_stats.o
obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
obj-$(CONFIG_MSM_IDLE_STATS) += lpm-stats.o
obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o

@@ -0,0 +1,819 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/lpm-stats.h>
#define MAX_STR_LEN 256
#define MAX_TIME_LEN 20
static const char *lpm_stats_reset = "reset";
static const char *lpm_stats_suspend = "suspend";
struct lpm_sleep_time {
struct kobj_attribute ts_attr;
unsigned int cpu;
};
struct level_stats {
const char *name;
struct lpm_stats *owner;
int64_t first_bucket_time;
int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int success_count;
int failed_count;
uint64_t total_time;
uint64_t enter_time;
};
static struct level_stats suspend_time_stats;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats);
static bool str_is_reset(const char __user *in, size_t count)
{
loff_t ppos = 0;
char buffer[64] = { 0 };
int ret = simple_write_to_buffer(buffer, sizeof(buffer) - 1,
&ppos, in, count - 1);
if (ret > 0)
return strcmp(buffer, lpm_stats_reset) ? false : true;
return false;
}
static uint64_t get_total_sleep_time(unsigned int cpu_id)
{
struct lpm_stats *stats = &per_cpu(cpu_stats, cpu_id);
int i;
uint64_t ret = 0;
for (i = 0; i < stats->num_levels; i++)
ret += stats->time_stats[i].total_time;
return ret;
}
static void update_level_stats(struct level_stats *stats, uint64_t t,
bool success)
{
uint64_t bt;
int i;
if (!success) {
stats->failed_count++;
return;
}
stats->success_count++;
stats->total_time += t;
bt = t;
do_div(bt, stats->first_bucket_time);
if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
i = DIV_ROUND_UP(fls((uint32_t)bt),
CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
else
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
stats->bucket[i]++;
if (t < stats->min_time[i] || !stats->max_time[i])
stats->min_time[i] = t;
if (t > stats->max_time[i])
stats->max_time[i] = t;
}
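/*
 * Worked example of the bucketing above, using the Kconfig defaults
 * (first bucket 62500 ns, bucket shift 2, 10 buckets): t = 300000 ns
 * gives bt = 300000 / 62500 = 4, fls(4) = 3, DIV_ROUND_UP(3, 2) = 2,
 * so the residency lands in bucket 2, which spans [250 us, 1 ms).
 * Each successive bucket is 1 << 2 = 4 times wider than the previous.
 */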
static void level_stats_print(struct seq_file *m, struct level_stats *stats)
{
int i = 0;
int64_t bucket_time = 0;
uint64_t s = stats->total_time;
uint32_t ns = do_div(s, NSEC_PER_SEC);
seq_printf(m, "[%s] %s:\n success count: %7d\n"
"total success time: %lld.%09u\n",
stats->owner->name, stats->name, stats->success_count, s, ns);
if (stats->failed_count)
seq_printf(m, " failed count: %7d\n", stats->failed_count);
bucket_time = stats->first_bucket_time;
for (i = 0;
i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
i++) {
s = bucket_time;
ns = do_div(s, NSEC_PER_SEC);
seq_printf(m, "\t<%6lld.%09u: %7d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
}
seq_printf(m, "\t>=%5lld.%09u:%8d (%lld-%lld)\n",
s, ns, stats->bucket[i],
stats->min_time[i],
stats->max_time[i]);
}
static int level_stats_file_show(struct seq_file *m, void *v)
{
struct level_stats *stats = (struct level_stats *) m->private;
level_stats_print(m, stats);
return 0;
}
static int level_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, level_stats_file_show, inode->i_private);
}
static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
int i = 0;
for (i = 0; i < stats->num_levels; i++)
level_stats_print(m, &stats->time_stats[i]);
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
level_stats_print_all(m, pos);
}
}
static void level_stats_reset(struct level_stats *stats)
{
memset(stats->bucket, 0, sizeof(stats->bucket));
memset(stats->min_time, 0, sizeof(stats->min_time));
memset(stats->max_time, 0, sizeof(stats->max_time));
stats->success_count = 0;
stats->failed_count = 0;
stats->total_time = 0;
}
static void level_stats_reset_all(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
int i = 0;
for (i = 0; i < stats->num_levels; i++)
level_stats_reset(&stats->time_stats[i]);
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
level_stats_reset_all(pos);
}
}
static int lpm_stats_file_show(struct seq_file *m, void *v)
{
struct lpm_stats *stats = (struct lpm_stats *)m->private;
level_stats_print_all(m, stats);
level_stats_print(m, &suspend_time_stats);
return 0;
}
static int lpm_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, lpm_stats_file_show, inode->i_private);
}
static ssize_t level_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
struct inode *in = file->f_inode;
struct level_stats *stats = (struct level_stats *)in->i_private;
if (!str_is_reset(buffer, count))
return -EINVAL;
level_stats_reset(stats);
return count;
}
static void reset_cpu_stats(void *info)
{
struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
int i;
for (i = 0; i < stats->num_levels; i++)
level_stats_reset(&stats->time_stats[i]);
}
static ssize_t lpm_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
struct inode *in = file->f_inode;
struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
if (!str_is_reset(buffer, count))
return -EINVAL;
level_stats_reset_all(stats);
/*
* Wake up each CPU and reset its stats from that CPU itself,
* so that the timestamps used for accounting are consistent.
*/
on_each_cpu(reset_cpu_stats, NULL, 1);
return count;
}
static int lifo_stats_file_show(struct seq_file *m, void *v)
{
struct lpm_stats *stats = (struct lpm_stats *)m->private;
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
if (list_empty(&stats->child)) {
pr_err("%s: ERROR: Lifo level with no children\n",
__func__);
return -EINVAL;
}
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
seq_printf(m, "%s:\n\tLast-In:%u\n\tFirst-Out:%u\n",
pos->name,
pos->lifo.last_in,
pos->lifo.first_out);
}
return 0;
}
static int lifo_stats_file_open(struct inode *inode, struct file *file)
{
return single_open(file, lifo_stats_file_show, inode->i_private);
}
static void lifo_stats_reset_all(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
pos->lifo.last_in = 0;
pos->lifo.first_out = 0;
if (!list_empty(&pos->child))
lifo_stats_reset_all(pos);
}
}
static ssize_t lifo_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
struct inode *in = file->f_inode;
struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
if (!str_is_reset(buffer, count))
return -EINVAL;
lifo_stats_reset_all(stats);
return count;
}
static const struct file_operations level_stats_fops = {
.owner = THIS_MODULE,
.open = level_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = level_stats_file_write,
};
static const struct file_operations lpm_stats_fops = {
.owner = THIS_MODULE,
.open = lpm_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = lpm_stats_file_write,
};
static const struct file_operations lifo_stats_fops = {
.owner = THIS_MODULE,
.open = lifo_stats_file_open,
.read = seq_read,
.release = single_release,
.llseek = no_llseek,
.write = lifo_stats_file_write,
};
static void update_last_in_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
pos->lifo.last_in++;
return;
}
}
}
static void update_first_out_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
if (list_empty(&stats->child))
return;
centry = &stats->child;
list_for_each_entry(pos, centry, sibling) {
if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
pos->lifo.first_out++;
return;
}
}
}
static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index,
bool success)
{
uint64_t exit_time = 0;
/* Update time stats only when exit is preceded by enter */
if (stats->sleep_time < 0)
success = false;
else
exit_time = stats->sleep_time;
update_level_stats(&stats->time_stats[index], exit_time,
success);
}
static int config_level(const char *name, const char **levels,
int num_levels, struct lpm_stats *parent, struct lpm_stats *stats)
{
int i = 0;
struct dentry *directory = NULL;
const char *rootname = "lpm_stats";
const char *dirname = rootname;
strlcpy(stats->name, name, MAX_STR_LEN);
stats->num_levels = num_levels;
stats->parent = parent;
INIT_LIST_HEAD(&stats->sibling);
INIT_LIST_HEAD(&stats->child);
stats->time_stats = kcalloc(num_levels, sizeof(*stats->time_stats),
GFP_KERNEL);
if (!stats->time_stats)
return -ENOMEM;
if (parent) {
list_add_tail(&stats->sibling, &parent->child);
directory = parent->directory;
dirname = name;
}
stats->directory = debugfs_create_dir(dirname, directory);
if (!stats->directory) {
pr_err("%s: Unable to create %s debugfs directory\n",
__func__, dirname);
kfree(stats->time_stats);
return -EPERM;
}
for (i = 0; i < num_levels; i++) {
stats->time_stats[i].name = levels[i];
stats->time_stats[i].owner = stats;
stats->time_stats[i].first_bucket_time =
CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
stats->time_stats[i].enter_time = 0;
if (!debugfs_create_file(stats->time_stats[i].name, 0444,
stats->directory, (void *)&stats->time_stats[i],
&level_stats_fops)) {
pr_err("%s: Unable to create %s %s level-stats file\n",
__func__, stats->name,
stats->time_stats[i].name);
kfree(stats->time_stats);
return -EPERM;
}
}
if (!debugfs_create_file("stats", 0444, stats->directory,
(void *)stats, &lpm_stats_fops)) {
pr_err("%s: Unable to create %s's overall 'stats' file\n",
__func__, stats->name);
kfree(stats->time_stats);
return -EPERM;
}
return 0;
}
static ssize_t total_sleep_time_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct lpm_sleep_time *cpu_sleep_time = container_of(attr,
struct lpm_sleep_time, ts_attr);
unsigned int cpu = cpu_sleep_time->cpu;
uint64_t total_time = get_total_sleep_time(cpu);
/* do_div() divides total_time in place and returns the remainder */
uint32_t remainder = do_div(total_time, NSEC_PER_SEC);
return scnprintf(buf, MAX_TIME_LEN, "%llu.%09u\n", total_time,
remainder);
}
static struct kobject *local_module_kobject(void)
{
struct kobject *kobj;
kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!kobj) {
int err;
struct module_kobject *mk;
mk = kcalloc(1, sizeof(*mk), GFP_KERNEL);
if (!mk)
return ERR_PTR(-ENOMEM);
mk->mod = THIS_MODULE;
mk->kobj.kset = module_kset;
err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
"%s", KBUILD_MODNAME);
if (err) {
kobject_put(&mk->kobj);
kfree(mk);
pr_err("%s: cannot create kobject for %s\n",
__func__, KBUILD_MODNAME);
return ERR_PTR(err);
}
kobject_get(&mk->kobj);
kobj = &mk->kobj;
}
return kobj;
}
static int create_sysfs_node(unsigned int cpu, struct lpm_stats *stats)
{
struct kobject *cpu_kobj = NULL;
struct lpm_sleep_time *ts = NULL;
struct kobject *stats_kobj;
char cpu_name[10] = { 0 };
int ret = -ENOMEM;
stats_kobj = local_module_kobject();
if (IS_ERR_OR_NULL(stats_kobj))
return PTR_ERR(stats_kobj);
snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
cpu_kobj = kobject_create_and_add(cpu_name, stats_kobj);
if (!cpu_kobj)
return -ENOMEM;
ts = kcalloc(1, sizeof(*ts), GFP_KERNEL);
if (!ts)
goto failed;
sysfs_attr_init(&ts->ts_attr.attr);
ts->ts_attr.attr.name = "total_sleep_time_secs";
ts->ts_attr.attr.mode = 0444;
ts->ts_attr.show = total_sleep_time_show;
ts->ts_attr.store = NULL;
ts->cpu = cpu;
ret = sysfs_create_file(cpu_kobj, &ts->ts_attr.attr);
if (ret)
goto failed;
return 0;
failed:
kfree(ts);
kobject_put(cpu_kobj);
return ret;
}
static struct lpm_stats *config_cpu_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
int cpu = 0;
struct lpm_stats *pstats = NULL;
struct lpm_stats *stats = NULL;
for (pstats = parent; pstats; pstats = pstats->parent)
cpumask_or(&pstats->mask, &pstats->mask, mask);
for_each_cpu(cpu, mask) {
int ret = 0;
char cpu_name[16] = { 0 };
stats = &per_cpu(cpu_stats, cpu);
snprintf(cpu_name, sizeof(cpu_name), "%s%d", name, cpu);
cpumask_set_cpu(cpu, &stats->mask);
stats->is_cpu = true;
ret = config_level(cpu_name, levels, num_levels, parent,
stats);
if (ret) {
pr_err("%s: Unable to create %s stats\n",
__func__, cpu_name);
return ERR_PTR(ret);
}
ret = create_sysfs_node(cpu, stats);
if (ret) {
pr_err("Could not create the sysfs node\n");
return ERR_PTR(ret);
}
}
return stats;
}
static void config_suspend_level(struct lpm_stats *stats)
{
suspend_time_stats.name = lpm_stats_suspend;
suspend_time_stats.owner = stats;
suspend_time_stats.first_bucket_time =
CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
suspend_time_stats.enter_time = 0;
suspend_time_stats.success_count = 0;
suspend_time_stats.failed_count = 0;
if (!debugfs_create_file(suspend_time_stats.name, 0444,
stats->directory, (void *)&suspend_time_stats,
&level_stats_fops))
pr_err("%s: Unable to create %s Suspend stats file\n",
__func__, stats->name);
}
static struct lpm_stats *config_cluster_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent)
{
struct lpm_stats *stats = NULL;
int ret = 0;
stats = kcalloc(1, sizeof(*stats), GFP_KERNEL);
if (!stats)
return ERR_PTR(-ENOMEM);
stats->is_cpu = false;
ret = config_level(name, levels, num_levels, parent, stats);
if (ret) {
pr_err("%s: Unable to create %s stats\n", __func__,
name);
kfree(stats);
return ERR_PTR(ret);
}
if (!debugfs_create_file("lifo", 0444, stats->directory,
(void *)stats, &lifo_stats_fops)) {
pr_err("%s: Unable to create %s lifo stats file\n",
__func__, stats->name);
kfree(stats);
return ERR_PTR(-EPERM);
}
if (!parent)
config_suspend_level(stats);
return stats;
}
static void cleanup_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
struct lpm_stats *n = NULL;
centry = &stats->child;
list_for_each_entry_safe_reverse(pos, n, centry, sibling) {
if (!list_empty(&pos->child)) {
cleanup_stats(pos);
continue;
}
list_del_init(&pos->child);
kfree(pos->time_stats);
if (!pos->is_cpu)
kfree(pos);
}
kfree(stats->time_stats);
kfree(stats);
}
static void lpm_stats_cleanup(struct lpm_stats *stats)
{
struct lpm_stats *pstats = stats;
if (!pstats)
return;
while (pstats->parent)
pstats = pstats->parent;
debugfs_remove_recursive(pstats->directory);
cleanup_stats(pstats);
}
/**
* lpm_stats_config_level() - API to configure level stats.
*
* @name: Name of the cluster/cpu.
* @levels: Low power mode level names.
* @num_levels: Number of levels supported.
* @parent: Pointer to the parent's lpm_stats object.
* @mask: cpumask, if configuring cpu stats, else NULL.
*
* Function to communicate the low power mode levels supported by
* cpus or a cluster.
*
* Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO)
*/
struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
struct lpm_stats *stats = NULL;
if (!levels || num_levels <= 0 || IS_ERR(parent)) {
pr_err("%s: Invalid input\n", __func__);
return ERR_PTR(-EINVAL);
}
if (mask)
stats = config_cpu_level(name, levels, num_levels, parent,
mask);
else
stats = config_cluster_level(name, levels, num_levels,
parent);
if (IS_ERR(stats)) {
lpm_stats_cleanup(parent);
return stats;
}
return stats;
}
EXPORT_SYMBOL(lpm_stats_config_level);
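/*
 * Hypothetical registration sketch (the in-tree caller is lpm-levels.c,
 * whose diff is suppressed above). A NULL mask configures a cluster
 * node; a non-NULL mask configures the per-CPU stats beneath it.
 */
static struct lpm_stats *lpm_stats_example(struct cpumask *mask)
{
	static const char *lvls[] = { "wfi", "pc" };
	struct lpm_stats *cluster;

	cluster = lpm_stats_config_level("L3", lvls, 2, NULL, NULL);
	if (IS_ERR(cluster))
		return cluster;

	/* creates debugfs dirs cpuN/ and sysfs total_sleep_time_secs */
	return lpm_stats_config_level("cpu", lvls, 2, cluster, mask);
}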
/**
* lpm_stats_cluster_enter() - API to communicate the lpm level a cluster
* is prepared to enter.
*
* @stats: Pointer to the cluster's lpm_stats object.
* @index: Index of the lpm level that the cluster is going to enter.
*
* Function to communicate the low power mode level that the cluster is
* prepared to enter.
*/
void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index)
{
if (IS_ERR_OR_NULL(stats))
return;
update_last_in_stats(stats);
}
EXPORT_SYMBOL(lpm_stats_cluster_enter);
/**
* lpm_stats_cluster_exit() - API to communicate the lpm level a cluster
* exited.
*
* @stats: Pointer to the cluster's lpm_stats object.
* @index: Index of the cluster lpm level.
* @success: Success/Failure of the low power mode execution.
*
* Function to communicate the low power mode level that the cluster
* exited.
*/
void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
bool success)
{
if (IS_ERR_OR_NULL(stats))
return;
update_exit_stats(stats, index, success);
update_first_out_stats(stats);
}
EXPORT_SYMBOL(lpm_stats_cluster_exit);
/**
* lpm_stats_cpu_enter() - API to communicate the lpm level a cpu
* is prepared to enter.
*
* @index: cpu's lpm level index.
* @time: timestamp at idle entry, used to compute the residency on exit.
*
* Function to communicate the low power mode level that the cpu is
* prepared to enter.
*/
void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
{
struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
stats->sleep_time = time;
if (!stats->time_stats)
return;
}
EXPORT_SYMBOL(lpm_stats_cpu_enter);
/**
* lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited.
*
* @index: cpu's lpm level index.
* @time: timestamp at idle exit; the residency is time minus the entry
* timestamp recorded in lpm_stats_cpu_enter().
* @success: Success/Failure of the low power mode execution.
*
* Function to communicate the low power mode level that the cpu exited.
*/
void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success)
{
struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
if (!stats->time_stats)
return;
stats->sleep_time = time - stats->sleep_time;
update_exit_stats(stats, index, success);
}
EXPORT_SYMBOL(lpm_stats_cpu_exit);
/**
* lpm_stats_suspend_enter() - API to communicate system entering suspend.
*
* Function to communicate that the system is ready to enter suspend.
*/
void lpm_stats_suspend_enter(void)
{
struct timespec ts;
getnstimeofday(&ts);
suspend_time_stats.enter_time = timespec_to_ns(&ts);
}
EXPORT_SYMBOL(lpm_stats_suspend_enter);
/**
* lpm_stats_suspend_exit() - API to communicate system exiting suspend.
*
* Function to communicate that the system exited suspend.
*/
void lpm_stats_suspend_exit(void)
{
struct timespec ts;
uint64_t exit_time = 0;
getnstimeofday(&ts);
exit_time = timespec_to_ns(&ts) - suspend_time_stats.enter_time;
update_level_stats(&suspend_time_stats, exit_time, true);
}
EXPORT_SYMBOL(lpm_stats_suspend_exit);

@@ -0,0 +1,281 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>
#include <linux/soc/qcom/smem.h>
#include <asm/arch_timer.h>
#include "rpmh_master_stat.h"
#define UNIT_DIST 0x14
#define REG_VALID 0x0
#define REG_DATA_LO 0x4
#define REG_DATA_HI 0x8
#define GET_ADDR(REG, UNIT_NO) (REG + (UNIT_DIST * UNIT_NO))
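/*
 * Worked example of the layout above: profile unit 2 sits at
 * base + 2 * 0x14. Within the unit, the valid flag is at offset 0x0
 * and the 64-bit timestamp is split across offsets 0x4 (low word)
 * and 0x8 (high word), as read back in msm_rpmh_master_stats_update().
 */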
/**************** Remove this *********************************/
#define qcom_smem_get(a, b, c) c
/*************************************************************/
enum master_smem_id {
MPSS = 605,
ADSP,
CDSP,
SLPI,
GPU,
DISPLAY,
};
enum master_pid {
PID_APSS = 0,
PID_MPSS = 1,
PID_ADSP = 2,
PID_SLPI = 3,
PID_CDSP = 5,
PID_GPU = PID_APSS,
PID_DISPLAY = PID_APSS,
};
enum profile_data {
POWER_DOWN_START,
POWER_UP_END,
POWER_DOWN_END,
POWER_UP_START,
NUM_UNIT,
};
struct msm_rpmh_master_data {
char *master_name;
enum master_smem_id smem_id;
enum master_pid pid;
};
static const struct msm_rpmh_master_data rpmh_masters[] = {
{"MPSS", MPSS, PID_MPSS},
{"ADSP", ADSP, PID_ADSP},
{"CDSP", CDSP, PID_CDSP},
{"SLPI", SLPI, PID_SLPI},
{"GPU", GPU, PID_GPU},
{"DISPLAY", DISPLAY, PID_DISPLAY},
};
struct msm_rpmh_master_stats {
uint32_t version_id;
uint32_t counts;
uint64_t last_entered;
uint64_t last_exited;
uint64_t accumulated_duration;
};
struct msm_rpmh_profile_unit {
uint64_t value;
uint64_t valid;
};
struct rpmh_master_stats_prv_data {
struct kobj_attribute ka;
struct kobject *kobj;
};
static struct msm_rpmh_master_stats apss_master_stats;
static void __iomem *rpmh_unit_base;
static DEFINE_MUTEX(rpmh_stats_mutex);
static ssize_t msm_rpmh_master_stats_print_data(char *prvbuf, ssize_t length,
struct msm_rpmh_master_stats *record,
const char *name)
{
uint64_t accumulated_duration = record->accumulated_duration;
/*
* If a master is in sleep when reading the sleep stats from SMEM
* adjust the accumulated sleep duration to show actual sleep time.
* This ensures that the displayed stats are real when used for
* the purpose of computing battery utilization.
*/
if (record->last_entered > record->last_exited)
accumulated_duration +=
(__arch_counter_get_cntvct()
- record->last_entered);
return scnprintf(prvbuf, length, "%s\n\tVersion:0x%x\n"
"\tSleep Count:0x%x\n"
"\tSleep Last Entered At:0x%llx\n"
"\tSleep Last Exited At:0x%llx\n"
"\tSleep Accumulated Duration:0x%llx\n\n",
name, record->version_id, record->counts,
record->last_entered, record->last_exited,
accumulated_duration);
}
static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t length;
int i = 0;
size_t size = 0;
struct msm_rpmh_master_stats *record = NULL;
mutex_lock(&rpmh_stats_mutex);
/* First Read APSS master stats */
length = msm_rpmh_master_stats_print_data(buf, PAGE_SIZE,
&apss_master_stats, "APSS");
/* Read SMEM data written by other masters */
for (i = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
record = (struct msm_rpmh_master_stats *) qcom_smem_get(
rpmh_masters[i].pid,
rpmh_masters[i].smem_id, &size);
if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
length += msm_rpmh_master_stats_print_data(
buf + length, PAGE_SIZE - length,
record,
rpmh_masters[i].master_name);
}
mutex_unlock(&rpmh_stats_mutex);
return length;
}
static inline void msm_rpmh_apss_master_stats_update(
struct msm_rpmh_profile_unit *profile_unit)
{
apss_master_stats.counts++;
apss_master_stats.last_entered = profile_unit[POWER_DOWN_END].value;
apss_master_stats.last_exited = profile_unit[POWER_UP_START].value;
apss_master_stats.accumulated_duration +=
(apss_master_stats.last_exited
- apss_master_stats.last_entered);
}
void msm_rpmh_master_stats_update(void)
{
int i;
struct msm_rpmh_profile_unit profile_unit[NUM_UNIT];
if (!rpmh_unit_base)
return;
for (i = POWER_DOWN_END; i < NUM_UNIT; i++) {
profile_unit[i].valid = readl_relaxed(rpmh_unit_base +
GET_ADDR(REG_VALID, i));
/*
* Do not update APSS stats if valid bit is not set.
* It means APSS did not execute cx-off sequence.
* This can be due to fall through at some point.
*/
if (!(profile_unit[i].valid & BIT(REG_VALID)))
return;
profile_unit[i].value = readl_relaxed(rpmh_unit_base +
GET_ADDR(REG_DATA_LO, i));
profile_unit[i].value |= ((uint64_t)
readl_relaxed(rpmh_unit_base +
GET_ADDR(REG_DATA_HI, i)) << 32);
}
msm_rpmh_apss_master_stats_update(profile_unit);
}
EXPORT_SYMBOL(msm_rpmh_master_stats_update);
static int msm_rpmh_master_stats_probe(struct platform_device *pdev)
{
struct rpmh_master_stats_prv_data *prvdata = NULL;
struct kobject *rpmh_master_stats_kobj = NULL;
int ret = -ENOMEM;
prvdata = devm_kzalloc(&pdev->dev, sizeof(*prvdata), GFP_KERNEL);
if (!prvdata)
return ret;
rpmh_master_stats_kobj = kobject_create_and_add(
"rpmh_stats",
power_kobj);
if (!rpmh_master_stats_kobj)
return ret;
prvdata->kobj = rpmh_master_stats_kobj;
sysfs_attr_init(&prvdata->ka.attr);
prvdata->ka.attr.mode = 0444;
prvdata->ka.attr.name = "master_stats";
prvdata->ka.show = msm_rpmh_master_stats_show;
prvdata->ka.store = NULL;
ret = sysfs_create_file(prvdata->kobj, &prvdata->ka.attr);
if (ret) {
pr_err("sysfs_create_file failed\n");
goto fail_sysfs;
}
rpmh_unit_base = of_iomap(pdev->dev.of_node, 0);
if (!rpmh_unit_base) {
pr_err("Failed to get rpmh_unit_base\n");
ret = -ENOMEM;
goto fail_iomap;
}
apss_master_stats.version_id = 0x1;
platform_set_drvdata(pdev, prvdata);
return ret;
fail_iomap:
sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
fail_sysfs:
kobject_put(prvdata->kobj);
return ret;
}
static int msm_rpmh_master_stats_remove(struct platform_device *pdev)
{
struct rpmh_master_stats_prv_data *prvdata;
prvdata = (struct rpmh_master_stats_prv_data *)
platform_get_drvdata(pdev);
sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
kobject_put(prvdata->kobj);
platform_set_drvdata(pdev, NULL);
iounmap(rpmh_unit_base);
rpmh_unit_base = NULL;
return 0;
}
static const struct of_device_id rpmh_master_table[] = {
{.compatible = "qcom,rpmh-master-stats-v1"},
{},
};
static struct platform_driver msm_rpmh_master_stats_driver = {
.probe = msm_rpmh_master_stats_probe,
.remove = msm_rpmh_master_stats_remove,
.driver = {
.name = "msm_rpmh_master_stats",
.of_match_table = rpmh_master_table,
},
};
module_platform_driver(msm_rpmh_master_stats_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM RPMH Master Statistics driver");
MODULE_ALIAS("platform:msm_rpmh_master_stat_log");

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_RPM_STATS_LOG_H__
#define __QCOM_RPM_STATS_LOG_H__
#if defined(CONFIG_QTI_RPM_STATS_LOG)
void msm_rpmh_master_stats_update(void);
#else
static inline void msm_rpmh_master_stats_update(void) {}
#endif
#endif /* __QCOM_RPM_STATS_LOG_H__ */

@@ -0,0 +1,96 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/qcom/rpmh.h>
#include <clocksource/arm_arch_timer.h>
#include <soc/qcom/lpm_levels.h>
#include "rpmh_master_stat.h"
#define PDC_TIME_VALID_SHIFT 31
#define PDC_TIME_UPPER_MASK 0xFFFFFF
static struct device *dev;
static int setup_wakeup(uint32_t lo, uint32_t hi)
{
struct tcs_cmd cmd[2] = { { 0 } };
cmd[0].data = hi & PDC_TIME_UPPER_MASK;
cmd[0].data |= 1 << PDC_TIME_VALID_SHIFT;
cmd[1].data = lo;
return rpmh_write_pdc_data(dev, cmd, ARRAY_SIZE(cmd));
}
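/*
 * Worked example of the packing above: for a 64-bit match value
 * 0x00ABCDEF12345678, hi = 0x00ABCDEF and lo = 0x12345678, so
 * cmd[0].data = (0x00ABCDEF & 0xFFFFFF) | (1 << 31) = 0x80ABCDEF
 * and cmd[1].data = 0x12345678.
 */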
static int system_sleep_update_wakeup(bool from_idle)
{
uint32_t lo = ~0U, hi = ~0U;
/* Read the hardware to get the most accurate value */
arch_timer_mem_get_cval(&lo, &hi);
return setup_wakeup(lo, hi);
}
/**
* system_sleep_allowed() - Returns whether it is okay to enter system low power modes
*/
static bool system_sleep_allowed(void)
{
return rpmh_ctrlr_idle(dev);
}
/**
* system_sleep_enter() - Activities done when entering system low power modes
*
* Returns 0 for success or error values from writing the sleep/wake values to
* the hardware block.
*/
static int system_sleep_enter(struct cpumask *mask)
{
return rpmh_flush(dev);
}
/**
* system_sleep_exit() - Activities done when exiting system low power modes
*/
static void system_sleep_exit(bool success)
{
if (success)
msm_rpmh_master_stats_update();
}
static struct system_pm_ops pm_ops = {
.enter = system_sleep_enter,
.exit = system_sleep_exit,
.update_wakeup = system_sleep_update_wakeup,
.sleep_allowed = system_sleep_allowed,
};
static int sys_pm_probe(struct platform_device *pdev)
{
dev = &pdev->dev;
return register_system_pm_ops(&pm_ops);
}
static const struct of_device_id sys_pm_drv_match[] = {
{ .compatible = "qcom,system-pm", },
{ }
};
static struct platform_driver sys_pm_driver = {
.probe = sys_pm_probe,
.driver = {
.name = KBUILD_MODNAME,
.suppress_bind_attrs = true,
.of_match_table = sys_pm_drv_match,
},
};
builtin_platform_driver(sys_pm_driver);
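
A sketch of how the LPM driver is assumed to drive these ops around a
system-wide low power mode (the actual consumer lives in the suppressed
lpm-levels.c diff above):

	static int system_sleep_sketch(struct system_pm_ops *ops,
				       struct cpumask *mask, bool from_idle)
	{
		int ret;

		if (!ops->sleep_allowed())
			return -EBUSY;

		ret = ops->update_wakeup(from_idle);	/* program PDC wakeup */
		if (ret)
			return ret;

		ret = ops->enter(mask);			/* flush rpmh votes */
		/* PSCI cluster power down/up happens here */
		ops->exit(ret == 0);			/* update master stats */
		return ret;
	}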

@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2015,2017,2019 The Linux Foundation. All rights reserved.
*/
#ifndef __ARCH_ARM_MACH_MSM_LPM_STATS_H
#define __ARCH_ARM_MACH_MSM_LPM_STATS_H
struct lpm_stats;
#define MAX_STR_LEN 256
struct lifo_stats {
uint32_t last_in;
uint32_t first_out;
};
struct lpm_stats {
char name[MAX_STR_LEN];
struct level_stats *time_stats;
uint32_t num_levels;
struct lifo_stats lifo;
struct lpm_stats *parent;
struct list_head sibling;
struct list_head child;
struct cpumask mask;
struct dentry *directory;
int64_t sleep_time;
bool is_cpu;
};
#ifdef CONFIG_MSM_IDLE_STATS
struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask);
void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index);
void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
bool success);
void lpm_stats_cpu_enter(uint32_t index, uint64_t time);
void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success);
void lpm_stats_suspend_enter(void);
void lpm_stats_suspend_exit(void);
#else
static inline struct lpm_stats *lpm_stats_config_level(const char *name,
const char **levels, int num_levels, struct lpm_stats *parent,
struct cpumask *mask)
{
return ERR_PTR(-ENODEV);
}
static inline void lpm_stats_cluster_enter(struct lpm_stats *stats,
uint32_t index)
{ }
static inline void lpm_stats_cluster_exit(struct lpm_stats *stats,
uint32_t index, bool success)
{ }
static inline void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
{ }
static inline void lpm_stats_cpu_exit(uint32_t index, uint64_t time,
bool success)
{ }
static inline void lpm_stats_suspend_enter(void)
{ }
static inline void lpm_stats_suspend_exit(void)
{ }
#endif
#endif /* __ARCH_ARM_MACH_MSM_LPM_STATS_H */

@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SOC_QCOM_LPM_LEVEL_H__
#define __SOC_QCOM_LPM_LEVEL_H__
struct system_pm_ops {
int (*enter)(struct cpumask *mask);
void (*exit)(bool success);
int (*update_wakeup)(bool b);
bool (*sleep_allowed)(void);
};
#ifdef CONFIG_MSM_PM
uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops);
#else
static inline uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
{ return -ENODEV; }
#endif
#endif

include/soc/qcom/pm.h (new file, 31 lines)

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*/
#ifndef __ARCH_ARM_MACH_MSM_PM_H
#define __ARCH_ARM_MACH_MSM_PM_H
#include <linux/types.h>
#include <linux/cpuidle.h>
struct latency_level {
int affinity_level;
int reset_level;
const char *level_name;
};
#ifdef CONFIG_MSM_PM
s32 msm_cpuidle_get_deep_idle_latency(void);
#else
static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
#endif
#endif /* __ARCH_ARM_MACH_MSM_PM_H */

@@ -0,0 +1,249 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM msm_low_power
#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MSM_LOW_POWER_H_
#include <linux/tracepoint.h>
TRACE_EVENT(cpu_power_select,
TP_PROTO(int index, u32 sleep_us, u32 latency, u32 next_event_us),
TP_ARGS(index, sleep_us, latency, next_event_us),
TP_STRUCT__entry(
__field(int, index)
__field(u32, sleep_us)
__field(u32, latency)
__field(u32, next_event_us)
),
TP_fast_assign(
__entry->index = index;
__entry->sleep_us = sleep_us;
__entry->latency = latency;
__entry->next_event_us = next_event_us;
),
TP_printk("idx:%d sleep_time:%u latency:%u next_event:%u",
__entry->index, __entry->sleep_us, __entry->latency,
__entry->next_event_us)
);
TRACE_EVENT(cpu_pred_select,
TP_PROTO(u32 predtype, u64 predicted, u32 tmr_time),
TP_ARGS(predtype, predicted, tmr_time),
TP_STRUCT__entry(
__field(u32, predtype)
__field(u64, predicted)
__field(u32, tmr_time)
),
TP_fast_assign(
__entry->predtype = predtype;
__entry->predicted = predicted;
__entry->tmr_time = tmr_time;
),
TP_printk("pred:%u time:%lu tmr_time:%u",
__entry->predtype, (unsigned long)__entry->predicted,
__entry->tmr_time)
);
TRACE_EVENT(cpu_pred_hist,
TP_PROTO(int idx, u32 resi, u32 sample, u32 tmr),
TP_ARGS(idx, resi, sample, tmr),
TP_STRUCT__entry(
__field(int, idx)
__field(u32, resi)
__field(u32, sample)
__field(u32, tmr)
),
TP_fast_assign(
__entry->idx = idx;
__entry->resi = resi;
__entry->sample = sample;
__entry->tmr = tmr;
),
TP_printk("idx:%d resi:%u sample:%u tmr:%u",
__entry->idx, __entry->resi,
__entry->sample, __entry->tmr)
);
TRACE_EVENT(cpu_idle_enter,
TP_PROTO(int index),
TP_ARGS(index),
TP_STRUCT__entry(
__field(int, index)
),
TP_fast_assign(
__entry->index = index;
),
TP_printk("idx:%d",
__entry->index)
);
TRACE_EVENT(cpu_idle_exit,
TP_PROTO(int index, bool success),
TP_ARGS(index, success),
TP_STRUCT__entry(
__field(int, index)
__field(bool, success)
),
TP_fast_assign(
__entry->index = index;
__entry->success = success;
),
TP_printk("idx:%d success:%d",
__entry->index,
__entry->success)
);
TRACE_EVENT(cluster_enter,
TP_PROTO(const char *name, int index, unsigned long sync_cpus,
unsigned long child_cpus, bool from_idle),
TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
TP_STRUCT__entry(
__string(name, name)
__field(int, index)
__field(unsigned long, sync_cpus)
__field(unsigned long, child_cpus)
__field(bool, from_idle)
),
TP_fast_assign(
__assign_str(name, name);
__entry->index = index;
__entry->sync_cpus = sync_cpus;
__entry->child_cpus = child_cpus;
__entry->from_idle = from_idle;
),
TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
__get_str(name),
__entry->index,
__entry->sync_cpus,
__entry->child_cpus,
__entry->from_idle)
);
TRACE_EVENT(cluster_exit,
TP_PROTO(const char *name, int index, unsigned long sync_cpus,
unsigned long child_cpus, bool from_idle),
TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
TP_STRUCT__entry(
__string(name, name)
__field(int, index)
__field(unsigned long, sync_cpus)
__field(unsigned long, child_cpus)
__field(bool, from_idle)
),
TP_fast_assign(
__assign_str(name, name);
__entry->index = index;
__entry->sync_cpus = sync_cpus;
__entry->child_cpus = child_cpus;
__entry->from_idle = from_idle;
),
TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
__get_str(name),
__entry->index,
__entry->sync_cpus,
__entry->child_cpus,
__entry->from_idle)
);
TRACE_EVENT(cluster_pred_select,
TP_PROTO(const char *name, int index, u32 sleep_us,
u32 latency, int pred, u32 pred_us),
TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
TP_STRUCT__entry(
__string(name, name)
__field(int, index)
__field(u32, sleep_us)
__field(u32, latency)
__field(int, pred)
__field(u32, pred_us)
),
TP_fast_assign(
__assign_str(name, name);
__entry->index = index;
__entry->sleep_us = sleep_us;
__entry->latency = latency;
__entry->pred = pred;
__entry->pred_us = pred_us;
),
TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
__get_str(name), __entry->index, __entry->sleep_us,
__entry->latency, __entry->pred, __entry->pred_us)
);
TRACE_EVENT(cluster_pred_hist,
TP_PROTO(const char *name, int idx, u32 resi,
u32 sample, u32 tmr),
TP_ARGS(name, idx, resi, sample, tmr),
TP_STRUCT__entry(
__string(name, name)
__field(int, idx)
__field(u32, resi)
__field(u32, sample)
__field(u32, tmr)
),
TP_fast_assign(
__assign_str(name, name);
__entry->idx = idx;
__entry->resi = resi;
__entry->sample = sample;
__entry->tmr = tmr;
),
TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
__get_str(name), __entry->idx, __entry->resi,
__entry->sample, __entry->tmr)
);
#endif
#define TRACE_INCLUDE_FILE trace_msm_low_power
#include <trace/define_trace.h>