drivers: cpuidle: Add a simple governor for qcom chipsets

Add a simple governor for the QCOM chipsets which allows entering the
deeper LPMs more aggressively. It also provides an interface to tune
cpuidle parameters such as latency and residency as required.

Change-Id: I240765bd0a188775cf84a858a5b7a4e7d58f4c60
Signed-off-by: Raghavendra Kakarla <quic_rkakarla@quicinc.com>
This commit is contained in:
Raghavendra Kakarla 2022-11-21 11:39:32 +05:30 committed by Gerrit - the friendly Code Review server
parent a47bc131cd
commit 0710af9a00
6 changed files with 804 additions and 0 deletions

View File

@ -57,6 +57,17 @@ config CPU_IDLE_GOV_QCOM_LPM
CPU and the idle state chosen based on the parameters are all
logged in the trace.
config CPU_IDLE_SIMPLE_GOV_QCOM_LPM
tristate "Qualcomm Technologies, Inc. CPU and Cluster simple governor"
depends on ARCH_QCOM
depends on ARM_PSCI_CPUIDLE
help
This governor allows the CPU and the cluster to enter deeper idle states more
aggressively by reducing cpuidle parameters such as the entry/exit latencies
and the residency. It also lets the user update the cpuidle parameters to
allow or restrict a particular idle state more aggressively.
config DT_IDLE_STATES
bool

View File

@ -6,6 +6,10 @@
CFLAGS_qcom_lpm.o := -I$(src)
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
# Simple QCOM LPM governor: one module built from three objects.
obj-$(CONFIG_CPU_IDLE_SIMPLE_GOV_QCOM_LPM) += qcom_simple_lpm.o
qcom_simple_lpm-y += qcom-simple-lpm.o
qcom_simple_lpm-y += qcom-simple-cluster-lpm.o
qcom_simple_lpm-y += qcom-simple-lpm-sysfs.o
obj-$(CONFIG_CPU_IDLE_GOV_QCOM_LPM) += qcom_lpm.o
qcom_lpm-y += qcom-lpm.o
qcom_lpm-y += qcom-cluster-lpm.o

View File

@ -0,0 +1,168 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
#include <trace/hooks/pm_domain.h>
#endif
#define CREATE_TRACE_POINTS
#include "qcom-simple-lpm.h"
/* All cluster devices managed by this governor, linked via simple_lpm_cluster::list. */
LIST_HEAD(cluster_dev_list);
/* Divisor currently applied to the genpd state residency/latency values. */
u64 cluster_cur_div = 500;

/*
 * to_cluster - find the simple_lpm_cluster wrapping @genpd.
 *
 * Returns NULL when @genpd is not managed by this governor.
 */
static struct simple_lpm_cluster *to_cluster(struct generic_pm_domain *genpd)
{
	struct simple_lpm_cluster *cluster_simple_gov;

	list_for_each_entry(cluster_simple_gov, &cluster_dev_list, list)
		if (cluster_simple_gov->genpd == genpd)
			return cluster_simple_gov;

	return NULL;
}
/*
 * update_cluster_select - gov_ops.select callback; intentionally a no-op.
 * Cluster state selection is left to the genpd core / vendor hook below.
 */
void update_cluster_select(struct simple_lpm_cpu *cpu_gov)
{
}
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
/*
 * Android vendor hook: tell the genpd core whether domain state @idx may
 * be entered, based on the per-cluster sysfs "disable" knobs.
 */
static void android_vh_allow_domain_state(void *unused,
					  struct generic_pm_domain *genpd,
					  uint32_t idx, bool *allow)
{
	struct simple_lpm_cluster *cluster_simple_gov = to_cluster(genpd);

	if (!cluster_simple_gov)
		return;

	*allow = cluster_simple_gov->state_allowed[idx];
}
#endif
/*
 * simple_lpm_cluster_simple_gov_remove - undo the probe-time scaling.
 *
 * Restores the genpd state residency/latency values by multiplying with
 * cluster_cur_div (the inverse of the division applied at probe time or
 * via the cluster_latency_factor sysfs node) and removes the sysfs nodes.
 */
static int simple_lpm_cluster_simple_gov_remove(struct platform_device *pdev)
{
	int i;
	struct generic_pm_domain *genpd = pd_to_genpd(pdev->dev.pm_domain);
	struct simple_lpm_cluster *cluster_simple_gov = to_cluster(genpd);

	if (!cluster_simple_gov)
		return -ENODEV;

	pm_runtime_disable(&pdev->dev);
	cluster_simple_gov->genpd->flags &= ~GENPD_FLAG_MIN_RESIDENCY;
	remove_simple_cluster_sysfs_nodes(cluster_simple_gov);
	for (i = 0; i < genpd->state_count; i++) {
		struct genpd_power_state *states = &genpd->states[i];

		/* Multiply back what probe divided by cluster_cur_div. */
		states->residency_ns = states->residency_ns * cluster_cur_div;
		states->power_on_latency_ns = states->power_on_latency_ns * cluster_cur_div;
		states->power_off_latency_ns = states->power_off_latency_ns * cluster_cur_div;
	}
	list_del(&cluster_simple_gov->list);

	return 0;
}
/*
 * simple_lpm_cluster_simple_gov_probe - attach the governor to a cluster
 * power domain.
 *
 * Divides each genpd state residency/latency by cluster_cur_div so that
 * deeper domain states are chosen more aggressively, and creates the
 * per-state sysfs control nodes.
 */
static int simple_lpm_cluster_simple_gov_probe(struct platform_device *pdev)
{
	int i, ret;
	struct simple_lpm_cluster *cluster_simple_gov;

	cluster_simple_gov = devm_kzalloc(&pdev->dev,
					  sizeof(struct simple_lpm_cluster),
					  GFP_KERNEL);
	if (!cluster_simple_gov)
		return -ENOMEM;

	spin_lock_init(&cluster_simple_gov->lock);
	cluster_simple_gov->dev = &pdev->dev;
	pm_runtime_enable(&pdev->dev);
	cluster_simple_gov->genpd = pd_to_genpd(cluster_simple_gov->dev->pm_domain);
	/* Advertise a far-future next wakeup for this device — presumably to
	 * keep the domain from cutting sleeps short; TODO confirm intent.
	 */
	dev_pm_genpd_set_next_wakeup(cluster_simple_gov->dev, KTIME_MAX - 1);
	cluster_simple_gov->genpd->flags |= GENPD_FLAG_MIN_RESIDENCY;

	ret = create_simple_cluster_sysfs_nodes(cluster_simple_gov);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		cluster_simple_gov->genpd->flags &= ~GENPD_FLAG_MIN_RESIDENCY;
		return ret;
	}

	list_add_tail(&cluster_simple_gov->list, &cluster_dev_list);
	cluster_simple_gov->initialized = true;
	for (i = 0; i < cluster_simple_gov->genpd->state_count; i++) {
		struct generic_pm_domain *genpd = cluster_simple_gov->genpd;
		struct genpd_power_state *states = &genpd->states[i];

		/* Inverse of the restore done in the remove callback. */
		do_div(states->residency_ns, cluster_cur_div);
		do_div(states->power_on_latency_ns, cluster_cur_div);
		do_div(states->power_off_latency_ns, cluster_cur_div);
		cluster_simple_gov->state_allowed[i] = true;
	}

	return 0;
}
/* Binds to every "qcom,lpm-cluster-dev" node (one per cluster power domain). */
static const struct of_device_id qcom_cluster_simple_lpm[] = {
	{ .compatible = "qcom,lpm-cluster-dev" },
	{ }
};

static struct platform_driver qcom_cluster_simple_lpm_driver = {
	.probe = simple_lpm_cluster_simple_gov_probe,
	.remove = simple_lpm_cluster_simple_gov_remove,
	.driver = {
		.name = "qcom-simple-gov",
		.of_match_table = qcom_cluster_simple_lpm,
		/* Manual unbind would leave scaled genpd values behind. */
		.suppress_bind_attrs = true,
	},
};
/*
 * cluster_simple_gov_disable - gov_ops.disable callback.
 * Unregisters the vendor hook (when available) and the platform driver,
 * which in turn runs the remove callback for every bound cluster.
 */
static void cluster_simple_gov_disable(void)
{
#if defined(_TRACE_HOOK_PM_DOMAIN_H)
	unregister_trace_android_vh_allow_domain_state(android_vh_allow_domain_state, NULL);
#endif
	platform_driver_unregister(&qcom_cluster_simple_lpm_driver);
}
/*
 * cluster_simple_gov_enable - gov_ops.enable callback, invoked when the
 * first CPU enables the simple governor.
 *
 * Registers the vendor hook (when available) and the cluster platform
 * driver. The ops interface is void, so a registration failure is logged
 * instead of silently discarded as before.
 */
static void cluster_simple_gov_enable(void)
{
	int ret;

#if defined(_TRACE_HOOK_PM_DOMAIN_H)
	register_trace_android_vh_allow_domain_state(android_vh_allow_domain_state, NULL);
#endif
	ret = platform_driver_register(&qcom_cluster_simple_lpm_driver);
	if (ret)
		pr_err("Failed to register qcom-simple-gov driver: %d\n", ret);
}
/*
 * Callbacks handed to the CPU-level governor. Only referenced in this
 * file, so make it static (the old global leaked into the module
 * namespace for no reason).
 */
static struct simple_cluster_governor gov_ops = {
	.select = update_cluster_select,
	.enable = cluster_simple_gov_enable,
	.disable = cluster_simple_gov_disable,
};

/* Detach the cluster governor from the CPU-level governor. */
void qcom_cluster_lpm_simple_governor_deinit(void)
{
	unregister_cluster_simple_governor_ops(&gov_ops);
}

/* Attach the cluster governor to the CPU-level governor. Always succeeds. */
int qcom_cluster_lpm_simple_governor_init(void)
{
	register_cluster_simple_governor_ops(&gov_ops);

	return 0;
}

View File

@ -0,0 +1,258 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cpu.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qcom-simple-lpm.h"
/* Upper bound for the cpu/cluster latency-factor divisors. */
#define MAX_LATENCY_DIV 1000

/* Parent kobject for all nodes created by this governor. */
static struct kobject *qcom_lpm_simple_kobj;

/*
 * cluster_idle_set - sysfs store for a per-state "disable" knob.
 * Writing a truthy value disallows the corresponding cluster idle state.
 *
 * Return: @len on success, -EINVAL on unparsable input.
 */
static ssize_t cluster_idle_set(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t len)
{
	struct qcom_simple_cluster_node *d = container_of(attr,
					struct qcom_simple_cluster_node, disable_attr);
	bool disable;
	int ret;

	/* kstrtobool, for consistency with the other stores in this file
	 * (strtobool is the deprecated alias).
	 */
	ret = kstrtobool(buf, &disable);
	if (ret)
		return -EINVAL;

	d->cluster->state_allowed[d->state_idx] = !disable;

	return len;
}
static ssize_t cluster_idle_get(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct qcom_simple_cluster_node *d = container_of(attr,
struct qcom_simple_cluster_node, disable_attr);
return scnprintf(buf, PAGE_SIZE, "%d\n", !d->cluster->state_allowed[d->state_idx]);
}
/*
 * create_simple_cluster_state_node - add the "disable" attribute group
 * under the per-state Dn kobject.
 *
 * Return: 0 on success, negative errno on failure. The sysfs_create_group()
 * error is now propagated as-is instead of being rewritten to -ENOMEM.
 */
static int create_simple_cluster_state_node(struct device *dev, struct qcom_simple_cluster_node *d)
{
	struct kobj_attribute *attr = &d->disable_attr;

	d->attr_group = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!d->attr_group)
		return -ENOMEM;

	/* One attribute plus the NULL terminator. */
	d->attrs = devm_kcalloc(dev, 2, sizeof(struct attribute *), GFP_KERNEL);
	if (!d->attrs)
		return -ENOMEM;

	sysfs_attr_init(&attr->attr);
	attr->attr.name = "disable";
	attr->attr.mode = 0644;
	attr->show = cluster_idle_get;
	attr->store = cluster_idle_set;
	d->attrs[0] = &attr->attr;
	d->attrs[1] = NULL;

	d->attr_group->attrs = d->attrs;

	return sysfs_create_group(d->kobj, d->attr_group);
}
/*
 * remove_simple_cluster_sysfs_nodes - tear down the per-state Dn kobjects
 * and the cluster directory created by create_simple_cluster_sysfs_nodes().
 *
 * Safe to call only after a fully successful create (dev_node[i] is
 * populated solely on success there) — presumably guaranteed by the
 * driver remove path; verify against callers.
 */
void remove_simple_cluster_sysfs_nodes(struct simple_lpm_cluster *cluster)
{
	struct generic_pm_domain *genpd = cluster->genpd;
	struct kobject *kobj = cluster->dev_kobj;
	int i;

	if (!qcom_lpm_simple_kobj || !kobj)
		return;

	for (i = 0; i < genpd->state_count; i++) {
		struct qcom_simple_cluster_node *d = cluster->dev_node[i];

		if (d->kobj) {
			sysfs_remove_group(d->kobj, d->attr_group);
			kobject_put(d->kobj);
		}
	}

	kobject_put(kobj);
}
/*
 * create_simple_cluster_sysfs_nodes - create one "Dn" directory with a
 * "disable" knob for every genpd power state of @cluster.
 *
 * Return: 0 on success, -EPROBE_DEFER until the global parent kobject
 * exists, or a negative errno.
 */
int create_simple_cluster_sysfs_nodes(struct simple_lpm_cluster *cluster)
{
	char name[10];
	int i, ret;
	struct generic_pm_domain *genpd = cluster->genpd;

	if (!qcom_lpm_simple_kobj)
		return -EPROBE_DEFER;

	cluster->dev_kobj = kobject_create_and_add(genpd->name, qcom_lpm_simple_kobj);
	if (!cluster->dev_kobj)
		return -ENOMEM;

	for (i = 0; i < genpd->state_count; i++) {
		struct qcom_simple_cluster_node *d;

		d = devm_kzalloc(cluster->dev, sizeof(*d), GFP_KERNEL);
		if (!d) {
			kobject_put(cluster->dev_kobj);
			return -ENOMEM;
		}

		d->state_idx = i;
		d->cluster = cluster;
		/* Bound by the real buffer size: the old code passed PAGE_SIZE
		 * for a 10-byte stack buffer. Also %d — i is a signed int.
		 */
		scnprintf(name, sizeof(name), "D%d", i);
		d->kobj = kobject_create_and_add(name, cluster->dev_kobj);
		if (!d->kobj) {
			kobject_put(cluster->dev_kobj);
			return -ENOMEM;
		}

		ret = create_simple_cluster_state_node(cluster->dev, d);
		if (ret) {
			kobject_put(d->kobj);
			kobject_put(cluster->dev_kobj);
			return ret;
		}

		cluster->dev_node[i] = d;
	}

	return 0;
}
/* Show the global sleep-disable switch (1 = deep idle disabled). */
static ssize_t simple_sleep_disabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	unsigned int val = simple_sleep_disabled ? 1 : 0;

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
/*
 * simple_sleep_disabled_store - sysfs store for the global sleep switch.
 *
 * Return: @count on success, negative errno on bad input. The old code
 * returned @count even on a parse failure, reporting success to userspace
 * while silently ignoring the write.
 */
static ssize_t simple_sleep_disabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret) {
		pr_err("Invalid argument passed\n");
		return ret;
	}

	simple_sleep_disabled = val;

	return count;
}
/*
 * cpu_latency_factor_store - set the divisor applied to the cpu idle-state
 * latency/residency in lpm_select().
 *
 * Accepts 1..MAX_LATENCY_DIV. The old "!val" check let negative values
 * through into the unsigned cur_div, and parse errors were reported as
 * success; both are fixed.
 */
static ssize_t cpu_latency_factor_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int val, ret;

	ret = kstrtoint(buf, 0, &val);
	if (ret || val <= 0 || val > MAX_LATENCY_DIV) {
		pr_err("Invalid argument passed\n");
		return ret ? ret : -EINVAL;
	}

	cur_div = val;

	return count;
}
/* Show the current cpu latency-factor divisor. */
static ssize_t cpu_latency_factor_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	/* cur_div is u64: "%u" was a format/argument mismatch. */
	return scnprintf(buf, PAGE_SIZE, "%llu\n", cur_div);
}
/*
 * cluster_latency_factor_store - rescale every cluster genpd state by a
 * new divisor: each value is multiplied back by the old divisor and then
 * divided by the new one.
 *
 * Fixes the copy-paste bug where power_off_latency_ns was recomputed from
 * power_on_latency_ns, rejects non-positive factors, and reports parse
 * errors to the caller instead of claiming success.
 */
static ssize_t cluster_latency_factor_store(struct kobject *kobj,
					    struct kobj_attribute *attr,
					    const char *buf, size_t count)
{
	int val, ret, i;
	struct simple_lpm_cluster *cluster_simple_gov;

	ret = kstrtoint(buf, 0, &val);
	if (ret || val <= 0 || val > MAX_LATENCY_DIV) {
		pr_err("Invalid argument passed\n");
		return ret ? ret : -EINVAL;
	}

	list_for_each_entry(cluster_simple_gov, &cluster_dev_list, list) {
		for (i = 0; i < cluster_simple_gov->genpd->state_count; i++) {
			struct generic_pm_domain *genpd = cluster_simple_gov->genpd;
			struct genpd_power_state *state = &genpd->states[i];

			state->residency_ns = state->residency_ns * cluster_cur_div;
			do_div(state->residency_ns, val);
			state->power_on_latency_ns = state->power_on_latency_ns * cluster_cur_div;
			do_div(state->power_on_latency_ns, val);
			/* Was scaled from power_on_latency_ns by mistake. */
			state->power_off_latency_ns = state->power_off_latency_ns * cluster_cur_div;
			do_div(state->power_off_latency_ns, val);
		}
	}
	cluster_cur_div = val;

	return count;
}
/* Show the current cluster latency-factor divisor. */
static ssize_t cluster_latency_factor_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	/* cluster_cur_div is u64: "%u" was a format/argument mismatch. */
	return scnprintf(buf, PAGE_SIZE, "%llu\n", cluster_cur_div);
}
static struct kobj_attribute attr_simple_sleep_disabled = __ATTR_RW(simple_sleep_disabled);
static struct kobj_attribute attr_cpu_latency_factor = __ATTR_RW(cpu_latency_factor);
static struct kobj_attribute attr_cluster_latency_factor = __ATTR_RW(cluster_latency_factor);

/* Global tunables, exposed under <qcom_simple_lpm kobject>/parameters/. */
static struct attribute *lpm_simple_gov_attrs[] = {
	&attr_simple_sleep_disabled.attr,
	&attr_cpu_latency_factor.attr,
	&attr_cluster_latency_factor.attr,
	NULL
};

static struct attribute_group lpm_gov_attr_group = {
	.attrs = lpm_simple_gov_attrs,
	.name = "parameters",
};
/* Tear down the "parameters" group and the qcom_simple_lpm kobject. */
void remove_simple_gov_global_sysfs_nodes(void)
{
	sysfs_remove_group(qcom_lpm_simple_kobj, &lpm_gov_attr_group);
	kobject_put(qcom_lpm_simple_kobj);
}

/*
 * Create the qcom_simple_lpm kobject under the cpu subsystem root plus its
 * "parameters" group.
 * NOTE(review): cpu_subsys.dev_root is dereferenced unchecked — confirm it
 * is guaranteed non-NULL on the target kernel version.
 */
int create_simple_gov_global_sysfs_nodes(void)
{
	struct kobject *cpuidle_kobj = &cpu_subsys.dev_root->kobj;

	qcom_lpm_simple_kobj = kobject_create_and_add("qcom_simple_lpm", cpuidle_kobj);
	if (!qcom_lpm_simple_kobj)
		return -ENOMEM;

	return sysfs_create_group(qcom_lpm_simple_kobj, &lpm_gov_attr_group);
}

View File

@ -0,0 +1,296 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pm_qos.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include "qcom-simple-lpm.h"
/* True once the cluster governor ops have been enabled by any CPU. */
static bool cluster_gov_registered;
/* Global switch: defaults to true (deep idle off) until cleared via sysfs. */
bool simple_sleep_disabled = true;
/* Divisor applied to cpuidle state latency/residency (sysfs tunable). */
u64 cur_div = 100;
/* Set/cleared by the suspend/resume trace hook below. */
static bool suspend_in_progress;
static struct simple_cluster_governor *cluster_simple_gov_ops;

DEFINE_PER_CPU(struct simple_lpm_cpu, simple_lpm_cpu_data);

/* Whether @cpu is active for scheduling (not mid hot-unplug). */
static inline bool check_cpu_isactive(int cpu)
{
	return cpu_active(cpu);
}
/*
 * simple_lpm_cpu_qos_notify - DEV_PM_QOS_RESUME_LATENCY notifier.
 *
 * A changed QoS constraint can invalidate the idle state a sleeping CPU
 * already chose, so kick that CPU out of idle to re-run selection.
 */
static int simple_lpm_cpu_qos_notify(struct notifier_block *nfb,
				     unsigned long val, void *ptr)
{
	struct simple_lpm_cpu *cpu_gov = container_of(nfb, struct simple_lpm_cpu, nb);
	int cpu = cpu_gov->cpu;

	if (!cpu_gov->enable)
		return NOTIFY_OK;

	/* Stay on this CPU while poking the target; no point waking self. */
	preempt_disable();
	if (cpu != smp_processor_id() && cpu_online(cpu) &&
	    check_cpu_isactive(cpu))
		wake_up_if_idle(cpu);
	preempt_enable();

	return NOTIFY_OK;
}
/* CPU hotplug teardown: drop the resume-latency QoS notifier. */
static int lpm_offline_cpu(unsigned int cpu)
{
	struct simple_lpm_cpu *cpu_gov = per_cpu_ptr(&simple_lpm_cpu_data, cpu);
	struct device *dev = get_cpu_device(cpu);

	if (!dev || !cpu_gov)
		return 0;

	dev_pm_qos_remove_notifier(dev, &cpu_gov->nb, DEV_PM_QOS_RESUME_LATENCY);

	return 0;
}

/* CPU hotplug bringup: register for resume-latency QoS updates. */
static int lpm_online_cpu(unsigned int cpu)
{
	struct simple_lpm_cpu *cpu_gov = per_cpu_ptr(&simple_lpm_cpu_data, cpu);
	struct device *dev = get_cpu_device(cpu);

	if (!dev || !cpu_gov)
		return 0;

	cpu_gov->nb.notifier_call = simple_lpm_cpu_qos_notify;
	dev_pm_qos_add_notifier(dev, &cpu_gov->nb, DEV_PM_QOS_RESUME_LATENCY);

	return 0;
}
/**
 * get_cpus_qos() - Returns the aggregated PM QoS request.
 * @mask: cpumask of the cpus
 *
 * Return: the smallest per-CPU latency constraint over the active CPUs in
 * @mask, scaled down by NSEC_PER_USEC.
 */
static inline s64 get_cpus_qos(const struct cpumask *mask)
{
	int cpu;
	s64 n, latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;

	for_each_cpu(cpu, mask) {
		if (!check_cpu_isactive(cpu))
			continue;
		n = cpuidle_governor_latency_req(cpu);
		do_div(n, NSEC_PER_USEC);
		if (n < latency)
			latency = n;
	}

	return latency;
}
/* Install the cluster-governor callbacks used by enable/disable below. */
void register_cluster_simple_governor_ops(struct simple_cluster_governor *ops)
{
	if (!ops)
		return;

	cluster_simple_gov_ops = ops;
}

/* Remove the callbacks, but only if @ops is the currently installed set. */
void unregister_cluster_simple_governor_ops(struct simple_cluster_governor *ops)
{
	if (ops != cluster_simple_gov_ops)
		return;

	cluster_simple_gov_ops = NULL;
}
/**
 * lpm_select() - Find the best idle state for the cpu device
 * @drv:       cpuidle driver for this CPU
 * @dev:       Target cpu
 * @stop_tick: Is the tick device stopped
 *
 * Walks the driver states from deepest to shallowest and picks the first
 * enabled state whose (scaled-down) latency fits the QoS constraint and
 * whose (scaled-down) residency fits the predicted sleep length.
 *
 * Return: Best cpu LPM mode to enter
 */
static int lpm_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		      bool *stop_tick)
{
	struct simple_lpm_cpu *cpu_gov = this_cpu_ptr(&simple_lpm_cpu_data);
	u64 latency_req = get_cpus_qos(cpumask_of(dev->cpu));
	ktime_t delta_tick;
	s64 duration_ns;
	int i = 0;

	if (!cpu_gov)
		return 0;

	/* Pick the shallowest state while sleep is administratively disabled
	 * or while system suspend is in flight. suspend_in_progress was
	 * previously maintained by the suspend trace hook but never read.
	 */
	if (simple_sleep_disabled || suspend_in_progress)
		return 0;

	duration_ns = tick_nohz_get_sleep_length(&delta_tick);
	if (duration_ns < 0)
		return 0;

	for (i = drv->state_count - 1; i > 0; i--) {
		struct cpuidle_state *s = &drv->states[i];
		/* u64 locals: do_div() operates on u64 (the old s64
		 * target_residency was a type mismatch).
		 */
		u64 target_latency = s->exit_latency;
		u64 target_residency = s->target_residency_ns;

		/* Scale down by the sysfs cpu_latency_factor tunable. */
		do_div(target_latency, cur_div);
		do_div(target_residency, cur_div);

		if (dev->states_usage[i].disable)
			continue;

		if (latency_req < target_latency)
			continue;

		if (target_residency > duration_ns)
			continue;

		break;
	}

	cpu_gov->last_idx = i;

	return i;
}
/**
 * lpm_reflect() - Update the state entered by the cpu device
 * @dev: Target CPU
 * @state: Entered state
 *
 * Intentionally empty: this governor keeps no per-entry statistics.
 */
static void lpm_reflect(struct cpuidle_device *dev, int state)
{
}
/**
 * lpm_enable_device() - Initialize the governor's data for the CPU
 * @drv: cpuidle driver
 * @dev: Target CPU
 *
 * The first CPU to be enabled also brings up the cluster governor through
 * the registered ops.
 */
static int lpm_enable_device(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev)
{
	struct simple_lpm_cpu *cpu_gov = per_cpu_ptr(&simple_lpm_cpu_data, dev->cpu);

	cpu_gov->cpu = dev->cpu;
	cpu_gov->enable = true;
	cpu_gov->drv = drv;
	cpu_gov->dev = dev;
	cpu_gov->last_idx = -1;

	if (!cluster_gov_registered) {
		if (cluster_simple_gov_ops && cluster_simple_gov_ops->enable)
			cluster_simple_gov_ops->enable();
		cluster_gov_registered = true;
	}

	return 0;
}
/**
 * lpm_disable_device() - Clean up the governor's data for the CPU
 * @drv: cpuidle driver
 * @dev: Target CPU
 *
 * When the last enabled CPU goes away, the cluster governor is torn down
 * through the registered ops.
 */
static void lpm_disable_device(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	struct simple_lpm_cpu *cpu_gov = per_cpu_ptr(&simple_lpm_cpu_data, dev->cpu);
	int cpu;

	cpu_gov->enable = false;
	cpu_gov->last_idx = -1;

	/* Distinct name: the old inner 'cpu_gov' shadowed the outer one. */
	for_each_possible_cpu(cpu) {
		struct simple_lpm_cpu *other_gov = per_cpu_ptr(&simple_lpm_cpu_data, cpu);

		/* Another CPU still uses the governor: keep cluster ops. */
		if (other_gov->enable)
			return;
	}

	/* Last CPU out: disable the cluster governor. */
	if (cluster_gov_registered) {
		if (cluster_simple_gov_ops && cluster_simple_gov_ops->disable)
			cluster_simple_gov_ops->disable();
		cluster_gov_registered = false;
	}
}
/*
 * qcom_lpm_suspend_trace - suspend/resume trace hook.
 *
 * Tracks whether system suspend is in flight and nudges idle CPUs so they
 * re-evaluate their state with the updated flag.
 */
static void qcom_lpm_suspend_trace(void *unused, const char *action,
				   int event, bool start)
{
	int cpu;

	if (start && !strcmp("dpm_suspend_late", action)) {
		suspend_in_progress = true;

		for_each_online_cpu(cpu)
			wake_up_if_idle(cpu);
		return;
	}

	if (!start && !strcmp("dpm_resume_early", action)) {
		suspend_in_progress = false;

		for_each_online_cpu(cpu)
			wake_up_if_idle(cpu);
	}
}
/* Governor registered with the cpuidle core. */
static struct cpuidle_governor lpm_simple_governor = {
	.name = "qcom-simple-lpm",
	.rating = 40,
	.enable = lpm_enable_device,
	.disable = lpm_disable_device,
	.select = lpm_select,
	.reflect = lpm_reflect,
};
/*
 * Module init: create the global sysfs nodes, hook up the cluster
 * governor, register the cpuidle governor, the suspend trace hook and the
 * CPU hotplug callbacks — unwinding in reverse order on failure.
 *
 * NOTE(review): on the cpuhp failure path the cpuidle governor stays
 * registered; there is no cpuidle unregister API here — confirm this is
 * acceptable for the target kernel.
 */
static int __init qcom_lpm_simple_governor_init(void)
{
	int ret;

	ret = create_simple_gov_global_sysfs_nodes();
	if (ret)
		goto sysfs_fail;

	ret = qcom_cluster_lpm_simple_governor_init();
	if (ret)
		goto cluster_init_fail;

	ret = cpuidle_register_governor(&lpm_simple_governor);
	if (ret)
		goto cpuidle_reg_fail;

	ret = register_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
	if (ret)
		goto cpuidle_reg_fail;

	/* CPUHP_AP_ONLINE_DYN returns a positive state number on success. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "qcom-simple-lpm",
				lpm_online_cpu, lpm_offline_cpu);
	if (ret < 0)
		goto cpuhp_setup_fail;

	return 0;

cpuhp_setup_fail:
	unregister_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
cpuidle_reg_fail:
	qcom_cluster_lpm_simple_governor_deinit();
cluster_init_fail:
	remove_simple_gov_global_sysfs_nodes();
sysfs_fail:
	return ret;
}
module_init(qcom_lpm_simple_governor_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. simple LPM governor");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_SIMPLE_LPM_H__
#define __QCOM_SIMPLE_LPM_H__
#define MAX_CLUSTER_STATES 4
extern bool simple_sleep_disabled;
struct qcom_simple_cluster_node {
struct simple_lpm_cluster *cluster;
struct kobject *kobj;
int state_idx;
struct kobj_attribute disable_attr;
struct attribute_group *attr_group;
struct attribute **attrs;
};
struct simple_lpm_cpu {
int cpu;
int enable;
int last_idx;
struct notifier_block nb;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
ktime_t next_wakeup;
ktime_t now;
};
struct simple_lpm_cluster {
struct device *dev;
struct generic_pm_domain *genpd;
struct qcom_simple_cluster_node *dev_node[MAX_CLUSTER_STATES];
struct kobject *dev_kobj;
struct notifier_block genpd_nb;
bool state_allowed[MAX_CLUSTER_STATES];
struct list_head list;
spinlock_t lock;
bool initialized;
};
struct simple_cluster_governor {
void (*select)(struct simple_lpm_cpu *cpu_gov);
void (*enable)(void);
void (*disable)(void);
void (*reflect)(void);
};
DECLARE_PER_CPU(struct simple_lpm_cpu, lpm_cpu_data);
extern struct list_head cluster_dev_list;
extern u64 cur_div;
extern u64 cluster_cur_div;
void update_simple_cluster_select(struct simple_lpm_cpu *cpu_gov);
int create_simple_gov_global_sysfs_nodes(void);
void remove_simple_gov_global_sysfs_nodes(void);
void register_cluster_simple_governor_ops(struct simple_cluster_governor *ops);
void unregister_cluster_simple_governor_ops(struct simple_cluster_governor *ops);
void remove_simple_cluster_sysfs_nodes(struct simple_lpm_cluster *simple_cluster_gov);
int create_simple_cluster_sysfs_nodes(struct simple_lpm_cluster *simple_cluster_gov);
int qcom_cluster_lpm_simple_governor_init(void);
void qcom_cluster_lpm_simple_governor_deinit(void);
#endif /* __QCOM_SIMPLE_LPM_H__ */