drivers: powercap: add qti epm hardware device driver

Add the qcom EPM hardware device driver. It supports measuring
power consumption data of different PMIC regulators as well as
PMIC die temperature data. It exposes these data to userspace
clients via the powercap sysfs interface and the thermal zone
sysfs interface respectively.
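
For illustration only (zone and thermal zone indices depend on the
platform configuration and registration order), the data is expected
to surface along these lines:

  /sys/class/powercap/epm/epm:<N>/name     - regulator channel name
  /sys/class/powercap/epm/epm:<N>/power_uw - 10s average power in uW
  /sys/class/thermal/thermal_zone<N>/temp  - PMIC die temperature in mC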

Change-Id: Ic6c190a85ca2425afd68db7662e94b7aa92be4ef
Signed-off-by: Priyansh Jain <quic_priyjain@quicinc.com>
Priyansh Jain 2024-04-22 08:18:11 +05:30
parent 2c4e117d35
commit e68a0e7cc8
5 changed files with 1345 additions and 0 deletions

drivers/powercap/Kconfig

@@ -46,6 +46,17 @@ config IDLE_INJECT
synchronously on a set of specified CPUs or alternatively
on a per CPU basis.
config QCOM_EPM
tristate "Qualcomm Technologies, Inc. EPM Hardware driver"
depends on SPMI && NVMEM_SPMI_SDAM
default n
help
This enables the Qualcomm Technologies, Inc. EPM hardware
device driver. It supports measuring the power consumption
of different PMIC regulators or bucks in different modes.
It exposes this data to userspace clients via the powercap
sysfs interface.
config DTPM
bool "Power capping for Dynamic Thermal Power Management (EXPERIMENTAL)"
depends on OF

drivers/powercap/Makefile

@@ -6,3 +6,5 @@ obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
obj-$(CONFIG_QCOM_EPM) += qti_epm_hardware.o
qti_epm_hardware-y += qti_epm_hw.o qti_epm_interface.o

drivers/powercap/qti_epm.h (new file, 282 lines)

@@ -0,0 +1,282 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_EPM_H__
#define __QCOM_EPM_H__
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/powercap.h>
#include <linux/thermal.h>
struct epm_priv;
struct epm_device;
#define IPC_LOGPAGES 10
#define EPM_DBG(epm, msg, args...) do { \
dev_dbg((epm)->dev, "%s:" msg, __func__, args); \
if ((epm)->ipc_log) { \
ipc_log_string((epm)->ipc_log, \
"[%s] " msg "\n", \
current->comm, args); \
} \
} while (0)
#define EPM_REG_NAME_LENGTH 32
#define EPM_POWER_CH_MAX 48
#define EPM_TZ_CH_MAX 8
#define EPM_MAX_DATA_MAX 10
/*
* Different epm modes of operation
*/
enum epm_mode {
EPM_ACAT_MODE,
EPM_RCM_MODE,
/* Keep last */
EPM_MODE_MAX
};
/*
* Different epm sdam IDs to use as an index into an array
*/
enum epm_sdam_id {
CONFIG_SDAM,
DATA_AVG_SDAM,
DATA_1_SDAM,
DATA_2_SDAM,
DATA_3_SDAM,
DATA_4_SDAM,
DATA_5_SDAM,
DATA_6_SDAM,
DATA_7_SDAM,
DATA_8_SDAM,
DATA_9_SDAM,
DATA_10_SDAM,
DATA_11_SDAM,
/* Keep last */
MAX_EPM_SDAM
};
/*
* Data sdam field IDs to use as an index into an array
*/
enum data_sdam_field_ids {
DATA_SDAM_SEQ_START,
DATA_SDAM_SEQ_END,
DATA_SDAM_NUM_RECORDS,
DATA_SDAM_RTC0,
DATA_SDAM_RTC1,
DATA_SDAM_RTC2,
DATA_SDAM_RTC3,
DATA_SDAM_VPH_LSB,
DATA_SDAM_VPH_MSB,
DATA_SDAM_DIE_TEMP_SID1,
DATA_SDAM_DIE_TEMP_SID8 = DATA_SDAM_DIE_TEMP_SID1 + EPM_TZ_CH_MAX - 1,
DATA_SDAM_POWER_LSB_CH1,
DATA_SDAM_POWER_MSB_CH1,
DATA_SDAM_POWER_LSB_CH48 = DATA_SDAM_POWER_LSB_CH1 + 2 * (EPM_POWER_CH_MAX - 1),
DATA_SDAM_POWER_MSB_CH48,
/* Keep last */
MAX_SDAM_DATA
};
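/*
 * Layout sketch derived from the enum above: the power reading for
 * channel n (1-based) is a LSB/MSB byte pair starting at
 * DATA_SDAM_POWER_LSB_CH1 + 2 * (n - 1), and each pmic die temperature
 * is a single byte starting at DATA_SDAM_DIE_TEMP_SID1.
 */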
/*
* config sdam field IDs to use as an index into an array
*/
enum config_sdam_field_ids {
CONFIG_SDAM_EPM_MODE,
CONFIG_SDAM_EPM_STATUS,
CONFIG_SDAM_MAX_DATA,
CONFIG_SDAM_MEAS_CFG,
CONFIG_SDAM_LAST_FULL_SDAM,
CONFIG_SDAM_CONFIG_1,
CONFIG_SDAM_PID_1,
CONFIG_SDAM_CONFIG_48 = CONFIG_SDAM_CONFIG_1 + 2 * (EPM_POWER_CH_MAX - 1),
/* Keep last */
MAX_CONFIG_SDAM_DATA
};
/*
* ACAT mode different epm data types
*/
enum epm_data_type {
EPM_1S_DATA,
EPM_10S_AVG_DATA,
/* Keep last */
EPM_TYPE_MAX
};
/**
* struct epm_sdam - EPM sdam data structure
* @id: EPM sdam id type
* @nvmem: Pointer to nvmem device
* @lock: lock to serialize concurrent sdam reads
* @last_data: last full read data copy for current sdam
*/
struct epm_sdam {
enum epm_sdam_id id;
struct nvmem_device *nvmem;
struct mutex lock;
uint8_t last_data[MAX_CONFIG_SDAM_DATA];
};
/**
* struct epm_device_pz_data - EPM device powercap zone data for each data type
* @pz: Pointer to powercap zone device
* @type: Type of epm acat data types, 1S data to 10S average data
* @epm_dev: Pointer to regulator epm channel device
*/
struct epm_device_pz_data {
struct powercap_zone pz;
enum epm_data_type type;
struct epm_device *epm_dev;
};
/**
* struct epm_device - Each regulator channel device data
* @epm_node: epm device list head member to traverse all devices
* @priv: epm hardware instance that this channel is connected to
* @epm_pz: array of powercap zone data types for different data retrieval
* @name: name of the regulator which is used to identify channel
* @enabled: epm channel is enabled or not
* @sid: epm channel SID
* @pid: epm channel PID
* @gang_num: epm channel gang_num
* @data_offset: epm channel power data offset from DATA sdam base
* @last_data: epm channel last 1S raw data
* @last_avg_data: epm channel last 10S average raw data
* @last_data_uw: epm channel last 1S power data converted to microwatts
* @last_avg_data_uw: epm channel last 10S average power converted to microwatts
* @time_stamp: timestamp of last 1S data collected from epm channel
* @avg_time_stamp: timestamp of last 10S average data collected from epm channel
* @lock: lock to serialize concurrent client reads
*/
struct epm_device {
struct list_head epm_node;
struct epm_priv *priv;
struct epm_device_pz_data epm_pz[EPM_TYPE_MAX];
char name[EPM_REG_NAME_LENGTH];
bool enabled;
uint8_t sid;
uint8_t pid;
uint8_t gang_num;
uint8_t data_offset;
uint16_t last_data[EPM_MAX_DATA_MAX];
uint16_t last_avg_data;
u64 last_data_uw[EPM_MAX_DATA_MAX];
u64 last_avg_data_uw;
u64 time_stamp;
u64 avg_time_stamp;
struct mutex lock;
};
/**
* struct epm_tz_device - EPM per-sensor temperature device data
* @offset: epm tz channel offset from first temperature data
* @last_temp: epm thermal zone last read temperature in millidegree Celsius
* @time_stamp: timestamp of last temperature data collected for given tz
* @priv: epm hardware instance that this channel is connected to
* @tz: Pointer to the registered thermal zone device
* @lock: lock to serialize concurrent client reads
*/
struct epm_tz_device {
uint8_t offset;
int last_temp;
u64 time_stamp;
struct epm_priv *priv;
struct thermal_zone_device *tz;
struct mutex lock;
};
/**
* struct epm_priv - Structure for EPM hardware private data
* @dev: Pointer for EPM device
* @mode: enum to give current mode of operation
* @sdam: Pointer for array of EPM sdams
* @pct: pointer to powercap control type
* @irq: epm sdam pbs irq number
* @num_sdams: Number of SDAMs used for EPM from DT
* @num_reg: Number of regulators detected from config sdam
* @max_data: EPM hardware max_data configuration
* @data_1s_base_pid: PID info of 1st 1S sdam of EPM
* @last_sdam_pid: PID info of latest 1S data sdam
* @reg_ppid_map: array of regulator/rail PPID from devicetree
* @dt_reg_cnt: Number of regulators listed in devicetree
* @dt_tz_cnt: Number of pmic die temperature sensors listed in devicetree
* @last_ch_offset: Last enabled data channel offset
* @initialized: EPM hardware initialization is done if it is true
* @g_enabled: The epm global enable bit status
* @ops: EPM hardware supported ops
* @config_sdam_data: Config sdam data dump collected at init
* @ipc_log: Handle to ipc_logging
* @all_1s_read_ts: Timestamp collected just after epm irq 1S data update
* @all_avg_read_ts: Timestamp collected just after epm irq 10S avg data update
* @all_tz_read_ts: Timestamp collected just after the last full temperature data update
* @epm_dev_head: List head for all epm channel devices
* @epm_tz: Array of list of pmic die temperature devices
* @sec_read_lock: lock to protect 1s data update and client request
* @avg_read_lock: lock to protect avg data update and client request
* @avg_data_work: Delayed work to retry the avg data read when sequence counters do not match
*/
struct epm_priv {
struct device *dev;
enum epm_mode mode;
struct epm_sdam *sdam;
struct powercap_control_type *pct;
int irq;
u32 num_sdams;
u32 num_reg;
u8 max_data;
u16 data_1s_base_pid;
u16 last_sdam_pid;
u16 reg_ppid_map[EPM_POWER_CH_MAX];
u8 dt_reg_cnt;
u8 dt_tz_cnt;
u8 last_ch_offset;
bool initialized;
bool g_enabled;
struct epm_ops *ops;
uint8_t *config_sdam_data;
void *ipc_log;
u64 all_1s_read_ts;
u64 all_avg_read_ts;
u64 all_tz_read_ts;
struct list_head epm_dev_head;
struct epm_tz_device *epm_tz;
struct mutex sec_read_lock;
struct mutex avg_read_lock;
struct delayed_work avg_data_work;
};
/**
* struct epm_ops - Structure for EPM hardware supported ops
* @init: EPM hardware init function
* @get_mode: Function to get current EPM operation mode
* @get_power: Function to get power in microwatts for an EPM channel for a given type
* @get_max_power: Function to get max power which EPM channel can deliver
* @get_temp: Function to get current temperature of a pmic die
* @get_hw_mon_data: Function to get hardware monitor data for RCM mode
* @release: Function to clear all EPM data on exit
* @suspend: Function to execute during EPM suspend callback if any
* @resume: Function to restore EPM during resume callback if any
*/
struct epm_ops {
int (*init)(struct epm_priv *priv);
enum epm_mode (*get_mode)(struct epm_priv *epm);
int (*get_power)(struct epm_device *epm_dev, enum epm_data_type type, u64 *power);
int (*get_max_power)(const struct epm_device *epm_dev, u64 *max_power);
int (*get_temp)(struct epm_tz_device *epm_tz, int *temp);
int (*get_hw_mon_data)(struct epm_device *epm, int *data);
void (*release)(struct epm_priv *epm);
int (*suspend)(struct epm_priv *epm);
int (*resume)(struct epm_priv *epm);
};
extern struct epm_ops epm_hw_ops;
#endif /* __QCOM_EPM_H__ */

drivers/powercap/qti_epm_hw.c (new file)

@@ -0,0 +1,782 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "qti_epm: %s: " fmt, __func__
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
#include <linux/workqueue.h>
#include "qti_epm.h"
#include <linux/delay.h>
#define EPM_CONFIG_SDAM_BASE_OFF 0x45
#define EPM_DATA_SDAM_BASE_OFF 0x45
#define EPM_CH_ENABLE_MASK BIT(7)
#define EPM_SID_MASK 0xf
#define EPM_GANG_NUM_MASK 0x70
#define EPM_DATA_BYTE_SIZE 2
#define EPM_NSEC_PER_SEC 1000000000L
#define EPM_DATA_TO_POWER_UW 1500L /* 1 LSB = 1.5 mW */
#define EPM_TZ_BYTE_SIZE 1
#define EPM_TZ_MIN_VAL_IN_C 50
#define EPM_GET_POWER_UW_FROM_ADC(adc) ((adc) * EPM_DATA_TO_POWER_UW)
#define EPM_GET_TEMP_MC_FROM_ADC(adc) (((adc) - EPM_TZ_MIN_VAL_IN_C) * 1000)
#define EPM_AVG_SDAM_RETRY_DELAY msecs_to_jiffies(200)
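/*
 * Worked example for the conversion macros above (illustrative values):
 * a power reading of 100 maps to 100 * 1500 = 150000 uW (150 mW), and a
 * die temperature reading of 85 maps to (85 - 50) * 1000 = 35000 mC.
 */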
static int epm_sdam_nvmem_read(struct epm_priv *epm, struct epm_sdam *sdam,
uint16_t offset, size_t bytes, uint8_t *data)
{
int rc = 0;
mutex_lock(&sdam->lock);
rc = nvmem_device_read(sdam->nvmem, offset, bytes, data);
mutex_unlock(&sdam->lock);
EPM_DBG(epm, "sdam[%d] off:0x%x,size:%d rc=%d data[0]:0x%x data[1]:0x%x",
sdam->id, offset, bytes, rc, data[0], data[1]);
if (rc < 0)
dev_err(epm->dev,
"Failed to read sdam[%d] off:0x%x,size:%d rc=%d\n",
sdam->id, offset, bytes, rc);
return rc;
}
static struct epm_sdam *get_data_sdam_from_pid(struct epm_priv *epm, uint8_t pid)
{
if (!epm->data_1s_base_pid ||
(pid - epm->data_1s_base_pid) >= (MAX_EPM_SDAM - DATA_1_SDAM)) {
dev_err(epm->dev, "Invalid sdam pids, base=0x%x curr=0x%x\n",
epm->data_1s_base_pid, pid);
return ERR_PTR(-EINVAL);
}
if (pid < epm->data_1s_base_pid)
pid = epm->data_1s_base_pid;
return &epm->sdam[DATA_1_SDAM + (pid - epm->data_1s_base_pid)];
}
static int epm_validate_data_sdam_sequence_matching(struct epm_priv *epm,
struct epm_sdam *sdam)
{
int rc = 0;
uint8_t data_counter[2] = {0};
rc = epm_sdam_nvmem_read(epm, sdam,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_SEQ_START,
2, data_counter);
if (rc < 0)
return rc;
if (!data_counter[0] ||
data_counter[0] !=
data_counter[1]) {
EPM_DBG(epm,
"sdam[%d] No matching counter START:%d END:%d, rc=%d",
sdam->id, data_counter[0], data_counter[1], rc);
return -EBUSY;
}
return 0;
}
static struct epm_sdam *get_prev_data_sdam(struct epm_priv *epm,
struct epm_sdam *cur_sdam)
{
enum epm_sdam_id id;
struct epm_sdam *prev;
if (cur_sdam->id - 1 < DATA_1_SDAM)
id = DATA_11_SDAM;
else
id = cur_sdam->id - 1;
prev = &epm->sdam[id];
if (prev && (prev != cur_sdam))
return prev;
return NULL;
}
static enum epm_mode qti_epm_get_mode(struct epm_priv *epm)
{
if (!epm || !epm->initialized)
return -ENODEV;
return epm->mode;
}
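/*
 * Readings cached within the last second are considered fresh; a new
 * sdam read is issued only when the stored snapshot is older than
 * EPM_NSEC_PER_SEC.
 */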
static bool epm_is_need_hw_read(u64 last_timestamp)
{
if (sched_clock() - last_timestamp < EPM_NSEC_PER_SEC)
return false;
return true;
}
static void epm_channel_avg_data_update(struct epm_device *epm_dev,
uint8_t lsb, uint8_t msb, u64 ts)
{
epm_dev->last_avg_data = (msb << 8) | lsb;
epm_dev->last_avg_data_uw = EPM_GET_POWER_UW_FROM_ADC((msb << 8) | lsb);
epm_dev->avg_time_stamp = ts;
EPM_DBG(epm_dev->priv, "epm[%s]:avg power:%duw msb:0x%x lsb:0x%x",
epm_dev->name, epm_dev->last_avg_data_uw, msb, lsb);
}
static int qti_epm_read_acat_10s_avg_data_common(struct epm_priv *epm,
struct epm_device *epm_dev,
uint16_t offset, size_t size)
{
uint8_t data_sdam_avg[DATA_SDAM_POWER_MSB_CH48 + 1] = {0};
int rc = 0;
struct epm_device *epm_dev_tmp;
rc = epm_validate_data_sdam_sequence_matching(epm,
&epm->sdam[DATA_AVG_SDAM]);
if (rc < 0) {
if (rc == -EBUSY) {
dev_dbg(epm->dev,
"Retry avg data update after sometime\n");
schedule_delayed_work(&epm->avg_data_work,
EPM_AVG_SDAM_RETRY_DELAY);
}
return rc;
}
rc = epm_sdam_nvmem_read(epm, &epm->sdam[DATA_AVG_SDAM], offset,
size, data_sdam_avg);
if (rc < 0)
return rc;
if (!epm_dev && size > EPM_DATA_BYTE_SIZE) {
epm->all_avg_read_ts = sched_clock();
list_for_each_entry(epm_dev_tmp, &epm->epm_dev_head, epm_node) {
if (!epm_dev_tmp->enabled)
continue;
if (epm_dev_tmp->data_offset >= (offset + size))
continue;
epm_channel_avg_data_update(epm_dev_tmp,
data_sdam_avg[epm_dev_tmp->data_offset],
data_sdam_avg[epm_dev_tmp->data_offset + 1],
epm->all_avg_read_ts);
}
} else if (epm_dev && size == EPM_DATA_BYTE_SIZE) {
epm_channel_avg_data_update(epm_dev, data_sdam_avg[0],
data_sdam_avg[1], sched_clock());
}
return 0;
}
static int qti_epm_update_acat_10s_avg_full_data(struct epm_priv *epm)
{
int rc = 0;
mutex_lock(&epm->avg_read_lock);
if (epm_is_need_hw_read(epm->all_avg_read_ts))
rc = qti_epm_read_acat_10s_avg_data_common(epm, NULL,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1,
epm->last_ch_offset + 2);
mutex_unlock(&epm->avg_read_lock);
if (rc < 0)
return rc;
return 0;
}
static int qti_epm_read_acat_10s_avg_channel(struct epm_device *epm_dev,
u64 *power_uw)
{
struct epm_priv *epm = epm_dev->priv;
int rc = 0;
if (epm_dev->data_offset >= DATA_SDAM_POWER_MSB_CH48)
return -EINVAL;
mutex_lock(&epm->avg_read_lock);
if (epm_is_need_hw_read(epm_dev->avg_time_stamp))
rc = qti_epm_read_acat_10s_avg_data_common(epm, epm_dev,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1
+ epm_dev->data_offset, EPM_DATA_BYTE_SIZE);
mutex_unlock(&epm->avg_read_lock);
if (rc >= 0 || rc == -EBUSY) {
rc = 0;
*power_uw = epm_dev->last_avg_data_uw;
}
return rc;
}
static int epm_get_latest_sdam_pid(struct epm_priv *epm, uint8_t *pid)
{
int rc = 0;
rc = epm_sdam_nvmem_read(epm, &epm->sdam[CONFIG_SDAM],
EPM_CONFIG_SDAM_BASE_OFF + CONFIG_SDAM_LAST_FULL_SDAM,
1, pid);
return rc;
}
static struct epm_sdam *get_next_valid_data_1s_sdam(struct epm_priv *epm,
struct epm_sdam *sdam)
{
uint8_t data_sdam_pid;
int rc = 0, idx = 0;
if (!sdam) {
/* get latest data sdam pid */
rc = epm_get_latest_sdam_pid(epm, &data_sdam_pid);
if (rc < 0)
return ERR_PTR(-ENODEV);
/* Better save last sdam */
epm->last_sdam_pid = data_sdam_pid;
/* Get data sdam from sdam pid */
sdam = get_data_sdam_from_pid(epm, data_sdam_pid);
if (IS_ERR(sdam))
return sdam;
} else {
sdam = get_prev_data_sdam(epm, sdam);
if (!sdam)
return ERR_PTR(-ENODEV);
}
rc = epm_validate_data_sdam_sequence_matching(epm, sdam);
while (idx < (MAX_EPM_SDAM - 2) && rc != 0) {
sdam = get_prev_data_sdam(epm, sdam);
if (!sdam)
return ERR_PTR(-ENODEV);
rc = epm_validate_data_sdam_sequence_matching(epm, sdam);
if (!rc)
break;
idx++;
}
if (idx >= (MAX_EPM_SDAM - 2)) {
dev_err(epm->dev, "No matching data sdam\n");
return ERR_PTR(-EBUSY);
}
return sdam;
}
static void epm_channel_data_update(struct epm_device *epm_dev,
uint8_t lsb, uint8_t msb, int idx, u64 ts)
{
epm_dev->last_data[idx] = (msb << 8) | lsb;
epm_dev->last_data_uw[idx] = EPM_GET_POWER_UW_FROM_ADC((msb << 8) | lsb);
epm_dev->time_stamp = ts;
EPM_DBG(epm_dev->priv, "epm[%s]:1s power[%d]:%duw msb:0x%x lsb:0x%x",
epm_dev->name, idx, epm_dev->last_data_uw[idx],
msb, lsb);
}
static int qti_epm_read_acat_data_common(struct epm_priv *epm,
struct epm_device *epm_dev, uint16_t offset,
size_t size, bool epm_full)
{
uint8_t data[MAX_SDAM_DATA] = {0};
struct epm_sdam *sdam = NULL;
int rc = 0, data_idx = 0;
struct epm_device *epm_dev_tmp;
do {
sdam = get_next_valid_data_1s_sdam(epm, sdam);
if (IS_ERR(sdam))
return PTR_ERR(sdam);
rc = epm_sdam_nvmem_read(epm, sdam, offset, size, data);
if (rc < 0)
return rc;
if (!epm_dev && size > EPM_DATA_BYTE_SIZE) {
epm->all_1s_read_ts = sched_clock();
list_for_each_entry(
epm_dev_tmp, &epm->epm_dev_head, epm_node) {
if (!epm_dev_tmp->enabled)
continue;
if (epm_dev_tmp->data_offset >= (offset + size))
continue;
epm_channel_data_update(epm_dev_tmp,
data[epm_dev_tmp->data_offset],
data[epm_dev_tmp->data_offset + 1],
data_idx, epm->all_1s_read_ts);
}
} else if (epm_dev && size == EPM_DATA_BYTE_SIZE) {
epm_channel_data_update(epm_dev, data[0], data[1],
0, sched_clock());
}
data_idx++;
} while (epm_full && data_idx < EPM_MAX_DATA_MAX &&
data_idx < epm->max_data);
return rc;
}
int qti_epm_update_acat_full_data(struct epm_priv *epm)
{
int rc = 0;
mutex_lock(&epm->sec_read_lock);
if (epm_is_need_hw_read(epm->all_1s_read_ts))
rc = qti_epm_read_acat_data_common(epm, NULL,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1,
epm->last_ch_offset + 2, true);
mutex_unlock(&epm->sec_read_lock);
return rc;
}
static int qti_epm_read_acat_1s_channel(struct epm_device *epm_dev, u64 *power_uw)
{
struct epm_priv *epm = epm_dev->priv;
int rc = 0;
if (epm_dev->data_offset > epm->last_ch_offset)
return -EINVAL;
mutex_lock(&epm->sec_read_lock);
if (epm_is_need_hw_read(epm_dev->time_stamp))
rc = qti_epm_read_acat_data_common(epm, epm_dev,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1
+ epm_dev->data_offset, EPM_DATA_BYTE_SIZE,
false);
mutex_unlock(&epm->sec_read_lock);
if (rc >= 0 || rc == -EBUSY) {
rc = 0;
*power_uw = epm_dev->last_data_uw[0];
}
return rc;
}
static int qti_epm_get_power(struct epm_device *epm_dev,
enum epm_data_type type, u64 *power_uw)
{
int rc = 0;
mutex_lock(&epm_dev->lock);
switch (type) {
case EPM_1S_DATA:
rc = qti_epm_read_acat_1s_channel(epm_dev, power_uw);
break;
case EPM_10S_AVG_DATA:
rc = qti_epm_read_acat_10s_avg_channel(epm_dev, power_uw);
break;
default:
dev_err(epm_dev->priv->dev,
"No valid epm data type, type:%d\n", type);
rc = -EINVAL;
break;
}
mutex_unlock(&epm_dev->lock);
return rc;
}
static void epm_temp_data_update(struct epm_tz_device *epm_tz,
uint8_t data, u64 ts)
{
epm_tz->last_temp = EPM_GET_TEMP_MC_FROM_ADC(data);
epm_tz->time_stamp = ts;
EPM_DBG(epm_tz->priv, "epm tz[%d]:temp_adc:0x%x temp:%d mC",
epm_tz->offset + 1, data, epm_tz->last_temp);
}
static int qti_epm_read_tz_common(struct epm_priv *epm,
struct epm_tz_device *epm_tz, uint16_t offset, size_t size)
{
uint8_t data[EPM_TZ_CH_MAX] = {0};
struct epm_sdam *sdam = NULL;
int rc = 0, data_idx = 0;
sdam = get_next_valid_data_1s_sdam(epm, sdam);
if (IS_ERR(sdam))
return PTR_ERR(sdam);
rc = epm_sdam_nvmem_read(epm, sdam, offset, size, data);
if (rc < 0)
return rc;
if (!epm_tz && size > EPM_TZ_BYTE_SIZE) {
epm->all_tz_read_ts = sched_clock();
for (data_idx = 0; data_idx < epm->dt_tz_cnt; data_idx++) {
epm_tz = &epm->epm_tz[data_idx];
if (epm_tz->offset >= (offset + size))
continue;
epm_temp_data_update(epm_tz,
data[epm_tz->offset], epm->all_tz_read_ts);
}
} else if (epm_tz && size == EPM_TZ_BYTE_SIZE) {
epm_temp_data_update(epm_tz, data[0], sched_clock());
}
return rc;
}
static int qti_epm_read_1s_temp(struct epm_tz_device *epm_tz, int *temp)
{
struct epm_priv *epm = epm_tz->priv;
int rc = 0;
if (epm_is_need_hw_read(epm_tz->time_stamp))
rc = qti_epm_read_tz_common(epm, epm_tz,
EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_DIE_TEMP_SID1
+ epm_tz->offset, 1);
if (rc >= 0 || rc == -EBUSY) {
rc = 0;
*temp = epm_tz->last_temp;
}
return rc;
}
static int qti_epm_get_temp(struct epm_tz_device *epm_tz, int *temp)
{
int rc = 0;
mutex_lock(&epm_tz->lock);
rc = qti_epm_read_1s_temp(epm_tz, temp);
mutex_unlock(&epm_tz->lock);
return rc;
}
static int qti_epm_read_data_update(struct epm_priv *epm)
{
switch (qti_epm_get_mode(epm)) {
case EPM_ACAT_MODE:
qti_epm_update_acat_10s_avg_full_data(epm);
break;
default:
break;
}
return 0;
}
static void qti_epm_update_avg_data(struct work_struct *work)
{
struct epm_priv *epm = container_of(work, struct epm_priv,
avg_data_work.work);
qti_epm_update_acat_10s_avg_full_data(epm);
}
static irqreturn_t epm_sdam_irq_handler(int irq, void *data)
{
struct epm_priv *epm = data;
qti_epm_read_data_update(epm);
return IRQ_HANDLED;
}
static int get_dt_index_from_ppid(struct epm_device *epm_dev)
{
uint16_t ppid = 0, i = 0;
struct epm_priv *epm = epm_dev->priv;
if (!epm_dev->enabled || !epm->dt_reg_cnt)
return -EINVAL;
ppid = epm_dev->sid << 8 | epm_dev->pid;
for (i = 0; i < epm->dt_reg_cnt; i++) {
if (ppid == epm->reg_ppid_map[i])
return i;
}
return -ENODEV;
}
static int qti_epm_config_sdam_read(struct epm_priv *epm)
{
uint8_t *config_sdam = NULL;
struct epm_device *epm_dev = NULL;
int rc = 0;
uint8_t conf_idx, data_idx;
if (!epm->sdam[CONFIG_SDAM].nvmem) {
dev_err(epm->dev, "Invalid sdam nvmem\n");
return -EINVAL;
}
config_sdam = devm_kcalloc(epm->dev, MAX_CONFIG_SDAM_DATA,
sizeof(*config_sdam), GFP_KERNEL);
if (!config_sdam)
return -ENOMEM;
rc = epm_sdam_nvmem_read(epm, &epm->sdam[CONFIG_SDAM],
EPM_CONFIG_SDAM_BASE_OFF,
MAX_CONFIG_SDAM_DATA, config_sdam);
if (rc < 0)
return rc;
epm->g_enabled = config_sdam[CONFIG_SDAM_EPM_MODE] & BIT(7);
if (!epm->g_enabled) {
dev_err(epm->dev, "pmic epm is in disabled state, reg:0x%x\n",
config_sdam[CONFIG_SDAM_EPM_MODE]);
return -ENODEV;
}
epm->mode = config_sdam[CONFIG_SDAM_EPM_MODE] & BIT(0);
epm->max_data = config_sdam[CONFIG_SDAM_MAX_DATA];
epm->last_sdam_pid = config_sdam[CONFIG_SDAM_LAST_FULL_SDAM];
epm->config_sdam_data = config_sdam;
/* Parse per-channel config to discover enabled channels */
for (conf_idx = CONFIG_SDAM_CONFIG_1, data_idx = 0;
conf_idx <= CONFIG_SDAM_CONFIG_48;
conf_idx += 2, data_idx += EPM_DATA_BYTE_SIZE) {
const char *reg_name;
if (!(config_sdam[conf_idx] & EPM_CH_ENABLE_MASK))
continue;
epm->num_reg++;
epm_dev = devm_kzalloc(epm->dev, sizeof(*epm_dev), GFP_KERNEL);
if (!epm_dev)
return -ENOMEM;
epm_dev->enabled = config_sdam[conf_idx] & EPM_CH_ENABLE_MASK ?
true : false;
epm_dev->sid = config_sdam[conf_idx] & EPM_SID_MASK;
epm_dev->gang_num = config_sdam[conf_idx] & EPM_GANG_NUM_MASK;
epm_dev->pid = config_sdam[conf_idx + 1];
epm_dev->priv = epm;
epm_dev->data_offset = data_idx;
mutex_init(&epm_dev->lock);
if (data_idx > epm->last_ch_offset)
epm->last_ch_offset = data_idx;
rc = get_dt_index_from_ppid(epm_dev);
if (rc < 0 || rc >= epm->dt_reg_cnt) {
dev_err(epm->dev, "No matching channel ppid, rc:%d\n",
rc);
return rc;
}
rc = of_property_read_string_index(epm->dev->of_node,
"qcom,reg-ppid-names", rc, &reg_name);
if (rc < 0)
return rc;
dev_dbg(epm->dev, "%s: epm channel:%s off:0x%x\n", __func__,
reg_name, data_idx);
strscpy(epm_dev->name, reg_name, sizeof(epm_dev->name));
list_add(&epm_dev->epm_node, &epm->epm_dev_head);
}
return 0;
}
static int initialize_epm_tz(struct epm_priv *epm)
{
struct epm_tz_device *epm_tz = NULL;
int tz_idx = 0;
if (!epm->dt_tz_cnt || epm->dt_tz_cnt > EPM_TZ_CH_MAX)
return 0;
epm_tz = devm_kcalloc(epm->dev, epm->dt_tz_cnt,
sizeof(*epm_tz), GFP_KERNEL);
if (!epm_tz)
return -ENOMEM;
for (tz_idx = 0; tz_idx < epm->dt_tz_cnt; tz_idx++) {
epm_tz[tz_idx].priv = epm;
epm_tz[tz_idx].offset = tz_idx;
mutex_init(&epm_tz[tz_idx].lock);
}
epm->epm_tz = epm_tz;
return 0;
}
static int epm_get_sdam_nvmem(struct device *dev, struct epm_sdam *sdam,
char *sdam_name)
{
int rc = 0;
sdam->nvmem = devm_nvmem_device_get(dev, sdam_name);
if (IS_ERR(sdam->nvmem)) {
rc = PTR_ERR(sdam->nvmem);
if (rc != -EPROBE_DEFER)
dev_err(dev, "Failed to get nvmem device, rc=%d\n",
rc);
sdam->nvmem = NULL;
return rc;
}
mutex_init(&sdam->lock);
return rc;
}
static int epm_parse_sdam_data(struct epm_priv *epm)
{
int rc = 0;
char buf[20];
rc = of_property_count_strings(epm->dev->of_node, "nvmem-names");
if (rc < 0) {
dev_err(epm->dev, "Could not find nvmem device\n");
return rc;
}
if (rc > MAX_EPM_SDAM) {
dev_err(epm->dev, "Invalid num of SDAMs:%d\n", rc);
return -EINVAL;
}
epm->num_sdams = rc;
epm->sdam = devm_kcalloc(epm->dev, epm->num_sdams,
sizeof(*epm->sdam), GFP_KERNEL);
if (!epm->sdam)
return -ENOMEM;
/* Check for config sdam */
epm->sdam[0].id = CONFIG_SDAM;
scnprintf(buf, sizeof(buf), "epm-config-sdam");
rc = epm_get_sdam_nvmem(epm->dev, &epm->sdam[0], buf);
if (rc < 0)
return rc;
/* Check 10s avg sdam */
epm->sdam[1].id = DATA_AVG_SDAM;
scnprintf(buf, sizeof(buf), "epm-10s-avg-sdam");
rc = epm_get_sdam_nvmem(epm->dev, &epm->sdam[1], buf);
if (rc < 0)
return rc;
return 0;
}
static int epm_parse_dt(struct epm_priv *epm)
{
struct platform_device *pdev;
int rc = 0;
uint32_t val = 0;
struct device_node *np = epm->dev->of_node;
/* 1s data is not enabled yet, hence below DT is optional for now */
rc = of_property_read_u32(np, "qcom,data-sdam-base-id", &val);
if (rc < 0)
dev_dbg(epm->dev, "Failed to get sdam base, rc = %d\n", rc);
epm->data_1s_base_pid = val;
rc = of_property_count_strings(np, "qcom,reg-ppid-names");
if (rc < 1 || rc >= EPM_POWER_CH_MAX) {
dev_err(epm->dev,
"Invalid ppid name mapping count, rc=%d\n", rc);
return rc < 0 ? rc : -EINVAL;
}
epm->dt_reg_cnt = rc;
rc = of_property_count_elems_of_size(np, "qcom,reg-ppid-ids",
sizeof(u16));
if (rc < 1 || rc >= EPM_POWER_CH_MAX || rc != epm->dt_reg_cnt) {
dev_err(epm->dev,
"Invalid ppid mapping count, rc = %d strings:%d\n",
rc, epm->dt_reg_cnt);
return rc < 0 ? rc : -EINVAL;
}
rc = of_property_read_u16_array(np, "qcom,reg-ppid-ids",
epm->reg_ppid_map, epm->dt_reg_cnt);
if (rc < 0) {
dev_err(epm->dev,
"Failed to read ppid mapping array, rc = %d\n", rc);
return rc;
}
rc = of_property_read_u8(np, "#qcom,epm-tz-sensor", &epm->dt_tz_cnt);
if (rc < 0)
dev_dbg(epm->dev,
"Failed to read epm tz sensor count, rc = %d\n", rc);
rc = epm_parse_sdam_data(epm);
if (rc < 0)
return rc;
pdev = of_find_device_by_node(np);
if (!pdev) {
dev_err(epm->dev, "Invalid pdev\n");
return -ENODEV;
}
rc = platform_get_irq(pdev, 0);
if (rc <= 0)
dev_dbg(epm->dev, "Failed to get epm irq, rc=%d\n", rc);
epm->irq = rc;
return 0;
}
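/*
 * Illustrative devicetree snippet for the properties parsed above. This
 * is only a sketch; the node name, phandles and all values below are
 * placeholders and not a binding definition:
 *
 *	epm@0 {
 *		compatible = "qcom,epm-devices";
 *		nvmem = <&config_sdam>, <&avg_sdam>;
 *		nvmem-names = "epm-config-sdam", "epm-10s-avg-sdam";
 *		qcom,reg-ppid-names = "vreg-example";
 *		qcom,reg-ppid-ids = /bits/ 16 <0x0100>;
 *		#qcom,epm-tz-sensor = /bits/ 8 <2>;
 *		qcom,data-sdam-base-id = <0x70>;
 *	};
 */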
static int qti_epm_hw_init(struct epm_priv *epm)
{
int rc;
if (epm->initialized)
return 0;
mutex_init(&epm->sec_read_lock);
mutex_init(&epm->avg_read_lock);
INIT_LIST_HEAD(&epm->epm_dev_head);
INIT_DELAYED_WORK(&epm->avg_data_work, qti_epm_update_avg_data);
rc = epm_parse_dt(epm);
if (rc < 0) {
dev_err(epm->dev, "Failed to parse epm rc=%d\n", rc);
return rc;
}
rc = qti_epm_config_sdam_read(epm);
if (rc < 0) {
dev_err(epm->dev, "Failed to parse config sdam rc=%d\n", rc);
return rc;
}
if (epm->irq > 0) {
rc = devm_request_threaded_irq(epm->dev, epm->irq,
NULL, epm_sdam_irq_handler,
IRQF_ONESHOT, "qti_epm_irq", epm);
if (rc < 0) {
dev_err(epm->dev,
"Failed to request IRQ for epm, rc=%d\n", rc);
return rc;
}
}
rc = initialize_epm_tz(epm);
if (rc < 0)
return rc;
epm->initialized = true;
/* Update first reading for all channels */
qti_epm_read_data_update(epm);
return 0;
}
static void qti_epm_hw_release(struct epm_priv *epm)
{
}
struct epm_ops epm_hw_ops = {
.init = qti_epm_hw_init,
.get_mode = qti_epm_get_mode,
.get_power = qti_epm_get_power, /* only for ACAT mode */
.get_temp = qti_epm_get_temp,
.release = qti_epm_hw_release,
};

drivers/powercap/qti_epm_interface.c (new file)

@@ -0,0 +1,268 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "qti_epm.h"
#define EPM_HW "qti-epm-hw"
static inline struct epm_device_pz_data *to_epm_dev_pz(struct powercap_zone *pz)
{
return container_of(pz, struct epm_device_pz_data, pz);
}
static const char * const constraint_name[] = {
"dummy",
};
static int epm_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct epm_tz_device *epm_tz = (struct epm_tz_device *)tz->devdata;
struct epm_priv *epm = epm_tz->priv;
return epm->ops->get_temp(epm_tz, temp);
}
static const struct thermal_zone_device_ops epm_thermal_of_ops = {
.get_temp = epm_get_temp,
};
static int thermal_zone_register(struct epm_priv *epm)
{
int idx;
struct epm_tz_device *epm_tz = NULL;
for (idx = 0; idx < epm->dt_tz_cnt; idx++) {
epm_tz = &epm->epm_tz[idx];
epm_tz->tz = devm_thermal_of_zone_register(
epm->dev, idx, &epm->epm_tz[idx],
&epm_thermal_of_ops);
if (IS_ERR(epm_tz->tz)) {
epm_tz->tz = NULL;
continue;
}
}
return 0;
}
static int epm_get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window)
{
return -EOPNOTSUPP;
}
static int epm_set_time_window_us(struct powercap_zone *pcz, int cid, u64 window)
{
return -EOPNOTSUPP;
}
static int epm_get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
{
struct epm_device_pz_data *epm_pz = to_epm_dev_pz(pcz);
struct epm_device *epm_dev = epm_pz->epm_dev;
struct epm_priv *epm = epm_dev->priv;
if (epm->ops->get_max_power)
return epm->ops->get_max_power(epm_dev, max_power_uw);
return -EOPNOTSUPP;
}
static int epm_get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
{
struct epm_device_pz_data *epm_pz = to_epm_dev_pz(pcz);
struct epm_device *epm_dev = epm_pz->epm_dev;
struct epm_priv *epm = epm_dev->priv;
if (epm->ops->get_power)
return epm->ops->get_power(epm_dev, epm_pz->type, power_uw);
return -EOPNOTSUPP;
}
static int epm_release_zone(struct powercap_zone *pcz)
{
struct epm_device_pz_data *epm_pz = to_epm_dev_pz(pcz);
struct epm_device *epm_dev = epm_pz->epm_dev;
struct epm_priv *epm = epm_dev->priv;
if (epm->ops->release)
epm->ops->release(epm);
return 0;
}
static int epm_get_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 *power_limit)
{
return -EOPNOTSUPP;
}
static int epm_set_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 power_limit)
{
return -EOPNOTSUPP;
}
static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
{
return constraint_name[cid];
}
static int epm_get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
{
struct epm_device_pz_data *epm_pz = to_epm_dev_pz(pcz);
struct epm_device *epm_dev = epm_pz->epm_dev;
struct epm_priv *epm = epm_dev->priv;
if (epm->ops->get_max_power)
return epm->ops->get_max_power(epm_dev, max_power);
else
return -EOPNOTSUPP;
}
static struct powercap_zone_constraint_ops constraint_ops = {
.set_power_limit_uw = epm_set_power_limit_uw,
.get_power_limit_uw = epm_get_power_limit_uw,
.set_time_window_us = epm_set_time_window_us,
.get_time_window_us = epm_get_time_window_us,
.get_max_power_uw = epm_get_max_power_uw,
.get_name = get_constraint_name,
};
static struct powercap_zone_ops zone_ops = {
.get_max_power_range_uw = epm_get_max_power_range_uw,
.get_power_uw = epm_get_power_uw,
.release = epm_release_zone,
};
static int powercap_register(struct epm_priv *epm)
{
struct epm_device *epm_dev;
struct powercap_zone *pcz = NULL;
epm->pct = powercap_register_control_type(NULL, "epm", NULL);
if (IS_ERR(epm->pct)) {
dev_err(epm->dev, "Failed to register control type\n");
return PTR_ERR(epm->pct);
}
list_for_each_entry(epm_dev, &epm->epm_dev_head, epm_node) {
if (!epm_dev->enabled)
continue;
epm_dev->epm_pz[EPM_10S_AVG_DATA].type = EPM_10S_AVG_DATA;
epm_dev->epm_pz[EPM_10S_AVG_DATA].epm_dev = epm_dev;
pcz = powercap_register_zone(
&epm_dev->epm_pz[EPM_10S_AVG_DATA].pz,
epm->pct, epm_dev->name, NULL, &zone_ops, 1,
&constraint_ops);
if (IS_ERR(pcz))
return PTR_ERR(pcz);
}
return 0;
}
static int epm_hw_device_probe(struct platform_device *pdev)
{
int ret = 0;
struct epm_priv *epm;
epm = devm_kzalloc(&pdev->dev, sizeof(*epm), GFP_KERNEL);
if (!epm)
return -ENOMEM;
epm->dev = &pdev->dev;
epm->ops = &epm_hw_ops;
platform_set_drvdata(pdev, epm);
epm->ipc_log = ipc_log_context_create(IPC_LOGPAGES, "qti_epm", 0);
if (!epm->ipc_log)
dev_err(epm->dev, "%s: unable to create IPC Logging for %s\n",
__func__, "qti_epm");
if (!epm->ops || !epm->ops->init || !epm->ops->get_mode ||
!epm->ops->get_power || !epm->ops->release)
return -EINVAL;
ret = epm->ops->init(epm);
if (ret < 0) {
dev_err(&pdev->dev, "%s: init failed\n", __func__);
return ret;
}
switch (epm->ops->get_mode(epm)) {
case EPM_ACAT_MODE:
ret = powercap_register(epm);
break;
default:
break;
}
if (epm->dt_tz_cnt)
thermal_zone_register(epm);
return ret;
}
static int epm_hw_device_remove(struct platform_device *pdev)
{
struct epm_priv *epm = platform_get_drvdata(pdev);
struct epm_device *epm_dev;
list_for_each_entry(epm_dev, &epm->epm_dev_head, epm_node) {
/* Only the 10s average zone is registered at probe */
if (epm->pct && epm_dev->enabled)
powercap_unregister_zone(epm->pct,
&epm_dev->epm_pz[EPM_10S_AVG_DATA].pz);
}
if (epm->pct)
powercap_unregister_control_type(epm->pct);
if (epm->ops->release)
epm->ops->release(epm);
return 0;
}
static const struct of_device_id epm_hw_device_match[] = {
{.compatible = "qcom,epm-devices"},
{}
};
static struct platform_driver epm_hw_device_driver = {
.probe = epm_hw_device_probe,
.remove = epm_hw_device_remove,
.driver = {
.name = EPM_HW,
.of_match_table = epm_hw_device_match,
},
};
module_platform_driver(epm_hw_device_driver);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. EPM Hardware driver");
MODULE_LICENSE("GPL");