From a659f1264c8550773728cf8a30dc81bf27ddebe7 Mon Sep 17 00:00:00 2001 From: Panicker Harish Date: Mon, 8 Jul 2024 17:40:40 +0530 Subject: [PATCH 001/117] serial: msm_geni_serial: Prevent geni register access while suspend is in progress While runtime suspend is in progress and before it completes, if mctrl is invoked, there is a possibility that it may access the geni register without the voting clock. To address this issue, add mutual exclusion to prevent mctrl from accessing the geni register while suspension is in progress. Change-Id: I3b2d5f6314680ce6cc91c8f0b50f11c37d896868 Signed-off-by: Mehul Raninga Signed-off-by: Panicker Harish --- drivers/tty/serial/msm_geni_serial.c | 55 ++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 28cc64726835..ed464fe97bc6 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -6,19 +6,22 @@ #include #include +#include #include #include -#include -#include #include +#include #include +#include #include #include #include #include #include +#include #include #include +#include #include #include #include @@ -28,9 +31,6 @@ #include #include #include -#include -#include -#include #include static bool con_enabled = IS_ENABLED(CONFIG_SERIAL_MSM_GENI_CONSOLE_DEFAULT_ENABLED); @@ -459,6 +459,12 @@ struct msm_geni_serial_port { int hs_uart_operation; struct msm_geni_serial_ssr uart_ssr; struct geni_se_rsc rsc; + /** + * mutex to prevent race condition between runtime + * suspend and get_mctrl which tries to access IOS registers + * when runtime suspend was in progress + */ + struct mutex suspend_resume_lock; }; static const struct uart_ops msm_geni_serial_pops; @@ -1407,17 +1413,26 @@ static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport) return 0; } - if (!uart_console(uport) && device_pending_suspend(uport)) { - UART_LOG_DBG(port->ipc_log_misc, uport->dev, - 
"%s.Device is suspended, %s\n", - __func__, current->comm); - return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; + if (!uart_console(uport)) { + if (!mutex_trylock(&port->suspend_resume_lock)) { + UART_LOG_DBG(port->ipc_log_misc, uport->dev, + "%s.Device is being suspended, %s\n", + __func__, current->comm); + return mctrl; + } + if (device_pending_suspend(uport)) { + UART_LOG_DBG(port->ipc_log_misc, uport->dev, + "%s.Device is suspended, %s\n", + __func__, current->comm); + mutex_unlock(&port->suspend_resume_lock); + return mctrl | TIOCM_CTS; + } } geni_ios = geni_read_reg(uport->membase, SE_GENI_IOS); if (!(geni_ios & IO2_DATA_IN)) mctrl |= TIOCM_CTS; - else + else if (!uart_console(uport)) msm_geni_update_uart_error_code(port, SOC_ERROR_START_TX_IOS_SOC_RFR_HIGH); if (!port->manual_flow) @@ -1425,6 +1440,10 @@ static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport) UART_LOG_DBG(port->ipc_log_misc, uport->dev, "%s: geni_ios:0x%x, mctrl:0x%x\n", __func__, geni_ios, mctrl); + + if (!uart_console(uport)) + mutex_unlock(&port->suspend_resume_lock); + return mctrl; } @@ -5519,6 +5538,8 @@ static int msm_geni_serial_probe(struct platform_device *pdev) if (!dev_port->is_console) spin_lock_init(&dev_port->rx_lock); + mutex_init(&dev_port->suspend_resume_lock); + ret = uart_add_one_port(drv, uport); if (ret) dev_err(&pdev->dev, "Failed to register uart_port: %d\n", ret); @@ -5615,7 +5636,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) unsigned long long start_time; u32 geni_status = geni_read_reg(port->uport.membase, SE_GENI_STATUS); - + mutex_lock(&port->suspend_resume_lock); UART_LOG_DBG(port->ipc_log_pwr, dev, "%s: Start geni_status : 0x%x\n", __func__, geni_status); @@ -5641,7 +5662,8 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) */ if (port->wakeup_byte && port->wakeup_irq) msm_geni_serial_allow_rx(port); - return -EBUSY; + ret = -EBUSY; + goto exit_runtime_suspend; } } /* @@ -5658,7 +5680,8 @@ static int 
msm_geni_serial_runtime_suspend(struct device *dev) */ if (port->wakeup_byte && port->wakeup_irq) msm_geni_serial_allow_rx(port); - return -EBUSY; + ret = -EBUSY; + goto exit_runtime_suspend; } geni_status = geni_read_reg(port->uport.membase, SE_GENI_STATUS); @@ -5689,7 +5712,8 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) UART_LOG_DBG(port->ipc_log_pwr, dev, "%s: return, stop_rx_seq busy\n", __func__); enable_irq(port->uport.irq); - return -EBUSY; + ret = -EBUSY; + goto exit_runtime_suspend; } } if (count) @@ -5725,6 +5749,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) UART_LOG_DBG(port->ipc_log_pwr, dev, "%s: End %d\n", __func__, ret); __pm_relax(port->geni_wake); exit_runtime_suspend: + mutex_unlock(&port->suspend_resume_lock); return ret; } From e6d9f686f49d4f46fed4321cd2ff41117a44b6ce Mon Sep 17 00:00:00 2001 From: Pranav Mahesh Phansalkar Date: Tue, 28 May 2024 11:48:27 +0530 Subject: [PATCH 002/117] glink: native: Check the return value of native rx Check the return value of native rx function in threaded interrupt. Read the data from fifo until the return value is 0. 
Change-Id: I7a66707ed942fd5e37c496db91709b9ce0a306d8 Signed-off-by: Pranav Mahesh Phansalkar --- drivers/rpmsg/qcom_glink_native.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 3b869e0599e0..04f31ceac30e 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1638,8 +1638,11 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data) static irqreturn_t qcom_glink_native_thread_intr(int irq, void *data) { struct qcom_glink *glink = data; + int ret; - qcom_glink_native_rx(glink, 0); + do { + ret = qcom_glink_native_rx(glink, 0); + } while (ret >= sizeof(struct glink_msg)); return IRQ_HANDLED; } From b06481e0044ef208dcee8adc7f7d2306355bd17e Mon Sep 17 00:00:00 2001 From: Pratham Pratap Date: Wed, 24 Jul 2024 15:02:22 +0530 Subject: [PATCH 003/117] usb: phy: Enable auto-resume WA if repeater doesn't support Add snapshot of support of auto-resume WA for the revision of repeaters that doesn't support the feature. commit c5b632332b948 ("usb: phy: Enable auto-resume WA if repeater doesn't support"). Change-Id: Ie0556bce34bbc2d321ed821c6beb7ba4a43f3ffd Signed-off-by: Pratham Pratap Signed-off-by: Shubham Chouhan --- include/linux/usb/repeater.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/usb/repeater.h b/include/linux/usb/repeater.h index 7280a0a3a1a7..45cb44349080 100644 --- a/include/linux/usb/repeater.h +++ b/include/linux/usb/repeater.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __LINUX_USB_REPEATER_H #define __LINUX_USB_REPEATER_H @@ -9,6 +9,8 @@ #include #include +#define UR_AUTO_RESUME_SUPPORTED BIT(0) + struct usb_repeater { struct device *dev; const char *label; From 7e6adc573e69ac6b7894343f9becaa230cb083c7 Mon Sep 17 00:00:00 2001 From: Pratham Pratap Date: Wed, 24 Jul 2024 15:13:53 +0530 Subject: [PATCH 004/117] usb: repeater: Add eUSB2 i2c repeater driver Add snapshot of support of eUSB2 i2c repeater driver. This change adds eUSB2 i2c repeater driver.This driver provides reset, initialization and power up/down interfaces for eUSB2 repeater. commit 5732d2fbffeb ("usb: repeater: Add eUSB2 i2c repeater driver"). Change-Id: I88be05f81bba3c670eb981f6085a2f11fadd6a7f Signed-off-by: Pratham Pratap Signed-off-by: Shubham Chouhan --- drivers/usb/repeater/Kconfig | 12 + drivers/usb/repeater/Makefile | 1 + drivers/usb/repeater/repeater-i2c-eusb2.c | 446 ++++++++++++++++++++++ 3 files changed, 459 insertions(+) create mode 100644 drivers/usb/repeater/repeater-i2c-eusb2.c diff --git a/drivers/usb/repeater/Kconfig b/drivers/usb/repeater/Kconfig index c1b8ce62c586..6081abb16d67 100644 --- a/drivers/usb/repeater/Kconfig +++ b/drivers/usb/repeater/Kconfig @@ -21,5 +21,17 @@ config QTI_PMIC_EUSB2_REPEATER provides support to reset, initialiated, power up and configure eUSB2 repeater for USB HS/FS/LS functionality where eUSB2 repeater is used. + To compile this driver as a module, choose M here. + +config I2C_EUSB2_REPEATER + tristate "eUSB2 i2c repeater driver" + depends on ARCH_QCOM + depends on USB_REPEATER + depends on I2C + help + Enable this to support the I2C EUSB2 REPEATER. This driver provides + support to reset, initialize, power up and configure eUSB2 repeater + for USB HS/FS/LS functionality where eUSB2 repeater is used. + To compile this driver as a module, choose M here. 
endmenu diff --git a/drivers/usb/repeater/Makefile b/drivers/usb/repeater/Makefile index 7933fb5fd14d..81392c31d4c4 100644 --- a/drivers/usb/repeater/Makefile +++ b/drivers/usb/repeater/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_USB_REPEATER) += repeater.o obj-$(CONFIG_QTI_PMIC_EUSB2_REPEATER) += repeater-qti-pmic-eusb2.o +obj-$(CONFIG_I2C_EUSB2_REPEATER) += repeater-i2c-eusb2.o diff --git a/drivers/usb/repeater/repeater-i2c-eusb2.c b/drivers/usb/repeater/repeater-i2c-eusb2.c new file mode 100644 index 000000000000..3473b11af97a --- /dev/null +++ b/drivers/usb/repeater/repeater-i2c-eusb2.c @@ -0,0 +1,446 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EUSB2_3P0_VOL_MIN 3075000 /* uV */ +#define EUSB2_3P0_VOL_MAX 3300000 /* uV */ +#define EUSB2_3P0_HPM_LOAD 3500 /* uA */ + +#define EUSB2_1P8_VOL_MIN 1800000 /* uV */ +#define EUSB2_1P8_VOL_MAX 1800000 /* uV */ +#define EUSB2_1P8_HPM_LOAD 32000 /* uA */ + +/* NXP eUSB2 repeater registers */ +#define RESET_CONTROL 0x01 +#define LINK_CONTROL1 0x02 +#define LINK_CONTROL2 0x03 +#define eUSB2_RX_CONTROL 0x04 +#define eUSB2_TX_CONTROL 0x05 +#define USB2_RX_CONTROL 0x06 +#define USB2_TX_CONTROL1 0x07 +#define USB2_TX_CONTROL2 0x08 +#define USB2_HS_TERMINATION 0x09 +#define RAP_SIGNATURE 0x0D +#define VDX_CONTROL 0x0E +#define DEVICE_STATUS 0x0F +#define LINK_STATUS 0x10 +#define REVISION_ID 0x13 +#define CHIP_ID_0 0x14 +#define CHIP_ID_1 0x15 +#define CHIP_ID_2 0x16 + +/* TI eUSB2 repeater registers */ +#define GPIO0_CONFIG 0x00 +#define GPIO1_CONFIG 0x40 +#define UART_PORT1 0x50 +#define EXTRA_PORT1 0x51 +#define U_TX_ADJUST_PORT1 0x70 +#define U_HS_TX_PRE_EMPHASIS_P1 0x71 +#define U_RX_ADJUST_PORT1 0x72 +#define U_DISCONNECT_SQUELCH_PORT1 0x73 +#define E_HS_TX_PRE_EMPHASIS_P1 0x77 
+#define E_TX_ADJUST_PORT1 0x78 +#define E_RX_ADJUST_PORT1 0x79 +#define REV_ID 0xB0 +#define GLOBAL_CONFIG 0xB2 +#define INT_ENABLE_1 0xB3 +#define INT_ENABLE_2 0xB4 +#define BC_CONTROL 0xB6 +#define BC_STATUS_1 0xB7 +#define INT_STATUS_1 0xA3 +#define INT_STATUS_2 0xA4 + +enum eusb2_repeater_type { + TI_REPEATER, + NXP_REPEATER, +}; + +struct i2c_repeater_chip { + enum eusb2_repeater_type repeater_type; +}; + +struct eusb2_repeater { + struct device *dev; + struct usb_repeater ur; + struct regmap *regmap; + const struct i2c_repeater_chip *chip; + u16 reg_base; + struct regulator *vdd18; + struct regulator *vdd3; + bool power_enabled; + + struct gpio_desc *reset_gpiod; + u32 *param_override_seq; + u8 param_override_seq_cnt; +}; + +static const struct regmap_config eusb2_i2c_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xff, +}; + +static int eusb2_i2c_read_reg(struct eusb2_repeater *er, u8 reg, u8 *val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(er->regmap, reg, ®_val); + if (ret < 0) { + dev_err(er->dev, "Failed to read reg:0x%02x ret=%d\n", reg, ret); + return ret; + } + + *val = reg_val; + dev_dbg(er->dev, "read reg:0x%02x val:0x%02x\n", reg, *val); + + return 0; +} + +static int eusb2_i2c_write_reg(struct eusb2_repeater *er, u8 reg, u8 val) +{ + int ret; + + ret = regmap_write(er->regmap, reg, val); + if (ret < 0) { + dev_err(er->dev, "failed to write 0x%02x to reg: 0x%02x ret=%d\n", val, reg, ret); + return ret; + } + + dev_dbg(er->dev, "write reg:0x%02x val:0x%02x\n", reg, val); + + return 0; +} + +static void eusb2_repeater_update_seq(struct eusb2_repeater *er, u32 *seq, u8 cnt) +{ + int i; + + dev_dbg(er->ur.dev, "param override seq count:%d\n", cnt); + for (i = 0; i < cnt; i = i+2) { + dev_dbg(er->ur.dev, "write 0x%02x to 0x%02x\n", seq[i], seq[i+1]); + eusb2_i2c_write_reg(er, seq[i+1], seq[i]); + } +} + +static int eusb2_repeater_power(struct eusb2_repeater *er, bool on) +{ + int ret = 0; + + dev_dbg(er->ur.dev, "%s 
turn %s regulators. power_enabled:%d\n", + __func__, on ? "on" : "off", er->power_enabled); + + if (er->power_enabled == on) { + dev_dbg(er->ur.dev, "regulators are already ON.\n"); + return 0; + } + + if (!on) + goto disable_vdd3; + + ret = regulator_set_load(er->vdd18, EUSB2_1P8_HPM_LOAD); + if (ret < 0) { + dev_err(er->ur.dev, "Unable to set HPM of vdd12:%d\n", ret); + goto err_vdd18; + } + + ret = regulator_set_voltage(er->vdd18, EUSB2_1P8_VOL_MIN, + EUSB2_1P8_VOL_MAX); + if (ret) { + dev_err(er->ur.dev, + "Unable to set voltage for vdd18:%d\n", ret); + goto put_vdd18_lpm; + } + + ret = regulator_enable(er->vdd18); + if (ret) { + dev_err(er->ur.dev, "Unable to enable vdd18:%d\n", ret); + goto unset_vdd18; + } + + ret = regulator_set_load(er->vdd3, EUSB2_3P0_HPM_LOAD); + if (ret < 0) { + dev_err(er->ur.dev, "Unable to set HPM of vdd3:%d\n", ret); + goto disable_vdd18; + } + + ret = regulator_set_voltage(er->vdd3, EUSB2_3P0_VOL_MIN, + EUSB2_3P0_VOL_MAX); + if (ret) { + dev_err(er->ur.dev, + "Unable to set voltage for vdd3:%d\n", ret); + goto put_vdd3_lpm; + } + + ret = regulator_enable(er->vdd3); + if (ret) { + dev_err(er->ur.dev, "Unable to enable vdd3:%d\n", ret); + goto unset_vdd3; + } + + er->power_enabled = true; + pr_debug("%s(): eUSB2 repeater egulators are turned ON.\n", __func__); + return ret; + +disable_vdd3: + ret = regulator_disable(er->vdd3); + if (ret) + dev_err(er->ur.dev, "Unable to disable vdd3:%d\n", ret); + +unset_vdd3: + ret = regulator_set_voltage(er->vdd3, 0, EUSB2_3P0_VOL_MAX); + if (ret) + dev_err(er->ur.dev, + "Unable to set (0) voltage for vdd3:%d\n", ret); + +put_vdd3_lpm: + ret = regulator_set_load(er->vdd3, 0); + if (ret < 0) + dev_err(er->ur.dev, "Unable to set (0) HPM of vdd3\n"); + +disable_vdd18: + ret = regulator_disable(er->vdd18); + if (ret) + dev_err(er->ur.dev, "Unable to disable vdd18:%d\n", ret); + +unset_vdd18: + ret = regulator_set_voltage(er->vdd18, 0, EUSB2_1P8_VOL_MAX); + if (ret) + dev_err(er->ur.dev, + "Unable to 
set (0) voltage for vdd18:%d\n", ret); + +put_vdd18_lpm: + ret = regulator_set_load(er->vdd18, 0); + if (ret < 0) + dev_err(er->ur.dev, "Unable to set LPM of vdd18\n"); + + /* case handling when regulator turning on failed */ + if (!er->power_enabled) + return -EINVAL; + +err_vdd18: + er->power_enabled = false; + dev_dbg(er->ur.dev, "eUSB2 repeater's regulators are turned OFF.\n"); + return ret; +} + +static int eusb2_repeater_init(struct usb_repeater *ur) +{ + struct eusb2_repeater *er = + container_of(ur, struct eusb2_repeater, ur); + const struct i2c_repeater_chip *chip = er->chip; + u8 reg_val; + + switch (chip->repeater_type) { + case TI_REPEATER: + eusb2_i2c_read_reg(er, REV_ID, ®_val); + /* If the repeater revision is B1 disable auto-resume WA */ + if (reg_val == 0x03) + ur->flags |= UR_AUTO_RESUME_SUPPORTED; + break; + case NXP_REPEATER: + eusb2_i2c_read_reg(er, REVISION_ID, ®_val); + break; + default: + dev_err(er->ur.dev, "Invalid repeater\n"); + } + + dev_info(er->ur.dev, "eUSB2 repeater version = 0x%x ur->flags:0x%x\n", reg_val, ur->flags); + + /* override init sequence using devicetree based values */ + if (er->param_override_seq_cnt) + eusb2_repeater_update_seq(er, er->param_override_seq, + er->param_override_seq_cnt); + + dev_info(er->ur.dev, "eUSB2 repeater init\n"); + + return 0; +} + +static int eusb2_repeater_reset(struct usb_repeater *ur, bool bring_out_of_reset) +{ + struct eusb2_repeater *er = + container_of(ur, struct eusb2_repeater, ur); + + dev_dbg(ur->dev, "reset gpio:%s\n", + bring_out_of_reset ? 
"assert" : "deassert"); + gpiod_set_value_cansleep(er->reset_gpiod, bring_out_of_reset); + return 0; +} + +static int eusb2_repeater_powerup(struct usb_repeater *ur) +{ + struct eusb2_repeater *er = + container_of(ur, struct eusb2_repeater, ur); + + return eusb2_repeater_power(er, true); +} + +static int eusb2_repeater_powerdown(struct usb_repeater *ur) +{ + struct eusb2_repeater *er = + container_of(ur, struct eusb2_repeater, ur); + + return eusb2_repeater_power(er, false); +} + +static struct i2c_repeater_chip repeater_chip[] = { + [NXP_REPEATER] = { + .repeater_type = NXP_REPEATER, + }, + [TI_REPEATER] = { + .repeater_type = TI_REPEATER, + } +}; + +static const struct of_device_id eusb2_repeater_id_table[] = { + { + .compatible = "nxp,eusb2-repeater", + .data = &repeater_chip[NXP_REPEATER] + }, + { + .compatible = "ti,eusb2-repeater", + .data = &repeater_chip[TI_REPEATER] + }, + { }, +}; +MODULE_DEVICE_TABLE(of, eusb2_repeater_id_table); + +static int eusb2_repeater_i2c_probe(struct i2c_client *client) +{ + struct eusb2_repeater *er; + struct device *dev = &client->dev; + const struct of_device_id *match; + int ret = 0, num_elem; + + er = devm_kzalloc(dev, sizeof(*er), GFP_KERNEL); + if (!er) { + ret = -ENOMEM; + goto err_probe; + } + + er->dev = dev; + match = of_match_node(eusb2_repeater_id_table, dev->of_node); + er->chip = match->data; + + er->regmap = devm_regmap_init_i2c(client, &eusb2_i2c_regmap); + if (!er->regmap) { + dev_err(dev, "failed to allocate register map\n"); + ret = -EINVAL; + goto err_probe; + } + + devm_regmap_qti_debugfs_register(er->dev, er->regmap); + i2c_set_clientdata(client, er); + + ret = of_property_read_u16(dev->of_node, "reg", &er->reg_base); + if (ret < 0) { + dev_err(dev, "failed to get reg base address:%d\n", ret); + goto err_probe; + } + + er->vdd3 = devm_regulator_get(dev, "vdd3"); + if (IS_ERR(er->vdd3)) { + dev_err(dev, "unable to get vdd3 supply\n"); + ret = PTR_ERR(er->vdd3); + goto err_probe; + } + + er->vdd18 = 
devm_regulator_get(dev, "vdd18"); + if (IS_ERR(er->vdd18)) { + dev_err(dev, "unable to get vdd18 supply\n"); + ret = PTR_ERR(er->vdd18); + goto err_probe; + } + + er->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(er->reset_gpiod)) { + ret = PTR_ERR(er->reset_gpiod); + goto err_probe; + } + + num_elem = of_property_count_elems_of_size(dev->of_node, "qcom,param-override-seq", + sizeof(*er->param_override_seq)); + if (num_elem > 0) { + if (num_elem % 2) { + dev_err(dev, "invalid param_override_seq_len\n"); + ret = -EINVAL; + goto err_probe; + } + + er->param_override_seq_cnt = num_elem; + er->param_override_seq = devm_kcalloc(dev, + er->param_override_seq_cnt, + sizeof(*er->param_override_seq), GFP_KERNEL); + if (!er->param_override_seq) { + ret = -ENOMEM; + goto err_probe; + } + + ret = of_property_read_u32_array(dev->of_node, + "qcom,param-override-seq", + er->param_override_seq, + er->param_override_seq_cnt); + if (ret) { + dev_err(dev, "qcom,param-override-seq read failed %d\n", + ret); + goto err_probe; + } + } + + + er->ur.dev = dev; + + er->ur.init = eusb2_repeater_init; + er->ur.reset = eusb2_repeater_reset; + er->ur.powerup = eusb2_repeater_powerup; + er->ur.powerdown = eusb2_repeater_powerdown; + + ret = usb_add_repeater_dev(&er->ur); + if (ret) + goto err_probe; + + return 0; + +err_probe: + return ret; +} + +static void eusb2_repeater_i2c_remove(struct i2c_client *client) +{ + struct eusb2_repeater *er = i2c_get_clientdata(client); + + usb_remove_repeater_dev(&er->ur); + eusb2_repeater_power(er, false); +} + +static struct i2c_driver eusb2_i2c_repeater_driver = { + .probe_new = eusb2_repeater_i2c_probe, + .remove = eusb2_repeater_i2c_remove, + .driver = { + .name = "eusb2-repeater", + .of_match_table = of_match_ptr(eusb2_repeater_id_table), + }, +}; + +module_i2c_driver(eusb2_i2c_repeater_driver); + +MODULE_DESCRIPTION("eUSB2 i2c repeater driver"); +MODULE_LICENSE("GPL"); From ffe786942e90bf4bc31eff5dbf00455fe5e4e3b3 Mon 
Sep 17 00:00:00 2001 From: Venkata Talluri Date: Sun, 7 Jul 2024 13:20:26 +0530 Subject: [PATCH 005/117] arm64: defconfig: Add initial configs for Neo LA target Add initial configs to generate the gki and consolidated variants of the defconfigs meant to support the Neo LA target. Change-Id: I94cb1d5d56fd3027e3d02a507217b3cbdc1b02a0 Signed-off-by: Venkata Talluri Signed-off-by: Asit Shah --- arch/arm64/configs/vendor/neo_la_GKI.config | 77 ++++++++++++++++ .../configs/vendor/neo_la_consolidate.config | 20 +++++ neo_la.bzl | 87 +++++++++++++++++++ 3 files changed, 184 insertions(+) create mode 100644 arch/arm64/configs/vendor/neo_la_GKI.config create mode 100644 arch/arm64/configs/vendor/neo_la_consolidate.config create mode 100644 neo_la.bzl diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config new file mode 100644 index 000000000000..bca6d11710ef --- /dev/null +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -0,0 +1,77 @@ +CONFIG_ARCH_NEO=y +CONFIG_ARM_SMMU=m +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_SELFTEST=y +CONFIG_EDAC_KRYO_ARM64=m +# CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y +CONFIG_EDAC_QCOM=m +# CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE is not set +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y +# CONFIG_GH_CTRL is not set +# CONFIG_GH_DBL is not set +# CONFIG_GH_GUEST_POPS is not set +# CONFIG_GH_IRQ_LEND is not set +# CONFIG_GH_MEM_NOTIFIER is not set +CONFIG_GH_MSGQ=m +CONFIG_GH_RM_DRV=m +# CONFIG_GH_TLMM_VM_MEM_ACCESS is not set +CONFIG_GH_VIRT_WATCHDOG=m +CONFIG_GUNYAH_DRIVERS=y +# CONFIG_HVC_GUNYAH is not set +CONFIG_HWSPINLOCK_QCOM=m +CONFIG_INIT_ON_FREE_DEFAULT_ON=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_LOCALVERSION="-gki" +# CONFIG_MODULE_SIG_ALL is not set +CONFIG_MSM_BOOT_STATS=m +CONFIG_MSM_CORE_HANG_DETECT=m +CONFIG_MSM_PERFORMANCE=m +CONFIG_MSM_SYSSTATS=m +CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000 +CONFIG_PINCTRL_MSM=m 
+CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m +CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y +CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m +CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_QCOM_COMMAND_DB=m +CONFIG_QCOM_DMABUF_HEAPS=m +CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y +CONFIG_QCOM_DMABUF_HEAPS_CMA=y +CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y +CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y +CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_IOMMU_DEBUG=m +CONFIG_QCOM_IOMMU_UTIL=m +CONFIG_QCOM_LAZY_MAPPING=m +CONFIG_QCOM_LLCC=m +CONFIG_QCOM_LLCC_PERFMON=m +CONFIG_QCOM_LLCC_PMU=m +CONFIG_QCOM_MEMORY_DUMP_V2=m +CONFIG_QCOM_MEM_BUF=m +CONFIG_QCOM_MEM_BUF_DEV=m +CONFIG_QCOM_MEM_HOOKS=m +CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT=y +CONFIG_QCOM_PANIC_ON_PDR_NOTIF_TIMEOUT=y +CONFIG_QCOM_RAMDUMP=m +CONFIG_QCOM_RPMH=m +CONFIG_QCOM_RUN_QUEUE_STATS=m +CONFIG_QCOM_SCM=m +CONFIG_QCOM_SECURE_BUFFER=m +CONFIG_QCOM_SHOW_RESUME_IRQ=m +CONFIG_QCOM_SMEM=m +CONFIG_QCOM_SOCINFO=m +CONFIG_QCOM_SOC_WATCHDOG=m +CONFIG_QCOM_WATCHDOG_BARK_TIME=11000 +CONFIG_QCOM_WATCHDOG_IPI_PING=y +CONFIG_QCOM_WATCHDOG_PET_TIME=9360 +# CONFIG_QCOM_WATCHDOG_USERSPACE_PET is not set +CONFIG_QCOM_WATCHDOG_WAKEUP_ENABLE=y +# CONFIG_QCOM_WCNSS_PIL is not set +# CONFIG_QCOM_WDOG_BITE_EARLY_PANIC is not set +CONFIG_QCOM_WDT_CORE=m +CONFIG_QTEE_SHM_BRIDGE=y +CONFIG_QTI_IOMMU_SUPPORT=m +CONFIG_SCHED_WALT=m +CONFIG_VIRT_DRIVERS=y diff --git a/arch/arm64/configs/vendor/neo_la_consolidate.config b/arch/arm64/configs/vendor/neo_la_consolidate.config new file mode 100644 index 000000000000..83455e7a85ec --- /dev/null +++ b/arch/arm64/configs/vendor/neo_la_consolidate.config @@ -0,0 +1,20 @@ +CONFIG_CMDLINE="stack_depot_disable=off kasan.stacktrace=off cgroup_disable=pressure cgroup.memory=nokmem page_owner=on no_hash_pointers panic_on_taint=0x20" +CONFIG_DEBUG_SPINLOCK=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y +CONFIG_INIT_ON_FREE_DEFAULT_ON=y 
+CONFIG_IOMMU_TLBSYNC_DEBUG=y +CONFIG_LKDTM=m +CONFIG_LOCALVERSION="-consolidate" +CONFIG_LOCKDEP=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOCK_STAT=y +CONFIG_PAGE_POISONING=y +CONFIG_PM_DEBUG=y +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_RUNTIME_TESTING_MENU=y +CONFIG_SCHED_WALT_DEBUG=m +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_MMIO_ACCESS=y +CONFIG_TRACE_PREEMPT_TOGGLE=y diff --git a/neo_la.bzl b/neo_la.bzl new file mode 100644 index 000000000000..2433fe9bcf6e --- /dev/null +++ b/neo_la.bzl @@ -0,0 +1,87 @@ +load(":image_opts.bzl", "boot_image_opts") +load(":msm_kernel_la.bzl", "define_msm_la") +load(":target_variants.bzl", "la_variants") + +target_name = "neo-la" + +def define_neo_la(): + _neo_in_tree_modules = [ + # keep sorted + "drivers/dma-buf/heaps/qcom_dma_heaps.ko", + "drivers/edac/kryo_arm64_edac.ko", + "drivers/edac/qcom_edac.ko", + "drivers/firmware/qcom-scm.ko", + "drivers/hwspinlock/qcom_hwspinlock.ko", + "drivers/iommu/arm/arm-smmu/arm_smmu.ko", + "drivers/iommu/iommu-logger.ko", + "drivers/iommu/msm_dma_iommu_mapping.ko", + "drivers/iommu/qcom_iommu_debug.ko", + "drivers/iommu/qcom_iommu_util.ko", + "drivers/irqchip/msm_show_resume_irq.ko", + "drivers/perf/qcom_llcc_pmu.ko", + "drivers/pinctrl/qcom/pinctrl-msm.ko", + "drivers/power/reset/qcom-dload-mode.ko", + "drivers/power/reset/qcom-reboot-reason.ko", + "drivers/soc/qcom/boot_stats.ko", + "drivers/soc/qcom/cmd-db.ko", + "drivers/soc/qcom/core_hang_detect.ko", + "drivers/soc/qcom/llcc-qcom.ko", + "drivers/soc/qcom/llcc_perfmon.ko", + "drivers/soc/qcom/mem-hooks.ko", + "drivers/soc/qcom/mem_buf/mem_buf.ko", + "drivers/soc/qcom/mem_buf/mem_buf_dev.ko", + "drivers/soc/qcom/memory_dump_v2.ko", + "drivers/soc/qcom/msm_performance.ko", + "drivers/soc/qcom/qcom_ramdump.ko", + "drivers/soc/qcom/qcom_rpmh.ko", + "drivers/soc/qcom/qcom_soc_wdt.ko", + "drivers/soc/qcom/qcom_wdt_core.ko", + "drivers/soc/qcom/rq_stats.ko", + "drivers/soc/qcom/secure_buffer.ko", + "drivers/soc/qcom/smem.ko", + 
"drivers/soc/qcom/socinfo.ko", + "drivers/virt/gunyah/gh_msgq.ko", + "drivers/virt/gunyah/gh_rm_drv.ko", + "drivers/virt/gunyah/gh_virt_wdt.ko", + "kernel/msm_sysstats.ko", + "kernel/sched/walt/sched-walt.ko", + ] + + _neo_consolidate_in_tree_modules = _neo_in_tree_modules + [ + # keep sorted + "drivers/misc/lkdtm/lkdtm.ko", + "kernel/sched/walt/sched-walt-debug.ko", + ] + + kernel_vendor_cmdline_extras = [ + # do not sort + "console=ttyMSM0,115200n8", + "qcom_geni_serial.con_enabled=1", + "bootconfig", + ] + + for variant in la_variants: + board_kernel_cmdline_extras = [] + board_bootconfig_extras = [] + + if variant == "consolidate": + mod_list = _neo_consolidate_in_tree_modules + else: + mod_list = _neo_in_tree_modules + board_kernel_cmdline_extras += ["nosoftlockup"] + kernel_vendor_cmdline_extras += ["nosoftlockup"] + board_bootconfig_extras += ["androidboot.console=0"] + + define_msm_la( + msm_target = target_name, + variant = variant, + in_tree_module_list = mod_list, + boot_image_opts = boot_image_opts( + earlycon_addr = "qcom_geni,0x00a94000", + kernel_vendor_cmdline_extras = kernel_vendor_cmdline_extras, + board_kernel_cmdline_extras = board_kernel_cmdline_extras, + board_bootconfig_extras = board_bootconfig_extras, + ), + #TODO: Need to enable this + #dpm_overlay = True, + ) From 842e992e8f883a78beccdfd4e0861a3a17291ab0 Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Fri, 5 Jul 2024 14:19:12 +0530 Subject: [PATCH 006/117] modules.list.msm.neo-la: Add build configs for Neo LA Add first stage, systemdlkm blocklist and vendor blocklist modules files for Neo LA platform. 
Change-Id: I1ac9e4d56a4c4ac38f335c38ec282a9c4c767aee Signed-off-by: Asit Shah --- modules.list.msm.neo-la | 90 +++++++++++++++++++++++++ modules.systemdlkm_blocklist.msm.neo-la | 2 + modules.vendor_blocklist.msm.neo-la | 67 ++++++++++++++++++ 3 files changed, 159 insertions(+) create mode 100644 modules.list.msm.neo-la create mode 100644 modules.systemdlkm_blocklist.msm.neo-la create mode 100644 modules.vendor_blocklist.msm.neo-la diff --git a/modules.list.msm.neo-la b/modules.list.msm.neo-la new file mode 100644 index 000000000000..d3c7e01b4024 --- /dev/null +++ b/modules.list.msm.neo-la @@ -0,0 +1,90 @@ +qcom_cpu_vendor_hooks.ko +qcom_ipc_logging.ko +ns.ko +qrtr.ko +crc8.ko +lzo.ko +lzo-rle.ko +zsmalloc.ko +qcom_edac.ko +icc-test.ko +icc-debug.ko +qnoc-neo.ko +qcom_ipcc.ko +dcvs_fp.ko +qcom-dcvs.ko +qnoc-qos.ko +msm-geni-se.ko +qcom-pdc.ko +qcom-scm.ko +arm_smmu.ko +iommu-logger.ko +qcom_iommu_util.ko +qcom_llcc_pmu.ko +qcom-pmu-lib.ko +llcc-qcom.ko +qmi_helpers.ko +qcom_hwspinlock.ko +cmd-db.ko +qcom_soc_wdt.ko +smem.ko +qcom_sync_file.ko +gh_msgq.ko +gh_rm_drv.ko +socinfo.ko +qcom-dload-mode.ko +deferred-free-helper.ko +mem-offline.ko +mem-hooks.ko +memory_dump_v2.ko +msm_dma_iommu_mapping.ko +qcom_wdt_core.ko +qcom_rpmh.ko +clk-rpmh.ko +icc-bcm-voter.ko +icc-rpmh.ko +clk-qcom.ko +clk-dummy.ko +zram.ko +msm_geni_serial.ko +msm_rtb.ko +mem_buf.ko +mem_buf_dev.ko +pinctrl-msm.ko +phy-generic.ko +phy-qcom-emu.ko +qcom_dma_heaps.ko +dwc3-msm.ko +sdhci-msm-scaling.ko +cqhci.ko +qcom-vadc-common.ko +qcom-spmi-adc5.ko +qti-adc-tm.ko +rtc-pm8xxx.ko +qpnp-power-on.ko +spmi-pmic-arb.ko +nvmem_qcom-spmi-sdam.ko +pinctrl-spmi-gpio.ko +qcom-spmi-temp-alarm.ko +clk-spmi-pmic-div.ko +sdhci-msm.ko +core_hang_detect.ko +kryo_arm64_edac.ko +sched-walt.ko +gcc-neo.ko +dispcc-neo.ko +secure_buffer.ko +qcom-cpufreq-hw.ko +sched-walt-debug.ko +qcom-i2c-pmici.ko +qcom-spmi-pmic.ko +qcom-reboot-reason.ko +qti-regmap-debugfs.ko +regmap-spmi.ko +proxy-consumer.ko +bq256xx_charger.ko 
+gh_virt_wdt.ko +crypto-qti-common.ko +crypto-qti-hwkm.ko +hwkm.ko +tmecom-intf.ko diff --git a/modules.systemdlkm_blocklist.msm.neo-la b/modules.systemdlkm_blocklist.msm.neo-la new file mode 100644 index 000000000000..5f22fd052ba6 --- /dev/null +++ b/modules.systemdlkm_blocklist.msm.neo-la @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. diff --git a/modules.vendor_blocklist.msm.neo-la b/modules.vendor_blocklist.msm.neo-la new file mode 100644 index 000000000000..2e72b688fe99 --- /dev/null +++ b/modules.vendor_blocklist.msm.neo-la @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + +blocklist 8250_of +blocklist dummy_hcd +blocklist llcc_perfmon +blocklist tda18250 +blocklist tda9887 +blocklist tuner-simple +blocklist mt2266 +blocklist tea5767 +blocklist xc5000 +blocklist mt2131 +blocklist qt1010 +blocklist tuner-types +blocklist tua9001 +blocklist m88rs6000t +blocklist tda18218 +blocklist mxl5007t +blocklist fc2580 +blocklist r820t +blocklist mc44s803 +blocklist fc0012 +blocklist si2157 +blocklist tda827x +blocklist tuner-xc2028 +blocklist mt2060 +blocklist qm1d1b0004 +blocklist qm1d1c0042 +blocklist tda18212 +blocklist fc0013 +blocklist msi001 +blocklist fc0011 +blocklist tda8290 +blocklist max2165 +blocklist xc4000 +blocklist it913x +blocklist mt20xx +blocklist mxl301rf +blocklist mt2063 +blocklist e4000 +blocklist tea5761 +blocklist tda18271 +blocklist mxl5005s +blocklist dummy-cpufreq +blocklist dummy_hcd +blocklist kheaders +blocklist atomic64_test +blocklist test_user_copy +blocklist lkdtm +blocklist net_failover +blocklist adc-tm +blocklist rtc-test +blocklist can-bcm +blocklist can-gw +blocklist can-raw +blocklist failover +blocklist vmw_vsock_virtio_transport +blocklist vmw_vsock_virtio_transport_common +blocklist vsock_diag +blocklist vsock +blocklist torture +blocklist locktorture 
+blocklist rcutorture +blocklist mmrm_test_module +blocklist q5drv_linux +blocklist limits_stat From 450c52772b7b8f89be2e8be50ecdd1a1d91c9681 Mon Sep 17 00:00:00 2001 From: Yash Jain Date: Mon, 29 Jul 2024 15:43:28 +0530 Subject: [PATCH 007/117] modules.list support for loading gpucc in first stage - Add support for loading gpucc-sm8150 in first stage init. Change-Id: Icc42894ff33d4359fc92a0c84d29facca7ef4e16 Signed-off-by: Yash Jain --- modules.list.msm.gen3auto | 1 + 1 file changed, 1 insertion(+) diff --git a/modules.list.msm.gen3auto b/modules.list.msm.gen3auto index b547e904695d..53f86cca4dc6 100644 --- a/modules.list.msm.gen3auto +++ b/modules.list.msm.gen3auto @@ -77,3 +77,4 @@ stmmac-platform.ko dwmac-qcom-eth.ko crypto-qti-common.ko sdhci-msm-scaling.ko +gpucc-sm8150.ko From a5d86b4d379e2baf1037a8d537097dc0401e676d Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Fri, 5 Jul 2024 13:49:34 +0530 Subject: [PATCH 008/117] build: Add bazel support for Neo LA Add bazel build support for Neo LA platform. Change-Id: I529549bf4e111e735c01c27833906e919c4d39c6 Signed-off-by: Asit Shah --- build.config.msm.neo-la | 37 +++++++++++++++++++++++++++++++++++++ build.targets | 1 + msm_platforms.bzl | 2 ++ target_variants.bzl | 1 + 4 files changed, 41 insertions(+) create mode 100644 build.config.msm.neo-la diff --git a/build.config.msm.neo-la b/build.config.msm.neo-la new file mode 100644 index 000000000000..8195803f1a94 --- /dev/null +++ b/build.config.msm.neo-la @@ -0,0 +1,37 @@ +################################################################################ +## Inheriting configs from ACK +. ${ROOT_DIR}/msm-kernel/build.config.common +. 
${ROOT_DIR}/msm-kernel/build.config.aarch64 + +################################################################################ +## Variant setup +MSM_ARCH=neo_la +VARIANTS=(consolidate gki) +[ -z "${VARIANT}" ] && VARIANT=consolidate + +ABL_SRC=bootable/bootloader/edk2 +BOOT_IMAGE_HEADER_VERSION=4 +BASE_ADDRESS=0x80000000 +PAGE_SIZE=4096 +BUILD_VENDOR_DLKM=1 +PREPARE_SYSTEM_DLKM=1 +SYSTEM_DLKM_MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules +SUPER_IMAGE_SIZE=0x10000000 +TRIM_UNUSED_MODULES=1 +BUILD_INIT_BOOT_IMG=1 + +[ -z "${DT_OVERLAY_SUPPORT}" ] && DT_OVERLAY_SUPPORT=1 + +if [ "${KERNEL_CMDLINE_CONSOLE_AUTO}" != "0" ]; then + KERNEL_VENDOR_CMDLINE+=' console=ttyMSM0,115200n8 msm_geni_serial.con_enabled=1 ' +fi + +KERNEL_VENDOR_CMDLINE+=' bootconfig ' + +################################################################################ +## Inheriting MSM configs +. ${KERNEL_DIR}/build.config.msm.common +. ${KERNEL_DIR}/build.config.msm.gki + +## Inherit SXR configs +. 
${KERNEL_DIR}/build.config.sxr.common diff --git a/build.targets b/build.targets index e7fc99f9fff3..73510ad8e40b 100644 --- a/build.targets +++ b/build.targets @@ -2,6 +2,7 @@ build.config.msm.autogvm build.config.msm.autoghgvm build.config.msm.pineapple build.config.msm.anorak +build.config.msm.neo-la build.config.msm.niobe build.config.msm.kalama build.config.msm.pineapple.vm diff --git a/msm_platforms.bzl b/msm_platforms.bzl index 2140cfcd17c4..129d4a4da42f 100644 --- a/msm_platforms.bzl +++ b/msm_platforms.bzl @@ -17,6 +17,7 @@ load("//build:msm_kernel_extensions.bzl", "define_top_level_rules") load(":blair.bzl", "define_blair") load(":pitti.bzl", "define_pitti") load(":anorak.bzl", "define_anorak") +load(":neo_la.bzl", "define_neo_la") def define_msm_platforms(): define_top_level_rules() @@ -38,3 +39,4 @@ def define_msm_platforms(): define_blair() define_pitti() define_anorak() + define_neo_la() diff --git a/target_variants.bzl b/target_variants.bzl index 319a0b59adff..b77eb0ef74bc 100644 --- a/target_variants.bzl +++ b/target_variants.bzl @@ -5,6 +5,7 @@ la_targets = [ "autogvm", "blair", "gen3auto", + "neo-la", "niobe", "pineapple", "pitti", From 76afc06676772d72eefcfdea91cebc4a5e0158d5 Mon Sep 17 00:00:00 2001 From: Sumedha Phadnis Date: Tue, 16 Jul 2024 20:39:35 +0530 Subject: [PATCH 009/117] modules.list.msm.autogvm: Keep stub regulator to first stage Moving stub regulator loading to first stage to solve pci noc error. 
Change-Id: I6ef934a4054ea6dc6f3d99a471753fca4908ad50 Signed-off-by: Sumedha Phadnis --- modules.list.msm.autogvm | 1 + 1 file changed, 1 insertion(+) diff --git a/modules.list.msm.autogvm b/modules.list.msm.autogvm index e70331a1eaf4..484b2d232a78 100644 --- a/modules.list.msm.autogvm +++ b/modules.list.msm.autogvm @@ -34,3 +34,4 @@ debug_symbol.ko virtio_net.ko net_failover.ko failover.ko +stub-regulator.ko From 35b5a592d595ccb77b1e11a613c21dd8dedee9ff Mon Sep 17 00:00:00 2001 From: Uttkarsh Aggarwal Date: Thu, 18 Jul 2024 11:44:46 +0530 Subject: [PATCH 010/117] usb: phy: eusb2: Add 10us delay in eusb2 phy init According to Table No. 4 in HPG, the power-on reset must be asserted for at least 10us after all supplies have ramped up. Change-Id: I086ff86e50cf9846beb422901d61954ddcc47ff5 Signed-off-by: Uttkarsh Aggarwal --- drivers/usb/phy/phy-msm-snps-eusb2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/phy/phy-msm-snps-eusb2.c b/drivers/usb/phy/phy-msm-snps-eusb2.c index 452ee30ecd8a..4179fa65e15e 100644 --- a/drivers/usb/phy/phy-msm-snps-eusb2.c +++ b/drivers/usb/phy/phy-msm-snps-eusb2.c @@ -706,6 +706,8 @@ static int msm_eusb2_phy_init(struct usb_phy *uphy) msm_eusb2_write_readback(phy->base, USB_PHY_UTMI_CTRL5, POR, POR); + udelay(10); + msm_eusb2_write_readback(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0, PHY_ENABLE | RETENABLEN, PHY_ENABLE | RETENABLEN); From 6537605fb3254f8f6bb78f1a022c40710d2b5652 Mon Sep 17 00:00:00 2001 From: Jishnu Prakash Date: Fri, 26 Jul 2024 18:33:36 +0530 Subject: [PATCH 011/117] leds: leds-qti-flash: update HW strobe config for external LEDs Make HW strobe signal level triggered instead of edge triggered for external LEDs based on HW side recommendation. 
Change-Id: I8c88646a252b58923c08bff0459c3f53a94f1bcd Signed-off-by: Jishnu Prakash --- drivers/leds/leds-qti-flash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c index 62444704e131..9a979010f9b1 100644 --- a/drivers/leds/leds-qti-flash.c +++ b/drivers/leds/leds-qti-flash.c @@ -1421,8 +1421,8 @@ static int qti_flash_led_setup(struct qti_flash_led *led) mask = FLASH_LED_STROBE_CFG_MASK | FLASH_LED_HW_SW_STROBE_SEL; if (led->ext_led) { - val |= FLASH_LED_STROBE_TRIGGER | FLASH_LED_STROBE_POLARITY; - mask |= FLASH_LED_STROBE_TRIGGER | FLASH_LED_STROBE_POLARITY; + val |= FLASH_LED_STROBE_POLARITY; + mask |= FLASH_LED_STROBE_POLARITY; } rc = qti_flash_led_masked_write(led, From edb802e961a9352211f1da2443666eea8b6c74f9 Mon Sep 17 00:00:00 2001 From: Vivek Pernamitta Date: Tue, 14 May 2024 21:27:46 +0530 Subject: [PATCH 012/117] pcie: msm: Add support to handle PERST GPIO when FMD mode is enable When FMD mode is enabled, deassert the perst signal if perst is already in asserted state and do not allow it to be reasserted. 
Change-Id: I45105f189214f683d718ab96a1db30cd2bc80667 Signed-off-by: Vivek Pernamitta Signed-off-by: Paras Sharma --- drivers/pci/controller/pci-msm.c | 78 +++++++++++++++++++++++++------- include/linux/msm_pcie.h | 18 +++++++- 2 files changed, 78 insertions(+), 18 deletions(-) diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c index 6dabc1dd39de..255c9602f4a5 100644 --- a/drivers/pci/controller/pci-msm.c +++ b/drivers/pci/controller/pci-msm.c @@ -1284,6 +1284,8 @@ struct msm_pcie_dev_t { #if IS_ENABLED(CONFIG_I2C) struct pcie_i2c_ctrl i2c_ctrl; #endif + + bool fmd_enable; }; struct msm_root_dev_t { @@ -1596,6 +1598,55 @@ int msm_pcie_reg_dump(struct pci_dev *pci_dev, u8 *buff, u32 len) } EXPORT_SYMBOL(msm_pcie_reg_dump); +static void msm_pcie_config_perst(struct msm_pcie_dev_t *dev, bool assert) +{ + if (dev->fmd_enable) { + pr_err("PCIe: FMD is enabled for RC%d\n", dev->rc_idx); + return; + } + + if (assert) { + PCIE_INFO(dev, "PCIe: RC%d: assert PERST\n", + dev->rc_idx); + gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, + dev->gpio[MSM_PCIE_GPIO_PERST].on); + } else { + PCIE_INFO(dev, "PCIe: RC%d: de-assert PERST\n", + dev->rc_idx); + gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, + 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on); + } +} + +int msm_pcie_fmd_enable(struct pci_dev *pci_dev) +{ + struct pci_dev *root_pci_dev; + struct msm_pcie_dev_t *pcie_dev; + + root_pci_dev = pcie_find_root_port(pci_dev); + if (!root_pci_dev) + return -ENODEV; + + pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus); + if (!pcie_dev) { + pr_err("PCIe: did not find RC for pci endpoint device.\n"); + return -ENODEV; + } + + PCIE_INFO(pcie_dev, "RC%d Enable FMD\n", pcie_dev->rc_idx); + if (pcie_dev->fmd_enable) { + pr_err("PCIe: FMD is already enabled for RC%d\n", pcie_dev->rc_idx); + return 0; + } + + if (!gpio_get_value(pcie_dev->gpio[MSM_PCIE_GPIO_PERST].num)) + msm_pcie_config_perst(pcie_dev, false); + + pcie_dev->fmd_enable = true; + return 0; +} 
+EXPORT_SYMBOL_GPL(msm_pcie_fmd_enable); + static void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value) { writel_relaxed(value, base + offset); @@ -2432,15 +2483,13 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev, case MSM_PCIE_ASSERT_PERST: PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, true); usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max); break; case MSM_PCIE_DEASSERT_PERST: PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, false); usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max); break; case MSM_PCIE_KEEP_RESOURCES_ON: @@ -5764,8 +5813,7 @@ static int msm_pcie_link_train(struct msm_pcie_dev_t *dev) #endif PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, true); PCIE_ERR(dev, "PCIe RC%d link initialization failed\n", dev->rc_idx); return MSM_PCIE_ERROR; @@ -6171,8 +6219,7 @@ static int msm_pcie_enable_link(struct msm_pcie_dev_t *dev) PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, false); usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max); ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US); @@ -6289,10 +6336,8 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev) PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - dev->gpio[MSM_PCIE_GPIO_PERST].on); - usleep_range(PERST_PROPAGATION_DELAY_US_MIN, - 
PERST_PROPAGATION_DELAY_US_MAX); + msm_pcie_config_perst(dev, true); + usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max); /* enable power */ ret = msm_pcie_vreg_init(dev); @@ -6422,8 +6467,7 @@ static void msm_pcie_disable(struct msm_pcie_dev_t *dev) PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n", dev->rc_idx); - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, true); if (dev->phy_power_down_offset) msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0); @@ -6588,6 +6632,7 @@ int msm_pcie_enumerate(u32 rc_idx) PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx); + dev->fmd_enable = false; if (!dev->drv_ready) { PCIE_DBG(dev, "PCIe: RC%d: has not been successfully probed yet\n", @@ -7528,7 +7573,7 @@ static void msm_pcie_handle_linkdown(struct msm_pcie_dev_t *dev) return; } - if (!dev->suspending) { + if (!dev->suspending && !dev->fmd_enable) { /* PCIe registers dump on link down */ PCIE_DUMP(dev, "PCIe:Linkdown IRQ for RC%d Dumping PCIe registers\n", @@ -7550,8 +7595,7 @@ static void msm_pcie_handle_linkdown(struct msm_pcie_dev_t *dev) /* assert PERST */ if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx))) - gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, - dev->gpio[MSM_PCIE_GPIO_PERST].on); + msm_pcie_config_perst(dev, true); PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx); diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h index e504768d8c5d..de4c318c5c99 100644 --- a/include/linux/msm_pcie.h +++ b/include/linux/msm_pcie.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ +/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __MSM_PCIE_H #define __MSM_PCIE_H @@ -236,6 +236,17 @@ int msm_pcie_reg_dump(struct pci_dev *pci_dev, u8 *buff, u32 len); */ int msm_pcie_dsp_link_control(struct pci_dev *pci_dev, bool link_enable); + +/* + * msm_pcie_fmd_enable - deassert perst and enable FMD bit + * @pci_dev: pci device structure + * + * This function will de-assert PERST if PERST is already in assert state + * and set fmd_enable bit, after that no further perst assert/de-assert + * are allowed. + */ +int msm_pcie_fmd_enable(struct pci_dev *pci_dev); + #else /* !CONFIG_PCI_MSM */ static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, void *data, u32 options) @@ -304,6 +315,11 @@ static inline int msm_pcie_dsp_link_control(struct pci_dev *pci_dev, { return -ENODEV; } + +static inline int msm_pcie_fmd_enable(struct pci_dev *pci_dev) +{ + return -ENODEV; +} #endif /* CONFIG_PCI_MSM */ #endif /* __MSM_PCIE_H */ From a700c4049142c46a5dc70a2321828ef474d68680 Mon Sep 17 00:00:00 2001 From: Mukesh Ojha Date: Mon, 29 Jul 2024 13:28:43 +0530 Subject: [PATCH 013/117] remoteproc: pas: Update panic handler priority Currently, both remoteproc and wdog driver panic handler has the same priority due to which both gets registered one after the other. Having the same priority breaks WDOG panic handler which should be called as early as possible otherwise we will the devices gets stuck if any of previous handlers gets stuck due to some reason like sleep while atomic etc., Fix this by correcting the priorities as dload_mode => INT_MAX wdog => INT_MAX - 1 rproc => INT_MAX - 2 minidump => INT_MAX - 3 va-minidump => INT_MAX - 4, INT_MAX - 5 . 
Change-Id: I79e454c7c671bbc83e5fac86e16717c3e09a06ce Signed-off-by: Mukesh Ojha Signed-off-by: Srinivasarao Pathipati --- drivers/remoteproc/qcom_q6v5_pas.c | 2 +- drivers/soc/qcom/minidump_log.c | 4 ++-- drivers/soc/qcom/qcom_va_minidump.c | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index edb9cfd86780..0a09e7fc2188 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -1764,7 +1764,7 @@ static int adsp_probe(struct platform_device *pdev) mutex_unlock(&q6v5_pas_mutex); if (adsp->check_status) { - adsp->panic_blk.priority = INT_MAX - 1; + adsp->panic_blk.priority = INT_MAX - 2; adsp->panic_blk.notifier_call = rproc_panic_handler; atomic_notifier_chain_register(&panic_notifier_list, &adsp->panic_blk); } diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c index 547107d5e7a8..5c2b8493d730 100644 --- a/drivers/soc/qcom/minidump_log.c +++ b/drivers/soc/qcom/minidump_log.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -1153,7 +1153,7 @@ static int md_panic_handler(struct notifier_block *this, static struct notifier_block md_panic_blk = { .notifier_call = md_panic_handler, - .priority = INT_MAX - 2, /* < msm watchdog panic notifier */ + .priority = INT_MAX - 3, /* < msm watchdog panic notifier */ }; static int md_register_minidump_entry(char *name, u64 virt_addr, diff --git a/drivers/soc/qcom/qcom_va_minidump.c b/drivers/soc/qcom/qcom_va_minidump.c index 99520d4652e5..581a1e97b011 100644 --- a/drivers/soc/qcom/qcom_va_minidump.c +++ b/drivers/soc/qcom/qcom_va_minidump.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022,2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "va-minidump: %s: " fmt, __func__ @@ -690,12 +690,12 @@ static int qcom_va_md_elf_panic_handler(struct notifier_block *this, static struct notifier_block qcom_va_md_panic_blk = { .notifier_call = qcom_va_md_panic_handler, - .priority = INT_MAX - 3, + .priority = INT_MAX - 4, }; static struct notifier_block qcom_va_md_elf_panic_blk = { .notifier_call = qcom_va_md_elf_panic_handler, - .priority = INT_MAX - 4, + .priority = INT_MAX - 5, }; static int qcom_va_md_reserve_mem(struct device *dev) From f32c9f673e5de7eca589108fba6f89852ab2eae6 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Mon, 29 Jul 2024 18:24:11 +0800 Subject: [PATCH 014/117] defconfig: autoghgvm: refine configs for LV GVM booting To ensure the configs is optimized and workable for LV booting, we need to refine the configs: 1. remove SWIOTLB_NONLINEAR and VIRTIO_MMIO_SWIOTLB as bounce buffers are no longer used for LV GVM Virtio device. 2. disable ZONE_DMA, otherwise general SWIOTLB init will fail, which causes kernel panic. 3. Set all modules as in-built. 
Change-Id: If4fbf5ecf7391bc16e3d75b803444a67cc884e43 Signed-off-by: Wei Liu --- arch/arm64/configs/vendor/autoghgvm.config | 93 +++++++++++----------- 1 file changed, 46 insertions(+), 47 deletions(-) diff --git a/arch/arm64/configs/vendor/autoghgvm.config b/arch/arm64/configs/vendor/autoghgvm.config index 51856eb8111f..1e412b86c7e8 100644 --- a/arch/arm64/configs/vendor/autoghgvm.config +++ b/arch/arm64/configs/vendor/autoghgvm.config @@ -4,82 +4,81 @@ CONFIG_ARCH_QCOM=y CONFIG_ARCH_QTI_VM=y CONFIG_ARM64=y CONFIG_ARM64_PMEM=y -CONFIG_ARM_SMMU=m +CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y -CONFIG_ARM_SMMU_QCOM=m -CONFIG_COMMON_CLK_QCOM=m -CONFIG_FAILOVER=m +CONFIG_ARM_SMMU_QCOM=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_FAILOVER=y CONFIG_GH_ARM64_DRV=y CONFIG_GH_CTRL=y CONFIG_GH_DBL=y CONFIG_GH_MSGQ=y CONFIG_GH_RM_DRV=y -CONFIG_GH_VIRT_WATCHDOG=m +CONFIG_GH_VIRT_WATCHDOG=y CONFIG_GUNYAH=y CONFIG_GUNYAH_DRIVERS=y CONFIG_HVC_GUNYAH=y CONFIG_HVC_GUNYAH_CONSOLE=y -CONFIG_I2C_MSM_GENI=m -CONFIG_I2C_VIRTIO=m +CONFIG_I2C_MSM_GENI=y +CONFIG_I2C_VIRTIO=y CONFIG_IOMMU_IO_PGTABLE_FAST=y # CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB is not set # CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST is not set -CONFIG_IPC_LOGGING=m +CONFIG_IPC_LOGGING=y CONFIG_LOCALVERSION="-perf" CONFIG_MAILBOX=y -CONFIG_MHI_BUS=m +CONFIG_MHI_BUS=y CONFIG_MHI_BUS_MISC=y -CONFIG_MHI_UCI=m +CONFIG_MHI_UCI=y # CONFIG_MODULE_SIG_ALL is not set -CONFIG_MSM_HAB=m +CONFIG_MSM_HAB=y CONFIG_MSM_VIRTIO_HAB=y -CONFIG_NET_FAILOVER=m -CONFIG_PINCTRL_LEMANS=m -CONFIG_PINCTRL_MONACO_AUTO=m -CONFIG_PINCTRL_MSM=m -CONFIG_QCOM_DMABUF_HEAPS=m +CONFIG_NET_FAILOVER=y +CONFIG_PINCTRL_LEMANS=y +CONFIG_PINCTRL_MONACO_AUTO=y +CONFIG_PINCTRL_MSM=y +CONFIG_QCOM_DMABUF_HEAPS=y CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y CONFIG_QCOM_DMABUF_HEAPS_CMA=y CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_HGSL=m -CONFIG_QCOM_HGSL_TCSR_SIGNAL=m 
-CONFIG_QCOM_IOMMU_DEBUG=m -CONFIG_QCOM_IOMMU_UTIL=m -CONFIG_QCOM_IPCC=m -CONFIG_QCOM_LOGBUF_BOOTLOG=m -CONFIG_QCOM_MEM_BUF=m -CONFIG_QCOM_MEM_BUF_DEV=m -CONFIG_QCOM_QMI_HELPERS=m -CONFIG_QCOM_RPROC_COMMON=m -CONFIG_QCOM_RUN_QUEUE_STATS=m +CONFIG_QCOM_HGSL=y +CONFIG_QCOM_HGSL_TCSR_SIGNAL=y +CONFIG_QCOM_IOMMU_DEBUG=y +CONFIG_QCOM_IOMMU_UTIL=y +CONFIG_QCOM_IPCC=y +CONFIG_QCOM_LOGBUF_BOOTLOG=y +CONFIG_QCOM_MEM_BUF=y +CONFIG_QCOM_MEM_BUF_DEV=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_RPROC_COMMON=y +CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_SCM=y -CONFIG_QCOM_SECURE_BUFFER=m -CONFIG_QCOM_SMEM=m +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_QCOM_SMEM=y # CONFIG_QCOM_SOC_WATCHDOG is not set -CONFIG_QCOM_SYSMON=m +CONFIG_QCOM_SYSMON=y # CONFIG_QCOM_WATCHDOG_USERSPACE_PET is not set -CONFIG_QCOM_WDT_CORE=m -CONFIG_QRTR=m -CONFIG_QRTR_MHI=m +CONFIG_QCOM_WDT_CORE=y +CONFIG_QRTR=y +CONFIG_QRTR_MHI=y CONFIG_QRTR_NODE_ID=1 CONFIG_QRTR_WAKEUP_MS=0 -CONFIG_QTI_IOMMU_SUPPORT=m -CONFIG_RENAME_DEVICES=m -CONFIG_RPMSG_QCOM_GLINK=m -CONFIG_RPMSG_QCOM_GLINK_CMA=m -CONFIG_RPMSG_QCOM_GLINK_SMEM=m -CONFIG_SERIAL_MSM_GENI=m -CONFIG_SPI_MSM_GENI=m -CONFIG_SPI_SPIDEV=m -CONFIG_SWIOTLB_NONLINEAR=y +CONFIG_QTI_IOMMU_SUPPORT=y +CONFIG_RENAME_DEVICES=y +CONFIG_RPMSG_QCOM_GLINK=y +CONFIG_RPMSG_QCOM_GLINK_CMA=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SPI_MSM_GENI=y +CONFIG_SPI_SPIDEV=y CONFIG_VHOST_MENU=y -CONFIG_VIRTIO_BLK=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_POLL_RESET=y -CONFIG_VIRTIO_MMIO_SWIOTLB=y -CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_NET=y CONFIG_VIRT_DRIVERS=y +# CONFIG_ZONE_DMA is not set # CONFIG_ZONE_DMA32 is not set From c61660bfe4e90606f87ad761c10f747385e93a52 Mon Sep 17 00:00:00 2001 From: Gokul krishna Krishnakumar Date: Tue, 25 Jun 2024 09:43:48 -0700 Subject: [PATCH 015/117] remoteproc: pas: Add Data Synchronization barrier before reading tcsr reg Add memory barrier for data 
syncronization before reading the TCSR config register. The value written at Sub System is mismatching the read back value at APPS. Change-Id: I74862f6d8aa54a67988eca2fb9569e23cc89844a Signed-off-by: Gokul krishna Krishnakumar Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5_pas.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index edb9cfd86780..d462e6c8456a 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -834,6 +834,22 @@ static irqreturn_t soccp_running_ack(int irq, void *data) * Return: 0 if the WFI status register reflects the requested state. */ static int rproc_config_check(struct qcom_adsp *adsp, u32 state) +{ + unsigned int retry_num = 50; + u32 val; + + do { + usleep_range(SOCCP_SLEEP_US, SOCCP_SLEEP_US + 100); + /* Making sure the mem mapped io is read correctly*/ + dsb(sy); + val = readl(adsp->config_addr); + if ((state == SOCCP_D0) && (val == SOCCP_D1)) + return 0; + } while (val != state && --retry_num); + + return (val == state) ? 
0 : -ETIMEDOUT; +} +static int rproc_config_check_atomic(struct qcom_adsp *adsp, u32 state) { u32 val; @@ -959,6 +975,7 @@ int rproc_set_state(struct rproc *rproc, bool state) ret = rproc_config_check(adsp, SOCCP_D0); if (ret) { + dsb(sy); dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update tcsr val=%d\n", current->comm, readl(adsp->config_addr)); goto soccp_out; @@ -990,6 +1007,7 @@ int rproc_set_state(struct rproc *rproc, bool state) ret = rproc_config_check(adsp, SOCCP_D3); if (ret) { + dsb(sy); dev_err(adsp->dev, "%s requested D0->D3 failed: TCSR value:%d\n", current->comm, readl(adsp->config_addr)); goto soccp_out; @@ -1030,7 +1048,7 @@ static int rproc_panic_handler(struct notifier_block *this, dev_err(adsp->dev, "failed to update smem bits for D3 to D0\n"); goto done; } - ret = rproc_config_check(adsp, SOCCP_D0); + ret = rproc_config_check_atomic(adsp, SOCCP_D0); if (ret) dev_err(adsp->dev, "failed to change to D0\n"); done: @@ -1044,10 +1062,13 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5) if (adsp->check_status) { ret = rproc_config_check(adsp, SOCCP_D3); + dsb(sy); if (ret) - dev_err(adsp->dev, "state not changed in handover\n"); + dev_err(adsp->dev, "state not changed in handover TCSR val = %d\n", + readl(adsp->config_addr)); else - dev_info(adsp->dev, "state changed in handover for soccp!\n"); + dev_info(adsp->dev, "state changed in handover for soccp! TCSR val = %d\n", + readl(adsp->config_addr)); } disable_regulators(adsp); clk_disable_unprepare(adsp->aggre2_clk); From 94be87df73636109d81560d4634f48f6472b7a0b Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Mon, 29 Jul 2024 16:51:32 +0800 Subject: [PATCH 016/117] bazel: remove all in-tree modules for autoghgvm_lxc remove all in-tree modules for autoghgvm_lxc. 
Change-Id: I1eeee689ec4ad0c36474c293ccb02bcbe9ca78a0 Signed-off-by: Wei Liu --- autoghgvm_lxc.bzl | 44 -------------------------------------------- 1 file changed, 44 deletions(-) diff --git a/autoghgvm_lxc.bzl b/autoghgvm_lxc.bzl index dc92620844e7..ab6f0fc8abbb 100644 --- a/autoghgvm_lxc.bzl +++ b/autoghgvm_lxc.bzl @@ -7,50 +7,6 @@ target_name = "autoghgvm" def define_autoghgvm_lxc(): _autoghgvm_lxc_in_tree_modules = [ # keep sorted - "drivers/block/virtio_blk.ko", - "drivers/bus/mhi/devices/mhi_dev_uci.ko", - "drivers/bus/mhi/host/mhi.ko", - "drivers/clk/qcom/clk-dummy.ko", - "drivers/clk/qcom/clk-qcom.ko", - "drivers/dma-buf/heaps/qcom_dma_heaps.ko", - "drivers/i2c/busses/i2c-msm-geni.ko", - "drivers/i2c/busses/i2c-virtio.ko", - "drivers/iommu/arm/arm-smmu/arm_smmu.ko", - "drivers/iommu/iommu-logger.ko", - "drivers/iommu/qcom_iommu_debug.ko", - "drivers/iommu/qcom_iommu_util.ko", - "drivers/mailbox/qcom-ipcc.ko", - "drivers/net/net_failover.ko", - "drivers/net/virtio_net.ko", - "drivers/pinctrl/qcom/pinctrl-lemans.ko", - "drivers/pinctrl/qcom/pinctrl-monaco_auto.ko", - "drivers/pinctrl/qcom/pinctrl-msm.ko", - "drivers/remoteproc/qcom_sysmon.ko", - "drivers/remoteproc/rproc_qcom_common.ko", - "drivers/rpmsg/qcom_glink.ko", - "drivers/rpmsg/qcom_glink_cma.ko", - "drivers/rpmsg/qcom_glink_smem.ko", - "drivers/soc/qcom/hab/msm_hab.ko", - "drivers/soc/qcom/hgsl/qcom_hgsl.ko", - "drivers/soc/qcom/mem_buf/mem_buf.ko", - "drivers/soc/qcom/mem_buf/mem_buf_dev.ko", - "drivers/soc/qcom/qcom_logbuf_boot_log.ko", - "drivers/soc/qcom/qcom_wdt_core.ko", - "drivers/soc/qcom/qmi_helpers.ko", - "drivers/soc/qcom/rename_devices.ko", - "drivers/soc/qcom/rq_stats.ko", - "drivers/soc/qcom/secure_buffer.ko", - "drivers/soc/qcom/smem.ko", - "drivers/spi/spi-msm-geni.ko", - "drivers/spi/spidev.ko", - "drivers/tty/serial/msm_geni_serial.ko", - "drivers/virt/gunyah/gh_virt_wdt.ko", - "drivers/virtio/virtio_input.ko", - "drivers/virtio/virtio_mmio.ko", - 
"kernel/trace/qcom_ipc_logging.ko", - "net/core/failover.ko", - "net/qrtr/qrtr.ko", - "net/qrtr/qrtr-mhi.ko", ] for variant in lxc_variants: From 0bf975d67152b19ea6a20a83f45ec8291be44db9 Mon Sep 17 00:00:00 2001 From: Jayasri Bhattacharyya Date: Fri, 2 Aug 2024 14:44:46 +0530 Subject: [PATCH 017/117] usb: phy: qusb: Fix for USBNOC issue in stability The power needs to be switched ON before operating over the phy, in case set_suspend is not called, the power is not ON. Without dp dm and extcon, there might be cases when qusb_init gets called without power ON. Perform power on specifically during qusb init and maintain the reference count by checking newly introduced flags power_enabled/ clk_enabled. Change-Id: I529d619359f7320f12ce747fc0f24421f65a9973 Signed-off-by: Jayasri Bhattacharyya --- drivers/usb/phy/phy-msm-qusb.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c index 43f81d5f1437..56372a2c8964 100644 --- a/drivers/usb/phy/phy-msm-qusb.c +++ b/drivers/usb/phy/phy-msm-qusb.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -164,7 +164,8 @@ struct qusb_phy { int tune2_efuse_bit_pos; int tune2_efuse_num_of_bits; int tune2_efuse_correction; - + bool power_enabled; + bool clk_enabled; bool cable_connected; bool suspended; bool ulpi_mode; @@ -210,6 +211,10 @@ static void qusb_phy_update_tcsr_level_shifter(struct qusb_phy *qphy, static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on) { dev_dbg(qphy->phy.dev, "%s(): on:%d\n", __func__, on); + if (qphy->clk_enabled == on) { + dev_dbg(qphy->phy.dev, "%s(): clock is already %d\n", __func__, on); + return; + } if (on) { clk_prepare_enable(qphy->ref_clk_src); @@ -228,6 +233,7 @@ static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on) clk_disable_unprepare(qphy->ref_clk); clk_disable_unprepare(qphy->ref_clk_src); } + qphy->clk_enabled = on; } @@ -280,6 +286,10 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) dev_dbg(qphy->phy.dev, "%s turn %s regulators\n", __func__, on ? "on" : "off"); + if (qphy->power_enabled == on) { + dev_dbg(qphy->phy.dev, "qphy->power_enabled is already %d\n", on); + return ret; + } if (!on) goto disable_vdda33; @@ -338,6 +348,7 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) } pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__); + qphy->power_enabled = true; return ret; disable_vdda33: @@ -385,7 +396,7 @@ static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) ret); err_vdd: dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n"); - + qphy->power_enabled = false; return ret; } @@ -496,6 +507,9 @@ static int qusb_phy_init(struct usb_phy *phy) if (ret) dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); + qusb_phy_enable_power(qphy, true); + qusb_phy_enable_clocks(qphy, true); + /* Disable the PHY */ if (qphy->major_rev < 2) writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, From 5a09efd2186ebaf38c2bb9b7f7dfa5fbb242ee28 Mon Sep 17 00:00:00 2001 From: Auditya Bhattaram Date: Thu, 18 Jul 2024 13:16:36 
+0530 Subject: [PATCH 018/117] drivers: nvmem: qfprom: Add keepout_regions for pineapple Add keepout_regions for pineapple. Change-Id: I54840adc0ec5c4f3124495d5b2d5c763bf41390d Signed-off-by: Auditya Bhattaram --- drivers/nvmem/qfprom.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 260f18b56593..d17e743b693e 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -204,6 +204,16 @@ static const struct qfprom_soc_compatible_data niobe_qfprom = { .nkeepout = ARRAY_SIZE(niobe_qfprom_keepout) }; +static const struct nvmem_keepout pineapple_qfprom_keepout[] = { + {.start = 0, .end = 0x9b}, + {.start = 0x9c, .end = 0x1000}, +}; + +static const struct qfprom_soc_compatible_data pineapple_qfprom = { + .keepout = pineapple_qfprom_keepout, + .nkeepout = ARRAY_SIZE(pineapple_qfprom_keepout) +}; + /** * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing. * @priv: Our driver data. @@ -547,6 +557,7 @@ static const struct of_device_id qfprom_of_match[] = { { .compatible = "qcom,cliffs-qfprom", .data = &cliffs_qfprom}, { .compatible = "qcom,pitti-qfprom", .data = &pitti_qfprom}, { .compatible = "qcom,niobe-qfprom", .data = &niobe_qfprom}, + { .compatible = "qcom,pineapple-qfprom", .data = &pineapple_qfprom}, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, qfprom_of_match); From af3839c3456493025dd57a6ca5dcffd1a457e455 Mon Sep 17 00:00:00 2001 From: Navya Vemula Date: Wed, 31 Jul 2024 15:13:40 +0530 Subject: [PATCH 019/117] build: bazel: Enable compilation for Seraph SoC Add initial files to build Seraph with bazel. 
Change-Id: I5e9677c43a0a31ddec8c0808c22af7f054b8107c Signed-off-by: Navya Vemula --- arch/arm64/Kconfig.platforms | 9 +++ arch/arm64/configs/vendor/seraph_GKI.config | 4 ++ .../configs/vendor/seraph_consolidate.config | 14 ++++ build.config.msm.seraph | 33 ++++++++++ build.targets | 1 + modules.list.msm.seraph | 2 + modules.systemdlkm_blocklist.msm.seraph | 2 + modules.vendor_blocklist.msm.seraph | 65 +++++++++++++++++++ msm_platforms.bzl | 2 + seraph.bzl | 52 +++++++++++++++ target_variants.bzl | 1 + 11 files changed, 185 insertions(+) create mode 100644 arch/arm64/configs/vendor/seraph_GKI.config create mode 100644 arch/arm64/configs/vendor/seraph_consolidate.config create mode 100644 build.config.msm.seraph create mode 100644 modules.list.msm.seraph create mode 100644 modules.systemdlkm_blocklist.msm.seraph create mode 100644 modules.vendor_blocklist.msm.seraph create mode 100644 seraph.bzl diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index d24aaf2497dd..cb891bacc422 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -268,6 +268,15 @@ config ARCH_NIOBE chipset. If you do not wish to build a kernel that runs on this chipset or if you are unsure, say 'N' here. +config ARCH_SERAPH + bool "Enable support for Qualcomm Technologies, Inc. Seraph" + depends on ARCH_QCOM + help + This enables support for Qualcomm Technologies, Inc. Seraph + chipset. If you do not wish to build a kernel that runs on this + chipset or if you are unsure, + say 'N' here. + config ARCH_MONACO_AUTO bool "Enable Support for Qualcomm Technologies, Inc. 
MONACO_AUTO" depends on ARCH_QCOM diff --git a/arch/arm64/configs/vendor/seraph_GKI.config b/arch/arm64/configs/vendor/seraph_GKI.config new file mode 100644 index 000000000000..96ca36750dce --- /dev/null +++ b/arch/arm64/configs/vendor/seraph_GKI.config @@ -0,0 +1,4 @@ +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SERAPH=y +CONFIG_LOCALVERSION="-gki" +# CONFIG_MODULE_SIG_ALL is not set diff --git a/arch/arm64/configs/vendor/seraph_consolidate.config b/arch/arm64/configs/vendor/seraph_consolidate.config new file mode 100644 index 000000000000..c555bb67b4aa --- /dev/null +++ b/arch/arm64/configs/vendor/seraph_consolidate.config @@ -0,0 +1,14 @@ +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_CMA_DEBUG=y +CONFIG_CMA_DEBUGFS=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_LKDTM=m +CONFIG_LOCALVERSION="-consolidate" +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_PAGE_OWNER=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_RUNTIME_TESTING_MENU=y +CONFIG_TEST_USER_COPY=m diff --git a/build.config.msm.seraph b/build.config.msm.seraph new file mode 100644 index 000000000000..ab9adc2330b0 --- /dev/null +++ b/build.config.msm.seraph @@ -0,0 +1,33 @@ +################################################################################ +## Inheriting configs from ACK +. ${ROOT_DIR}/msm-kernel/build.config.common +. 
${ROOT_DIR}/msm-kernel/build.config.aarch64 + +################################################################################ +## Variant setup +MSM_ARCH=seraph +VARIANTS=(consolidate gki) +[ -z "${VARIANT}" ] && VARIANT=consolidate + +ABL_SRC=bootable/bootloader/edk2 +BOOT_IMAGE_HEADER_VERSION=4 +BASE_ADDRESS=0x80000000 +PAGE_SIZE=4096 +BUILD_VENDOR_DLKM=1 +PREPARE_SYSTEM_DLKM=1 +SYSTEM_DLKM_MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules +SUPER_IMAGE_SIZE=0x10000000 +TRIM_UNUSED_MODULES=1 +BUILD_INIT_BOOT_IMG=1 +[ -z "${DT_OVERLAY_SUPPORT}" ] && DT_OVERLAY_SUPPORT=1 + +if [ "${KERNEL_CMDLINE_CONSOLE_AUTO}" != "0" ]; then + KERNEL_VENDOR_CMDLINE+='console=ttyMSM0,115200n8 earlycon' +fi + +KERNEL_VENDOR_CMDLINE+=' bootconfig ' + +################################################################################ +## Inheriting MSM configs +. ${KERNEL_DIR}/build.config.msm.common +. ${KERNEL_DIR}/build.config.msm.gki diff --git a/build.targets b/build.targets index e7fc99f9fff3..60d6a4389d3e 100644 --- a/build.targets +++ b/build.targets @@ -3,6 +3,7 @@ build.config.msm.autoghgvm build.config.msm.pineapple build.config.msm.anorak build.config.msm.niobe +build.config.msm.seraph build.config.msm.kalama build.config.msm.pineapple.vm build.config.msm.kalama.vm diff --git a/modules.list.msm.seraph b/modules.list.msm.seraph new file mode 100644 index 000000000000..5f22fd052ba6 --- /dev/null +++ b/modules.list.msm.seraph @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. diff --git a/modules.systemdlkm_blocklist.msm.seraph b/modules.systemdlkm_blocklist.msm.seraph new file mode 100644 index 000000000000..5f22fd052ba6 --- /dev/null +++ b/modules.systemdlkm_blocklist.msm.seraph @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
diff --git a/modules.vendor_blocklist.msm.seraph b/modules.vendor_blocklist.msm.seraph new file mode 100644 index 000000000000..c1ea82573d9d --- /dev/null +++ b/modules.vendor_blocklist.msm.seraph @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + +blocklist 8250_of +blocklist adc-tm +blocklist atomic64_test +blocklist can-bcm +blocklist can-gw +blocklist can-raw +blocklist dummy-cpufreq +blocklist dummy_hcd +blocklist dummy_hcd +blocklist e4000 +blocklist failover +blocklist fc0011 +blocklist fc0012 +blocklist fc0013 +blocklist fc2580 +blocklist it913x +blocklist kheaders +blocklist limits_stat +blocklist lkdtm +blocklist llcc_perfmon +blocklist locktorture +blocklist m88rs6000t +blocklist max2165 +blocklist mc44s803 +blocklist mmrm_test_module +blocklist msi001 +blocklist mt2060 +blocklist mt2063 +blocklist mt20xx +blocklist mt2131 +blocklist mt2266 +blocklist mxl301rf +blocklist net_failover +blocklist qca_cld3_kiwi +blocklist qm1d1b0004 +blocklist qm1d1c0042 +blocklist qt1010 +blocklist r820t +blocklist rcutorture +blocklist rtc-test +blocklist si2157 +blocklist tda18212 +blocklist tda18218 +blocklist tda18250 +blocklist tda18271 +blocklist tda827x +blocklist tda8290 +blocklist tda9887 +blocklist tea5761 +blocklist tea5767 +blocklist test_user_copy +blocklist torture +blocklist tua9001 +blocklist tuner-simple +blocklist tuner-types +blocklist tuner-xc2028 +blocklist vmw_vsock_virtio_transport +blocklist vmw_vsock_virtio_transport_common +blocklist vsock +blocklist vsock_diag +blocklist xc4000 +blocklist xc5000 diff --git a/msm_platforms.bzl b/msm_platforms.bzl index 5f52e19c379b..517c21a67aa9 100644 --- a/msm_platforms.bzl +++ b/msm_platforms.bzl @@ -6,6 +6,7 @@ load(":gen4auto_lxc.bzl", "define_gen4auto_lxc") load(":sdmsteppeauto.bzl", "define_sdmsteppeauto") load(":pineapple.bzl", "define_pineapple") load(":niobe.bzl", "define_niobe") +load(":seraph.bzl", "define_seraph") 
load(":pineapple_tuivm.bzl", "define_pineapple_tuivm") load(":pineapple_oemvm.bzl", "define_pineapple_oemvm") load(":pineapple_vms.bzl", "define_pineapple_vms") @@ -28,6 +29,7 @@ def define_msm_platforms(): define_sdmsteppeauto() define_pineapple() define_niobe() + define_seraph() define_pineapple_tuivm() define_pineapple_oemvm() define_pineapple_allyes() diff --git a/seraph.bzl b/seraph.bzl new file mode 100644 index 000000000000..5f1e2ec93d8f --- /dev/null +++ b/seraph.bzl @@ -0,0 +1,52 @@ +load(":target_variants.bzl", "la_variants") +load(":msm_kernel_la.bzl", "define_msm_la") +load(":image_opts.bzl", "boot_image_opts") + +target_name = "seraph" + +def define_seraph(): + _seraph_in_tree_modules = [ + # keep sorted + # TODO: Need to add GKI modules + ] + + _seraph_consolidate_in_tree_modules = _seraph_in_tree_modules + [ + # keep sorted + "drivers/misc/lkdtm/lkdtm.ko", + "kernel/locking/locktorture.ko", + "kernel/rcu/rcutorture.ko", + "kernel/torture.ko", + "lib/atomic64_test.ko", + "lib/test_user_copy.ko", + ] + + kernel_vendor_cmdline_extras = [ + # do not sort + "console=ttyMSM0,115200n8", + "qcom_geni_serial.con_enabled=1", + "bootconfig", + ] + + board_kernel_cmdline_extras = [] + board_bootconfig_extras = [] + + for variant in la_variants: + if variant == "consolidate": + mod_list = _seraph_consolidate_in_tree_modules + else: + mod_list = _seraph_in_tree_modules + board_kernel_cmdline_extras += ["nosoftlockup"] + kernel_vendor_cmdline_extras += ["nosoftlockup"] + board_bootconfig_extras += ["androidboot.console=0"] + + define_msm_la( + msm_target = target_name, + variant = variant, + in_tree_module_list = mod_list, + boot_image_opts = boot_image_opts( + earlycon_addr = "qcom_geni,0x00884000", + kernel_vendor_cmdline_extras = kernel_vendor_cmdline_extras, + board_kernel_cmdline_extras = board_kernel_cmdline_extras, + board_bootconfig_extras = board_bootconfig_extras, + ), + ) diff --git a/target_variants.bzl b/target_variants.bzl index 
1b5644c46507..c38817de6f60 100644 --- a/target_variants.bzl +++ b/target_variants.bzl @@ -8,6 +8,7 @@ la_targets = [ "pineapple", "pitti", "sdmsteppeauto", + "seraph", ] la_variants = [ From 57c9b20c847793eac43c53c5d70e3ddca397895f Mon Sep 17 00:00:00 2001 From: Sneh Mankad Date: Wed, 24 Jul 2024 15:24:55 +0530 Subject: [PATCH 020/117] soc: qcom: rpmh: add cam_rsc check in rpmh_rsc_get_device() API This API is only meant for getting Camera RSC device, hence preventing any other client to use it for any other device. Change-Id: I6b1cef1cd98eb1d0c5fa92b67a2af141022ec7a3 Signed-off-by: Sneh Mankad --- drivers/soc/qcom/rpmh-rsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index a83c09c64599..aeee21188f6c 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -1495,7 +1495,7 @@ const struct device *rpmh_rsc_get_device(const char *name, u32 drv_id) struct rsc_drv_top *rsc_top = rpmh_rsc_get_top_device(name); int i; - if (IS_ERR(rsc_top)) + if (IS_ERR(rsc_top) || strcmp(name, "cam_rsc")) return ERR_PTR(-ENODEV); for (i = 0; i < rsc_top->drv_count; i++) { From 6103dd41dc8d88e66b8673b1cbfa9461523c3b0f Mon Sep 17 00:00:00 2001 From: Kamal Wadhwa Date: Tue, 20 Sep 2022 18:15:15 +0530 Subject: [PATCH 021/117] power: supply: qti_battery_charger: Register wls_psy based on DT property Currently, wireless power supply (wls_psy) is registered by default to support wireless charging. However, on some boards, wireless charging is not supported. When the properties under wls_psy are read from the userspace or a client, it times out and prints error logs unnecessarily leading to a poor user experience. Hence, add a DT property "qcom,wireless-charging-not-supported" which if specified wouldn't register the wls_psy. 
Change-Id: If4ed36957fa8425018f80dfa43d82ee746a45566 Signed-off-by: Kamal Wadhwa Signed-off-by: Jishnu Prakash --- drivers/power/supply/qti_battery_charger.c | 68 +++++++++++++++++++--- 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/drivers/power/supply/qti_battery_charger.c b/drivers/power/supply/qti_battery_charger.c index acc1523bac8b..3fbb45ee2ed7 100644 --- a/drivers/power/supply/qti_battery_charger.c +++ b/drivers/power/supply/qti_battery_charger.c @@ -269,6 +269,7 @@ struct battery_chg_dev { bool block_tx; bool ship_mode_en; bool debug_battery_detected; + bool wls_not_supported; bool wls_fw_update_reqd; u32 wls_fw_version; u16 wls_fw_crc; @@ -1584,13 +1585,19 @@ static int battery_chg_init_psy(struct battery_chg_dev *bcdev) } } - bcdev->psy_list[PSY_TYPE_WLS].psy = - devm_power_supply_register(bcdev->dev, &wls_psy_desc, &psy_cfg); - if (IS_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy)) { - rc = PTR_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy); - bcdev->psy_list[PSY_TYPE_WLS].psy = NULL; - pr_err("Failed to register wireless power supply, rc=%d\n", rc); - return rc; + + if (bcdev->wls_not_supported) { + pr_debug("Wireless charging is not supported\n"); + } else { + bcdev->psy_list[PSY_TYPE_WLS].psy = + devm_power_supply_register(bcdev->dev, &wls_psy_desc, &psy_cfg); + + if (IS_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy)) { + rc = PTR_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy); + bcdev->psy_list[PSY_TYPE_WLS].psy = NULL; + pr_err("Failed to register wireless power supply, rc=%d\n", rc); + return rc; + } } bcdev->psy_list[PSY_TYPE_BATTERY].psy = @@ -2258,6 +2265,44 @@ static struct attribute *battery_class_usb_2_attrs[] = { }; ATTRIBUTE_GROUPS(battery_class_usb_2); +static struct attribute *battery_class_no_wls_attrs[] = { + &class_attr_soh.attr, + &class_attr_resistance.attr, + &class_attr_moisture_detection_status.attr, + &class_attr_moisture_detection_en.attr, + &class_attr_fake_soc.attr, + &class_attr_ship_mode_en.attr, + &class_attr_restrict_chg.attr, + 
&class_attr_restrict_cur.attr, + &class_attr_usb_real_type.attr, + &class_attr_usb_typec_compliant.attr, + &class_attr_usb_num_ports.attr, + &class_attr_charge_control_en.attr, + NULL, +}; +ATTRIBUTE_GROUPS(battery_class_no_wls); + +static struct attribute *battery_class_usb_2_no_wls_attrs[] = { + &class_attr_soh.attr, + &class_attr_resistance.attr, + &class_attr_moisture_detection_status.attr, + &class_attr_moisture_detection_usb_2_status.attr, + &class_attr_moisture_detection_en.attr, + &class_attr_moisture_detection_usb_2_en.attr, + &class_attr_fake_soc.attr, + &class_attr_ship_mode_en.attr, + &class_attr_restrict_chg.attr, + &class_attr_restrict_cur.attr, + &class_attr_usb_real_type.attr, + &class_attr_usb_2_real_type.attr, + &class_attr_usb_typec_compliant.attr, + &class_attr_usb_num_ports.attr, + &class_attr_usb_2_typec_compliant.attr, + &class_attr_charge_control_en.attr, + NULL, +}; +ATTRIBUTE_GROUPS(battery_class_usb_2_no_wls); + #ifdef CONFIG_DEBUG_FS static void battery_chg_add_debugfs(struct battery_chg_dev *bcdev) { @@ -2286,6 +2331,9 @@ static int battery_chg_parse_dt(struct battery_chg_dev *bcdev) int i, rc, len; u32 prev, val; + bcdev->wls_not_supported = of_property_read_bool(node, + "qcom,wireless-charging-not-supported"); + of_property_read_string(node, "qcom,wireless-fw-name", &bcdev->wls_fw_name); @@ -2623,8 +2671,12 @@ static int battery_chg_probe(struct platform_device *pdev) bcdev->battery_class.name = "qcom-battery"; - if (bcdev->num_usb_ports == 2) + if (bcdev->num_usb_ports == 2 && bcdev->wls_not_supported) + bcdev->battery_class.class_groups = battery_class_usb_2_no_wls_groups; + else if (bcdev->num_usb_ports == 2) bcdev->battery_class.class_groups = battery_class_usb_2_groups; + else if (bcdev->wls_not_supported) + bcdev->battery_class.class_groups = battery_class_no_wls_groups; else bcdev->battery_class.class_groups = battery_class_groups; From 591653de4d7817c80574926146828b183cd0090f Mon Sep 17 00:00:00 2001 From: Sneh Mankad Date: 
Mon, 5 Aug 2024 14:59:50 +0530 Subject: [PATCH 022/117] soc: qcom: rpmh: shift spin_lock_init before cpu_pm_register_notifier cpu_pm_notifier is acquiring the spinlock before it is initialized, leading to spinlock with bad magic, hence moving the initialization prior to that. Change-Id: Ibae81d3bc2338dd0be5f2bc509cec559b121fc73 Signed-off-by: Sneh Mankad --- drivers/soc/qcom/rpmh-rsc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index aeee21188f6c..f5c522c0af6c 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -1748,6 +1748,10 @@ static int rpmh_rsc_probe(struct platform_device *pdev) drv[i].regs[DRV_SOLVER_CONFIG]); solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT; solver_config = solver_config >> DRV_HW_SOLVER_SHIFT; + + spin_lock_init(&drv[i].lock); + spin_lock_init(&drv[i].client.cache_lock); + if (of_find_property(dn, "power-domains", NULL)) { ret = rpmh_rsc_pd_attach(&drv[i]); if (ret) @@ -1772,7 +1776,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev) drv[i].regs = rpmh_rsc_reg_offsets_ver_3_0_hw_channel; } - spin_lock_init(&drv[i].lock); init_waitqueue_head(&drv[i].tcs_wait); bitmap_zero(drv[i].tcs_in_use, MAX_TCS_NR); drv[i].client.non_batch_cache = devm_kcalloc(&pdev->dev, CMD_DB_MAX_RESOURCES, @@ -1795,8 +1798,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev) if (ret) return ret; - spin_lock_init(&drv[i].client.cache_lock); - drv[i].ipc_log_ctx = ipc_log_context_create( RSC_DRV_IPC_LOG_SIZE, drv[i].name, 0); From 422eacf6fcb1795e89b2075b3f052e612f3c8b95 Mon Sep 17 00:00:00 2001 From: Richard Maina Date: Wed, 7 Aug 2024 00:38:09 -0700 Subject: [PATCH 023/117] hwspinlock: qcom: Provide function to bust hwspinlock Implement a new operation qcom_hwspinlock_bust() which can be invoked to bust any locks that are in use when a remoteproc is stopped or crashed. 
Change-Id: I0486d5345a47007f254f17c4b88f802a6c962e3a Signed-off-by: Richard Maina Reviewed-by: Bjorn Andersson Signed-off-by: Chris Lew Link: https://lore.kernel.org/r/20240529-hwspinlock-bust-v3-1-c8b924ffa5a2@quicinc.com Signed-off-by: Bjorn Andersson Git-commit: 73100deb59c3892e280234fcc0171a5376c71788 Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [quic_deesin@quicinc.com: Adding bust api in core hwspinlock module is breaking KMI, so exporting bust function from qcom hwspilock module to avoid dependency on core hwspinlock function] Signed-off-by: Deepak Kumar Singh --- drivers/hwspinlock/qcom_hwspinlock.c | 38 ++++++++++++++++++++++++ include/linux/soc/qcom/qcom_hwspinlock.h | 24 +++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 include/linux/soc/qcom/qcom_hwspinlock.h diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index 3f08cd4a5c28..e6cb9426c91d 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "hwspinlock_internal.h" @@ -25,6 +26,43 @@ struct qcom_hwspinlock_of_data { const struct regmap_config *regmap_config; }; +/** + * qcom_hwspinlock_bust() - bust qcom specific hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to bust + * @id: identifier of the remote lock holder, if applicable + * + * This function will bust a hwspinlock that was previously acquired as + * long as the current owner of the lock matches the id given by the caller. + * + * Context: Process context. 
+ * + * Returns: 0 on success, or error if bust operation fails + */ +int qcom_hwspinlock_bust(struct hwspinlock *lock, unsigned int id) +{ + struct regmap_field *field = lock->priv; + u32 owner; + int ret; + + ret = regmap_field_read(field, &owner); + if (ret) { + pr_err("%s: unable to query spinlock owner\n", __func__); + return ret; + } + + if (owner != id) + return 0; + + ret = regmap_field_write(field, 0); + if (ret) { + pr_err("%s: failed to bust spinlock\n", __func__); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_hwspinlock_bust); + static int qcom_hwspinlock_trylock(struct hwspinlock *lock) { struct regmap_field *field = lock->priv; diff --git a/include/linux/soc/qcom/qcom_hwspinlock.h b/include/linux/soc/qcom/qcom_hwspinlock.h new file mode 100644 index 000000000000..a5f5a7ec85fc --- /dev/null +++ b/include/linux/soc/qcom/qcom_hwspinlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __QCOM_HWSPINLOCK_H +#define __QCOM_HWSPINLOCK_H + +struct hwspinlock; + +#if IS_ENABLED(CONFIG_HWSPINLOCK_QCOM) + +int qcom_hwspinlock_bust(struct hwspinlock *hwlock, unsigned int id); + +#else /* !CONFIG_HWSPINLOCK_QCOM */ + +static inline int qcom_hwspinlock_bust(struct hwspinlock *hwlock, unsigned int id) +{ + return 0; +} + +#endif /* CONFIG_HWSPINLOCK_QCOM */ + +#endif /* __QCOM_HWSPINLOCK_H */ From ba3c82504e3282974672540089e9b0b9d4c9f6c8 Mon Sep 17 00:00:00 2001 From: Pranav Mahesh Phansalkar Date: Wed, 3 Jul 2024 16:05:16 +0530 Subject: [PATCH 024/117] rpmsg: native: Remove IRQF_ONESHOT flag Remove IRQF_ONESHOT flag as it disables incoming interrupts while running threaded irq. 
Change-Id: Ieb045e8aee3fc3b179fc7e7517064ba1c76a4483 Signed-off-by: Pranav Mahesh Phansalkar --- drivers/rpmsg/qcom_glink_native.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 3b869e0599e0..afd3ecb74355 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -2542,7 +2542,7 @@ int qcom_glink_native_start(struct qcom_glink *glink) ret = devm_request_threaded_irq(dev, irq, qcom_glink_native_intr, qcom_glink_native_thread_intr, - IRQF_NO_SUSPEND | IRQF_ONESHOT, + IRQF_NO_SUSPEND, glink->irqname, glink); if (ret) { dev_err(dev, "failed to request IRQ with %d\n", ret); From 96415a61d207aeb0f973dcb946ce80366e0dde45 Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Fri, 5 Jul 2024 14:54:14 +0530 Subject: [PATCH 025/117] pinctrl: qcom: Add TLMM support for Neo platform Neo pinctrl driver snapshot from msm-5.10 branch commit 1c8fbf07237c ("drivers: pinctrl: qcom: Add gpio to pdc mapping for neo"). Change-Id: I944d2ae61c2f80501442e2ca8d9229c4229c9a2d Signed-off-by: Asit Shah --- drivers/pinctrl/qcom/Kconfig | 12 + drivers/pinctrl/qcom/Makefile | 1 + drivers/pinctrl/qcom/pinctrl-neo.c | 82 ++ drivers/pinctrl/qcom/pinctrl-neo.h | 1470 ++++++++++++++++++++++++++++ 4 files changed, 1565 insertions(+) create mode 100644 drivers/pinctrl/qcom/pinctrl-neo.c create mode 100644 drivers/pinctrl/qcom/pinctrl-neo.h diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 7843f20cdd61..12ccd1f262f9 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -23,6 +23,18 @@ config PINCTRL_PINEAPPLE Say Y here to compile statically, or M here to compile it as a module. If unsure, say N. 
+config PINCTRL_NEO + tristate "Qualcomm Technologies Inc NEO pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc Top Level Mode Multiplexer block (TLMM) + block found on the Qualcomm Technologies Inc NEO platforms. + This driver could also be used for a target supporting secondary VM. + Say Y here to compile statically, or M here to compile it as a module. + If unsure, say N. + config PINCTRL_ANORAK tristate "Qualcomm Technologies Inc ANORAK pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 587473d00056..4caaa19ec09a 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -2,6 +2,7 @@ # Qualcomm pin control drivers obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o obj-$(CONFIG_PINCTRL_PINEAPPLE) += pinctrl-pineapple.o +obj-$(CONFIG_PINCTRL_NEO) += pinctrl-neo.o obj-$(CONFIG_PINCTRL_ANORAK) += pinctrl-anorak.o obj-$(CONFIG_PINCTRL_NIOBE) += pinctrl-niobe.o obj-$(CONFIG_PINCTRL_CLIFFS) += pinctrl-cliffs.o diff --git a/drivers/pinctrl/qcom/pinctrl-neo.c b/drivers/pinctrl/qcom/pinctrl-neo.c new file mode 100644 index 000000000000..d938e1bea2ba --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-neo.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "pinctrl-msm.h" +#include "pinctrl-neo.h" + +static const struct msm_pinctrl_soc_data neo_pinctrl = { + .pins = neo_pins, + .npins = ARRAY_SIZE(neo_pins), + .functions = neo_functions, + .nfunctions = ARRAY_SIZE(neo_functions), + .groups = neo_groups, + .ngroups = ARRAY_SIZE(neo_groups), + .ngpios = 156, + .qup_regs = neo_qup_regs, + .nqup_regs = ARRAY_SIZE(neo_qup_regs), + .wakeirq_map = neo_pdc_map, + .nwakeirq_map = ARRAY_SIZE(neo_pdc_map), +}; + +static void qcom_trace_gpio_read(void *unused, + struct gpio_device *gdev, + bool *block_gpio_read) +{ + *block_gpio_read = true; +} + +static int neo_pinctrl_probe(struct platform_device *pdev) +{ + const struct msm_pinctrl_soc_data *pinctrl_data; + struct device *dev = &pdev->dev; + + pinctrl_data = of_device_get_match_data(&pdev->dev); + if (!pinctrl_data) + return -EINVAL; + + if (of_device_is_compatible(dev->of_node, "qcom,neo-vm-pinctrl")) + register_trace_android_vh_gpio_block_read(qcom_trace_gpio_read, + NULL); + + return msm_pinctrl_probe(pdev, pinctrl_data); +} + +static const struct of_device_id neo_pinctrl_of_match[] = { + { .compatible = "qcom,neo-pinctrl", .data = &neo_pinctrl}, + { }, +}; + +static struct platform_driver neo_pinctrl_driver = { + .driver = { + .name = "neo-pinctrl", + .of_match_table = neo_pinctrl_of_match, + }, + .probe = neo_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init neo_pinctrl_init(void) +{ + return platform_driver_register(&neo_pinctrl_driver); +} +arch_initcall(neo_pinctrl_init); + +static void __exit neo_pinctrl_exit(void) +{ + platform_driver_unregister(&neo_pinctrl_driver); +} +module_exit(neo_pinctrl_exit); + +MODULE_DESCRIPTION("QTI neo pinctrl driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, neo_pinctrl_of_match); +MODULE_SOFTDEP("pre: qcom_tlmm_vm_irqchip"); diff --git a/drivers/pinctrl/qcom/pinctrl-neo.h b/drivers/pinctrl/qcom/pinctrl-neo.h new file mode 
100644 index 000000000000..2735470293ab --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-neo.h @@ -0,0 +1,1470 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define REG_BASE 0x100000 +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_BASE + REG_SIZE * id, \ + .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \ + .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + .wake_reg = REG_BASE + wake_off, \ + .wake_bit = bit, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ 
+ .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define QUP_I3C(qup_mode, qup_offset) \ + { \ + .mode = qup_mode, \ + .offset = qup_offset, \ + } + +static const struct pinctrl_pin_desc neo_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + 
PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + 
PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + PINCTRL_PIN(152, "GPIO_152"), + PINCTRL_PIN(153, "GPIO_153"), + PINCTRL_PIN(154, "GPIO_154"), + PINCTRL_PIN(155, "GPIO_155"), + PINCTRL_PIN(156, "SDC1_RCLK"), + PINCTRL_PIN(157, "SDC1_CLK"), + PINCTRL_PIN(158, "SDC1_CMD"), + PINCTRL_PIN(159, "SDC1_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); 
+/*
+ * One single-pin group per SoC GPIO (continued from gpio0 above), generated
+ * by the DECLARE_MSM_GPIO_PINS() helper so group index == pin number.
+ */
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+/* Dedicated SDC1 pads occupy pin-descriptor slots 156-159 (see pin table). */
+static const unsigned int sdc1_rclk_pins[] = { 156 };
+static const unsigned int sdc1_clk_pins[] = { 157 };
+static const unsigned int sdc1_cmd_pins[] = { 158 };
+static const unsigned int sdc1_data_pins[] = { 159 };
+
+/*
+ * Mux-function selectors for this SoC; msm_mux_NA marks an unused slot in
+ * the PINGROUP() tables below.
+ * NOTE(review): assumed to mirror the order of the FUNCTION() entries in
+ * neo_functions[] — confirm against pinctrl-msm.h conventions.
+ */
+enum neo_functions {
+	msm_mux_gpio,
+	msm_mux_PCIE0_CLK_REQ_N,
+	msm_mux_PCIE1_CLK_REQ_N,
+	msm_mux_aoss_cti,
+	msm_mux_atest_char,
+	msm_mux_atest_char0,
+	msm_mux_atest_char1,
+	msm_mux_atest_char2,
+	msm_mux_atest_char3,
+	msm_mux_atest_usb0,
+	msm_mux_atest_usb00,
+	msm_mux_atest_usb01,
+	msm_mux_atest_usb02,
+	msm_mux_atest_usb03,
+	msm_mux_audio_ref,
+	msm_mux_cam_mclk,
+	msm_mux_cci_async,
+	msm_mux_cci_i2c,
+	msm_mux_cci_timer0,
+	msm_mux_cci_timer1,
+	msm_mux_cci_timer2,
+	msm_mux_cci_timer3,
+	msm_mux_cci_timer4,
+	msm_mux_cri_trng,
+	msm_mux_cri_trng0,
+	msm_mux_cri_trng1,
+	msm_mux_dbg_out,
+	msm_mux_ddr_bist,
+	msm_mux_ddr_pxi0,
+	msm_mux_ddr_pxi1,
+	msm_mux_ddr_pxi2,
+	msm_mux_ddr_pxi3,
+	msm_mux_dp0_hot,
+	msm_mux_ext_mclk0,
+	msm_mux_ext_mclk1,
+	msm_mux_gcc_gp1,
+	msm_mux_gcc_gp2,
+	msm_mux_gcc_gp3,
+	msm_mux_host2wlan_sol,
+	msm_mux_i2s0_data0,
+	msm_mux_i2s0_data1,
+	msm_mux_i2s0_sck,
+	msm_mux_i2s0_ws,
+	msm_mux_ibi_i3c,
+	msm_mux_jitter_bist,
+	msm_mux_mdp_vsync,
+	msm_mux_mdp_vsync0,
+	msm_mux_mdp_vsync1,
+	msm_mux_mdp_vsync2,
+	msm_mux_mdp_vsync3,
+	msm_mux_phase_flag0,
+	msm_mux_phase_flag1,
+	msm_mux_phase_flag10,
+	msm_mux_phase_flag11,
+	msm_mux_phase_flag12,
+	msm_mux_phase_flag13,
+	msm_mux_phase_flag14,
+	msm_mux_phase_flag15,
+	msm_mux_phase_flag16,
+	msm_mux_phase_flag17,
+	msm_mux_phase_flag18,
+	msm_mux_phase_flag19,
+	msm_mux_phase_flag2,
+	msm_mux_phase_flag20,
+	msm_mux_phase_flag21,
+	msm_mux_phase_flag22,
+	msm_mux_phase_flag23,
+	msm_mux_phase_flag24,
+	msm_mux_phase_flag25,
+	msm_mux_phase_flag26,
+	msm_mux_phase_flag27,
+	msm_mux_phase_flag28,
+	msm_mux_phase_flag29,
+	msm_mux_phase_flag3,
+	msm_mux_phase_flag30,
+	msm_mux_phase_flag31,
+	msm_mux_phase_flag4,
+	msm_mux_phase_flag5,
+	msm_mux_phase_flag6,
+	msm_mux_phase_flag7,
+	msm_mux_phase_flag8,
+	msm_mux_phase_flag9,
+	msm_mux_pll_bist,
+	msm_mux_pll_clk,
+	msm_mux_prng_rosc0,
+	msm_mux_prng_rosc1,
+	msm_mux_prng_rosc2,
+	msm_mux_prng_rosc3,
+	msm_mux_qdss_cti,
+	msm_mux_qdss_gpio,
+	msm_mux_qdss_gpio0,
+	msm_mux_qdss_gpio1,
+	msm_mux_qdss_gpio10,
+	msm_mux_qdss_gpio11,
+	msm_mux_qdss_gpio12,
+	msm_mux_qdss_gpio13,
+	msm_mux_qdss_gpio14,
+	msm_mux_qdss_gpio15,
+	msm_mux_qdss_gpio2,
+	msm_mux_qdss_gpio3,
+	msm_mux_qdss_gpio4,
+	msm_mux_qdss_gpio5,
+	msm_mux_qdss_gpio6,
+	msm_mux_qdss_gpio7,
+	msm_mux_qdss_gpio8,
+	msm_mux_qdss_gpio9,
+	msm_mux_qspi00,
+	msm_mux_qspi01,
+	msm_mux_qspi02,
+	msm_mux_qspi03,
+	msm_mux_qspi0_clk,
+	msm_mux_qspi0_cs0,
+	msm_mux_qspi0_cs1,
+	msm_mux_qup0_se0,
+	msm_mux_qup0_se1,
+	msm_mux_qup0_se2,
+	msm_mux_qup0_se3,
+	msm_mux_qup0_se4,
+	msm_mux_qup0_se5,
+	msm_mux_qup1_se0,
+	msm_mux_qup1_se1,
+	msm_mux_qup1_se2,
+	msm_mux_qup1_se3,
+	msm_mux_qup1_se4,
+	msm_mux_qup1_se5,
+	msm_mux_tb_trig,
+	msm_mux_tgu_ch0,
+	msm_mux_tgu_ch1,
+	msm_mux_tgu_ch2,
+	msm_mux_tgu_ch3,
+	msm_mux_tmess_prng0,
+	msm_mux_tmess_prng1,
+	msm_mux_tmess_prng2,
+	msm_mux_tmess_prng3,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_usb0_phy,
+	msm_mux_vsense_trigger,
+	msm_mux_NA,
+};
+
+/* Group names selectable by the generic "gpio" function: one per GPIO 0-155. */
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+	"gpio153", "gpio154", "gpio155",
+};
+/*
+ * Per-function group lists: the pin groups on which each mux function can
+ * be selected (referenced by name from the FUNCTION() table below).
+ */
+static const char * const PCIE0_CLK_REQ_N_groups[] = {
+	"gpio56",
+};
+static const char * const PCIE1_CLK_REQ_N_groups[] = {
+	"gpio59",
+};
+static const char * const aoss_cti_groups[] = {
+	"gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const atest_char_groups[] = {
+	"gpio45",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio90",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio89",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio88",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio87",
+};
+static const char * const atest_usb0_groups[] = {
+	"gpio26",
+};
+static const char * const atest_usb00_groups[] = {
+	"gpio110",
+};
+static const char * const atest_usb01_groups[] = {
+	"gpio109",
+};
+static const char * const atest_usb02_groups[] = {
+	"gpio27",
+};
+static const char * const atest_usb03_groups[] = {
+	"gpio60",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio103",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+	"gpio76",
+};
+static const char * const cci_async_groups[] = {
+	"gpio80", "gpio81", "gpio82",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio67", "gpio68", "gpio78", "gpio79", "gpio80", "gpio81", "gpio83",
+	"gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+	"gpio91", "gpio92",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio77",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio78",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio79",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio80",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio81",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio70",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio71",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio59",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio4", "gpio5", "gpio100", "gpio103",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio56", "gpio57",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio41", "gpio45",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio48", "gpio55",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio46", "gpio47",
+};
+static const char * const dp0_hot_groups[] = {
+	"gpio35", "gpio103",
+};
+static const char * const ext_mclk0_groups[] = {
+	"gpio104",
+};
+/* Per-function group lists (continued): pins on which each function muxes. */
+static const char * const ext_mclk1_groups[] = {
+	"gpio103",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio129", "gpio132",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio130", "gpio135",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio131", "gpio136",
+};
+static const char * const host2wlan_sol_groups[] = {
+	"gpio111",
+};
+static const char * const i2s0_data0_groups[] = {
+	"gpio106",
+};
+static const char * const i2s0_data1_groups[] = {
+	"gpio107",
+};
+static const char * const i2s0_sck_groups[] = {
+	"gpio105",
+};
+static const char * const i2s0_ws_groups[] = {
+	"gpio108",
+};
+static const char * const ibi_i3c_groups[] = {
+	"gpio0", "gpio1", "gpio91", "gpio92",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio0",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio12", "gpio13", "gpio41", "gpio49", "gpio50",
+};
+/* NOTE(review): vsync0/1 both on gpio49 and vsync2/3 both on gpio50 — this
+ * matches the PINGROUP table below, but confirm against the TLMM spec. */
+static const char * const mdp_vsync0_groups[] = {
+	"gpio49",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio49",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio50",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio50",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio133",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio128",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio94",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio93",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio134",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio139",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio138",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio137",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio61",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio41",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio23",
+};
+static const char * const phase_flag2_groups[] = { + "gpio127", +}; +static const char * const phase_flag20_groups[] = { + "gpio22", +}; +static const char * const phase_flag21_groups[] = { + "gpio21", +}; +static const char * const phase_flag22_groups[] = { + "gpio19", +}; +static const char * const phase_flag23_groups[] = { + "gpio18", +}; +static const char * const phase_flag24_groups[] = { + "gpio17", +}; +static const char * const phase_flag25_groups[] = { + "gpio16", +}; +static const char * const phase_flag26_groups[] = { + "gpio13", +}; +static const char * const phase_flag27_groups[] = { + "gpio12", +}; +static const char * const phase_flag28_groups[] = { + "gpio3", +}; +static const char * const phase_flag29_groups[] = { + "gpio2", +}; +static const char * const phase_flag3_groups[] = { + "gpio126", +}; +static const char * const phase_flag30_groups[] = { + "gpio149", +}; +static const char * const phase_flag31_groups[] = { + "gpio148", +}; +static const char * const phase_flag4_groups[] = { + "gpio151", +}; +static const char * const phase_flag5_groups[] = { + "gpio150", +}; +static const char * const phase_flag6_groups[] = { + "gpio98", +}; +static const char * const phase_flag7_groups[] = { + "gpio97", +}; +static const char * const phase_flag8_groups[] = { + "gpio96", +}; +static const char * const phase_flag9_groups[] = { + "gpio95", +}; +static const char * const pll_bist_groups[] = { + "gpio8", +}; +static const char * const pll_clk_groups[] = { + "gpio54", +}; +static const char * const prng_rosc0_groups[] = { + "gpio72", +}; +static const char * const prng_rosc1_groups[] = { + "gpio73", +}; +static const char * const prng_rosc2_groups[] = { + "gpio74", +}; +static const char * const prng_rosc3_groups[] = { + "gpio75", +}; +static const char * const qdss_cti_groups[] = { + "gpio28", "gpio29", "gpio36", "gpio37", "gpio38", "gpio38", "gpio47", + "gpio48", "gpio53", "gpio53", "gpio105", "gpio106", "gpio154", + "gpio155", +}; +static const char * 
const qdss_gpio_groups[] = { + "gpio89", "gpio90", "gpio109", "gpio110", +}; +static const char * const qdss_gpio0_groups[] = { + "gpio24", "gpio65", +}; +static const char * const qdss_gpio1_groups[] = { + "gpio25", "gpio66", +}; +static const char * const qdss_gpio10_groups[] = { + "gpio63", "gpio83", +}; +static const char * const qdss_gpio11_groups[] = { + "gpio64", "gpio84", +}; +static const char * const qdss_gpio12_groups[] = { + "gpio39", "gpio85", +}; +static const char * const qdss_gpio13_groups[] = { + "gpio10", "gpio86", +}; +static const char * const qdss_gpio14_groups[] = { + "gpio45", "gpio87", +}; +static const char * const qdss_gpio15_groups[] = { + "gpio11", "gpio88", +}; +static const char * const qdss_gpio2_groups[] = { + "gpio26", "gpio67", +}; +static const char * const qdss_gpio3_groups[] = { + "gpio27", "gpio68", +}; +static const char * const qdss_gpio4_groups[] = { + "gpio30", "gpio77", +}; +static const char * const qdss_gpio5_groups[] = { + "gpio31", "gpio78", +}; +static const char * const qdss_gpio6_groups[] = { + "gpio4", "gpio79", +}; +static const char * const qdss_gpio7_groups[] = { + "gpio5", "gpio80", +}; +static const char * const qdss_gpio8_groups[] = { + "gpio6", "gpio81", +}; +static const char * const qdss_gpio9_groups[] = { + "gpio7", "gpio82", +}; +static const char * const qspi00_groups[] = { + "gpio32", +}; +static const char * const qspi01_groups[] = { + "gpio33", +}; +static const char * const qspi02_groups[] = { + "gpio36", +}; +static const char * const qspi03_groups[] = { + "gpio37", +}; +static const char * const qspi0_clk_groups[] = { + "gpio34", +}; +static const char * const qspi0_cs0_groups[] = { + "gpio35", +}; +static const char * const qspi0_cs1_groups[] = { + "gpio38", +}; +static const char * const qup0_se0_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio93", +}; +static const char * const qup0_se1_groups[] = { + "gpio2", "gpio3", "gpio61", "gpio62", +}; +static const char * const qup0_se2_groups[] 
= { + "gpio12", "gpio13", "gpio22", "gpio23", +}; +static const char * const qup0_se3_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", "gpio41", +}; +static const char * const qup0_se4_groups[] = { + "gpio20", "gpio21", "gpio22", "gpio23", "gpio94", +}; +static const char * const qup0_se5_groups[] = { + "gpio95", "gpio96", "gpio97", "gpio98", +}; +static const char * const qup1_se0_groups[] = { + "gpio63", "gpio64", "gpio91", "gpio92", +}; +static const char * const qup1_se1_groups[] = { + "gpio24", "gpio25", "gpio26", "gpio27", +}; +static const char * const qup1_se2_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const qup1_se3_groups[] = { + "gpio34", "gpio35", "gpio109", "gpio110", +}; +static const char * const qup1_se4_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", +}; +static const char * const qup1_se5_groups[] = { + "gpio14", "gpio15", "gpio28", "gpio30", +}; +static const char * const tb_trig_groups[] = { + "gpio69", +}; +static const char * const tgu_ch0_groups[] = { + "gpio20", +}; +static const char * const tgu_ch1_groups[] = { + "gpio21", +}; +static const char * const tgu_ch2_groups[] = { + "gpio22", +}; +static const char * const tgu_ch3_groups[] = { + "gpio23", +}; +static const char * const tmess_prng0_groups[] = { + "gpio80", +}; +static const char * const tmess_prng1_groups[] = { + "gpio79", +}; +static const char * const tmess_prng2_groups[] = { + "gpio83", +}; +static const char * const tmess_prng3_groups[] = { + "gpio81", +}; +static const char * const tsense_pwm1_groups[] = { + "gpio86", +}; +static const char * const tsense_pwm2_groups[] = { + "gpio86", +}; +static const char * const usb0_phy_groups[] = { + "gpio100", +}; +static const char * const vsense_trigger_groups[] = { + "gpio36", +}; + +static const struct msm_function neo_functions[] = { + FUNCTION(gpio), + FUNCTION(qup0_se0), + FUNCTION(ibi_i3c), + FUNCTION(jitter_bist), + FUNCTION(qup0_se1), + FUNCTION(phase_flag29), + 
FUNCTION(phase_flag28), + FUNCTION(qup1_se4), + FUNCTION(ddr_bist), + FUNCTION(qdss_gpio6), + FUNCTION(qdss_gpio7), + FUNCTION(qdss_gpio8), + FUNCTION(qdss_gpio9), + FUNCTION(qup1_se2), + FUNCTION(pll_bist), + FUNCTION(qdss_gpio13), + FUNCTION(qdss_gpio15), + FUNCTION(qup0_se2), + FUNCTION(mdp_vsync), + FUNCTION(phase_flag27), + FUNCTION(phase_flag26), + FUNCTION(qup1_se5), + FUNCTION(qup0_se3), + FUNCTION(phase_flag25), + FUNCTION(phase_flag24), + FUNCTION(phase_flag23), + FUNCTION(phase_flag22), + FUNCTION(qup0_se4), + FUNCTION(aoss_cti), + FUNCTION(tgu_ch0), + FUNCTION(phase_flag21), + FUNCTION(tgu_ch1), + FUNCTION(phase_flag20), + FUNCTION(tgu_ch2), + FUNCTION(phase_flag19), + FUNCTION(tgu_ch3), + FUNCTION(qup1_se1), + FUNCTION(qdss_gpio0), + FUNCTION(qdss_gpio1), + FUNCTION(qdss_gpio2), + FUNCTION(atest_usb0), + FUNCTION(qdss_gpio3), + FUNCTION(atest_usb02), + FUNCTION(qdss_cti), + FUNCTION(qdss_gpio4), + FUNCTION(qdss_gpio5), + FUNCTION(qspi00), + FUNCTION(qspi01), + FUNCTION(qspi0_clk), + FUNCTION(qup1_se3), + FUNCTION(qspi0_cs0), + FUNCTION(dp0_hot), + FUNCTION(qspi02), + FUNCTION(vsense_trigger), + FUNCTION(qspi03), + FUNCTION(qspi0_cs1), + FUNCTION(qdss_gpio12), + FUNCTION(phase_flag18), + FUNCTION(ddr_pxi1), + FUNCTION(qdss_gpio14), + FUNCTION(atest_char), + FUNCTION(ddr_pxi3), + FUNCTION(ddr_pxi2), + FUNCTION(mdp_vsync0), + FUNCTION(mdp_vsync1), + FUNCTION(mdp_vsync2), + FUNCTION(mdp_vsync3), + FUNCTION(pll_clk), + FUNCTION(PCIE0_CLK_REQ_N), + FUNCTION(ddr_pxi0), + FUNCTION(PCIE1_CLK_REQ_N), + FUNCTION(dbg_out), + FUNCTION(cri_trng), + FUNCTION(atest_usb03), + FUNCTION(phase_flag17), + FUNCTION(phase_flag16), + FUNCTION(qup1_se0), + FUNCTION(qdss_gpio10), + FUNCTION(qdss_gpio11), + FUNCTION(cci_i2c), + FUNCTION(cam_mclk), + FUNCTION(tb_trig), + FUNCTION(cri_trng0), + FUNCTION(cri_trng1), + FUNCTION(prng_rosc0), + FUNCTION(prng_rosc1), + FUNCTION(prng_rosc2), + FUNCTION(prng_rosc3), + FUNCTION(cci_timer0), + FUNCTION(cci_timer1), + FUNCTION(cci_timer2), 
+ FUNCTION(tmess_prng1), + FUNCTION(cci_timer3), + FUNCTION(cci_async), + FUNCTION(tmess_prng0), + FUNCTION(cci_timer4), + FUNCTION(tmess_prng3), + FUNCTION(tmess_prng2), + FUNCTION(tsense_pwm1), + FUNCTION(tsense_pwm2), + FUNCTION(atest_char3), + FUNCTION(atest_char2), + FUNCTION(qdss_gpio), + FUNCTION(atest_char1), + FUNCTION(atest_char0), + FUNCTION(phase_flag11), + FUNCTION(phase_flag10), + FUNCTION(qup0_se5), + FUNCTION(phase_flag9), + FUNCTION(phase_flag8), + FUNCTION(phase_flag7), + FUNCTION(phase_flag6), + FUNCTION(usb0_phy), + FUNCTION(ext_mclk1), + FUNCTION(audio_ref), + FUNCTION(ext_mclk0), + FUNCTION(i2s0_sck), + FUNCTION(i2s0_data0), + FUNCTION(i2s0_data1), + FUNCTION(i2s0_ws), + FUNCTION(atest_usb01), + FUNCTION(atest_usb00), + FUNCTION(host2wlan_sol), + FUNCTION(phase_flag3), + FUNCTION(phase_flag2), + FUNCTION(phase_flag1), + FUNCTION(gcc_gp1), + FUNCTION(gcc_gp2), + FUNCTION(gcc_gp3), + FUNCTION(phase_flag0), + FUNCTION(phase_flag12), + FUNCTION(phase_flag15), + FUNCTION(phase_flag14), + FUNCTION(phase_flag13), + FUNCTION(phase_flag31), + FUNCTION(phase_flag30), + FUNCTION(phase_flag5), + FUNCTION(phase_flag4), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup neo_groups[] = { + [0] = PINGROUP(0, qup0_se0, ibi_i3c, jitter_bist, NA, NA, NA, NA, NA, + NA, 0x9C014, 0), + [1] = PINGROUP(1, qup0_se0, ibi_i3c, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [2] = PINGROUP(2, qup0_se0, qup0_se1, phase_flag29, NA, NA, NA, NA, NA, + NA, 0, -1), + [3] = PINGROUP(3, qup0_se0, qup0_se1, phase_flag28, NA, NA, NA, NA, NA, + NA, 0x9C014, 1), + [4] = PINGROUP(4, qup1_se4, ddr_bist, qdss_gpio6, NA, NA, NA, NA, NA, + NA, 0, -1), + [5] = PINGROUP(5, qup1_se4, ddr_bist, qdss_gpio7, NA, NA, NA, NA, NA, + NA, 0, -1), + [6] = PINGROUP(6, qup1_se4, qdss_gpio8, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 3), + [7] = PINGROUP(7, qup1_se4, qdss_gpio9, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 4), + [8] = PINGROUP(8, qup1_se2, pll_bist, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [9] = PINGROUP(9, qup1_se2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [10] = PINGROUP(10, qup1_se2, qdss_gpio13, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 5), + [11] = PINGROUP(11, qup1_se2, qdss_gpio15, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 6), + [12] = PINGROUP(12, qup0_se2, mdp_vsync, phase_flag27, NA, NA, NA, NA, + NA, NA, 0x9C014, 2), + [13] = PINGROUP(13, qup0_se2, mdp_vsync, phase_flag26, NA, NA, NA, NA, + NA, NA, 0x9C014, 3), + [14] = PINGROUP(14, qup1_se5, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 7), + [15] = PINGROUP(15, qup1_se5, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 8), + [16] = PINGROUP(16, qup0_se3, phase_flag25, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [17] = PINGROUP(17, qup0_se3, phase_flag24, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [18] = PINGROUP(18, qup0_se3, phase_flag23, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 4), + [19] = PINGROUP(19, qup0_se3, phase_flag22, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 5), + [20] = PINGROUP(20, qup0_se4, aoss_cti, tgu_ch0, NA, NA, NA, NA, NA, + NA, 0, -1), + [21] = PINGROUP(21, qup0_se4, aoss_cti, phase_flag21, tgu_ch1, NA, NA, + NA, NA, NA, 0, -1), + [22] = PINGROUP(22, qup0_se4, qup0_se2, aoss_cti, 
phase_flag20, + tgu_ch2, NA, NA, NA, NA, 0, -1), + [23] = PINGROUP(23, qup0_se4, qup0_se2, aoss_cti, phase_flag19, + tgu_ch3, NA, NA, NA, NA, 0x9C014, 6), + [24] = PINGROUP(24, qup1_se1, qdss_gpio0, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [25] = PINGROUP(25, qup1_se1, qdss_gpio1, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [26] = PINGROUP(26, qup1_se1, qdss_gpio2, atest_usb0, NA, NA, NA, NA, + NA, NA, 0, -1), + [27] = PINGROUP(27, qup1_se1, qdss_gpio3, atest_usb02, NA, NA, NA, NA, + NA, NA, 0x9C008, 9), + [28] = PINGROUP(28, qup1_se5, qdss_cti, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 10), + [29] = PINGROUP(29, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C000, 0), + [30] = PINGROUP(30, qup1_se5, qdss_gpio4, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 11), + [31] = PINGROUP(31, qdss_gpio5, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 12), + [32] = PINGROUP(32, qspi00, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 13), + [33] = PINGROUP(33, qspi01, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C008, 14), + [34] = PINGROUP(34, qspi0_clk, qup1_se3, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [35] = PINGROUP(35, qspi0_cs0, qup1_se3, dp0_hot, NA, NA, NA, NA, NA, + NA, 0x9C008, 15), + [36] = PINGROUP(36, qspi02, qdss_cti, vsense_trigger, NA, NA, NA, NA, + NA, NA, 0, -1), + [37] = PINGROUP(37, qspi03, qdss_cti, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 0), + [38] = PINGROUP(38, qspi0_cs1, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, + NA, 0x9C00C, 1), + [39] = PINGROUP(39, qdss_gpio12, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 2), + [40] = PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 1), + [41] = PINGROUP(41, qup0_se3, mdp_vsync, phase_flag18, NA, ddr_pxi1, + NA, NA, NA, NA, 0x9C014, 7), + [42] = PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 3), + [43] = PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 2), + [44] = PINGROUP(44, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 3), + [45] = PINGROUP(45, qdss_gpio14, ddr_pxi1, atest_char, NA, NA, NA, NA, + NA, NA, 0x9C00C, 4), + 
[46] = PINGROUP(46, ddr_pxi3, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 5), + [47] = PINGROUP(47, qdss_cti, ddr_pxi3, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 6), + [48] = PINGROUP(48, qdss_cti, ddr_pxi2, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 7), + [49] = PINGROUP(49, mdp_vsync, mdp_vsync0, mdp_vsync1, NA, NA, NA, NA, + NA, NA, 0x9C014, 8), + [50] = PINGROUP(50, mdp_vsync, mdp_vsync2, mdp_vsync3, NA, NA, NA, NA, + NA, NA, 0x9C014, 9), + [51] = PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [52] = PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [53] = PINGROUP(53, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA, + 0x9C000, 4), + [54] = PINGROUP(54, pll_clk, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C000, 5), + [55] = PINGROUP(55, NA, ddr_pxi2, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 8), + [56] = PINGROUP(56, PCIE0_CLK_REQ_N, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA, + 0x9C00C, 9), + [57] = PINGROUP(57, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 10), + [58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C00C, 11), + [59] = PINGROUP(59, PCIE1_CLK_REQ_N, dbg_out, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 12), + [60] = PINGROUP(60, cri_trng, atest_usb03, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 13), + [61] = PINGROUP(61, qup0_se1, phase_flag17, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 10), + [62] = PINGROUP(62, qup0_se1, phase_flag16, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 11), + [63] = PINGROUP(63, qup1_se0, qdss_gpio10, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 14), + [64] = PINGROUP(64, qup1_se0, qdss_gpio11, NA, NA, NA, NA, NA, NA, NA, + 0x9C00C, 15), + [65] = PINGROUP(65, qdss_gpio0, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 12), + [66] = PINGROUP(66, qdss_gpio1, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [67] = PINGROUP(67, cci_i2c, qdss_gpio2, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 13), + [68] = PINGROUP(68, cci_i2c, qdss_gpio3, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 14), + [69] = PINGROUP(69, cam_mclk, tb_trig, NA, NA, NA, NA, NA, NA, NA, + 0, 
-1), + [70] = PINGROUP(70, cam_mclk, cri_trng0, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [71] = PINGROUP(71, cam_mclk, cri_trng1, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [72] = PINGROUP(72, cam_mclk, prng_rosc0, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [73] = PINGROUP(73, cam_mclk, prng_rosc1, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [74] = PINGROUP(74, cam_mclk, prng_rosc2, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [75] = PINGROUP(75, cam_mclk, prng_rosc3, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [76] = PINGROUP(76, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C010, 0), + [77] = PINGROUP(77, cci_timer0, qdss_gpio4, NA, NA, NA, NA, NA, NA, NA, + 0x9C014, 15), + [78] = PINGROUP(78, cci_timer1, cci_i2c, qdss_gpio5, NA, NA, NA, NA, + NA, NA, 0, -1), + [79] = PINGROUP(79, cci_timer2, cci_i2c, tmess_prng1, qdss_gpio6, NA, + NA, NA, NA, NA, 0, -1), + [80] = PINGROUP(80, cci_timer3, cci_i2c, cci_async, tmess_prng0, + qdss_gpio7, NA, NA, NA, NA, 0, -1), + [81] = PINGROUP(81, cci_timer4, cci_i2c, cci_async, tmess_prng3, + qdss_gpio8, NA, NA, NA, NA, 0, -1), + [82] = PINGROUP(82, cci_async, qdss_gpio9, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [83] = PINGROUP(83, cci_i2c, tmess_prng2, qdss_gpio10, NA, NA, NA, NA, + NA, NA, 0x9C018, 0), + [84] = PINGROUP(84, cci_i2c, qdss_gpio11, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [85] = PINGROUP(85, cci_i2c, qdss_gpio12, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [86] = PINGROUP(86, cci_i2c, qdss_gpio13, tsense_pwm1, tsense_pwm2, NA, + NA, NA, NA, NA, 0, -1), + [87] = PINGROUP(87, cci_i2c, qdss_gpio14, atest_char3, NA, NA, NA, NA, + NA, NA, 0, -1), + [88] = PINGROUP(88, cci_i2c, qdss_gpio15, atest_char2, NA, NA, NA, NA, + NA, NA, 0, -1), + [89] = PINGROUP(89, cci_i2c, qdss_gpio, atest_char1, NA, NA, NA, NA, + NA, NA, 0, -1), + [90] = PINGROUP(90, cci_i2c, qdss_gpio, atest_char0, NA, NA, NA, NA, + NA, NA, 0, -1), + [91] = PINGROUP(91, cci_i2c, qup1_se0, ibi_i3c, NA, NA, NA, NA, NA, NA, + 0x9C010, 1), + [92] = PINGROUP(92, cci_i2c, qup1_se0, 
ibi_i3c, NA, NA, NA, NA, NA, NA, + 0, -1), + [93] = PINGROUP(93, qup0_se0, phase_flag11, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [94] = PINGROUP(94, qup0_se4, phase_flag10, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 1), + [95] = PINGROUP(95, qup0_se5, phase_flag9, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 2), + [96] = PINGROUP(96, qup0_se5, phase_flag8, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [97] = PINGROUP(97, qup0_se5, phase_flag7, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [98] = PINGROUP(98, qup0_se5, phase_flag6, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 3), + [99] = PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [100] = PINGROUP(100, usb0_phy, ddr_bist, NA, NA, NA, NA, NA, NA, NA, + 0x9C000, 6), + [101] = PINGROUP(101, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 7), + [102] = PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 8), + [103] = PINGROUP(103, ext_mclk1, audio_ref, dp0_hot, ddr_bist, NA, NA, + NA, NA, NA, 0x9C018, 4), + [104] = PINGROUP(104, ext_mclk0, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 5), + [105] = PINGROUP(105, i2s0_sck, NA, qdss_cti, NA, NA, NA, NA, NA, NA, + 0x9C018, 6), + [106] = PINGROUP(106, i2s0_data0, NA, qdss_cti, NA, NA, NA, NA, NA, NA, + 0x9C018, 7), + [107] = PINGROUP(107, i2s0_data1, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 8), + [108] = PINGROUP(108, i2s0_ws, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 9), + [109] = PINGROUP(109, qup1_se3, qdss_gpio, atest_usb01, NA, NA, NA, NA, + NA, NA, 0x9C010, 2), + [110] = PINGROUP(110, qup1_se3, qdss_gpio, atest_usb00, NA, NA, NA, NA, + NA, NA, 0, -1), + [111] = PINGROUP(111, host2wlan_sol, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C000, 9), + [112] = PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 10), + [113] = PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C010, 3), + [114] = PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 11), + [115] = PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 12), + [116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, 
NA, NA, 0x9C000, 13), + [117] = PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 14), + [118] = PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C000, 15), + [119] = PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [120] = PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [121] = PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 0), + [122] = PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C004, 1), + [123] = PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [124] = PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C018, 10), + [125] = PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [126] = PINGROUP(126, phase_flag3, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [127] = PINGROUP(127, phase_flag2, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 11), + [128] = PINGROUP(128, phase_flag1, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [129] = PINGROUP(129, gcc_gp1, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [130] = PINGROUP(130, gcc_gp2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [131] = PINGROUP(131, gcc_gp3, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [132] = PINGROUP(132, gcc_gp1, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 12), + [133] = PINGROUP(133, phase_flag0, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [134] = PINGROUP(134, phase_flag12, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 13), + [135] = PINGROUP(135, gcc_gp2, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 14), + [136] = PINGROUP(136, gcc_gp3, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C018, 15), + [137] = PINGROUP(137, phase_flag15, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [138] = PINGROUP(138, phase_flag14, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [139] = PINGROUP(139, phase_flag13, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C01C, 0), + [140] = PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 1), + [141] = PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 2), + [142] = PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [143] 
= PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 3), + [144] = PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 4), + [145] = PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 5), + [146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 6), + [147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [148] = PINGROUP(148, phase_flag31, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C01C, 7), + [149] = PINGROUP(149, phase_flag30, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [150] = PINGROUP(150, phase_flag5, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C01C, 8), + [151] = PINGROUP(151, phase_flag4, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C01C, 9), + [152] = PINGROUP(152, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [153] = PINGROUP(153, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x9C01C, 10), + [154] = PINGROUP(154, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [155] = PINGROUP(155, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, + 0x9C01C, 11), + [156] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0), + [157] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1a0000, 13, 6), + [158] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3), + [159] = SDC_QDSD_PINGROUP(sdc1_data, 0x1a0000, 9, 0), +}; + +static const struct msm_gpio_wakeirq_map neo_pdc_map[] = { + { 0, 50 }, { 3, 68 }, { 6, 88 }, { 7, 55 }, { 10, 66 }, { 11, 96 }, { 12, 48 }, + { 13, 49 }, { 15, 62 }, { 18, 57 }, { 19, 59 }, { 23, 51 }, { 27, 74 }, + { 28, 67 }, { 29, 84 }, { 30, 58 }, { 31, 94 }, { 32, 60 }, { 33, 61 }, + { 35, 69 }, { 37, 70 }, { 38, 64 }, { 39, 65 }, { 40, 63 }, { 41, 92 }, + { 42, 82 }, { 44, 83 }, { 45, 43 }, { 46, 72 }, { 47, 45 }, { 48, 44 }, + { 49, 71 }, { 50, 87 }, { 53, 77 }, { 54, 78 }, { 55, 106 }, { 56, 79 }, + { 57, 80 }, { 58, 107 }, { 59, 81 }, { 60, 89 }, { 61, 54 }, { 62, 73 }, + { 63, 93 }, { 64, 86 }, { 65, 75 }, { 67, 42 }, { 68, 76 }, { 76, 116 }, + { 77, 12 }, { 83, 13 }, { 91, 90 }, { 94, 95 }, { 95, 91 }, { 98, 47 }, + { 100, 85 }, { 101, 52 }, { 102, 53 }, { 103, 
97 }, { 104, 98 }, { 105, 99 }, + { 106, 100 }, { 107, 101 }, { 108, 102 }, { 109, 103 }, { 111, 104 }, { 113, 46 }, + { 114, 56 }, { 115, 108 }, { 116, 109 }, { 117, 110 }, { 118, 111 }, { 121, 112 }, + { 122, 113 }, { 124, 114 }, { 127, 115 }, { 132, 118 }, { 134, 119 }, { 135, 120 }, + { 136, 121 }, { 139, 122 }, { 140, 123 }, { 141, 124 }, { 143, 128 }, { 144, 129 }, + { 145, 130 }, { 146, 131 }, { 148, 132 }, { 150, 133 }, { 151, 134 }, { 153, 135 }, + { 155, 137 }, +}; + +static struct pinctrl_qup neo_qup_regs[] = { +}; From e5676bf12c05a46ee8c0e889de7edd3f26a5dbd4 Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Wed, 31 Jul 2024 20:02:11 +0530 Subject: [PATCH 026/117] pinctrl: qcom: Remove trace hooks from Neo pinctrl driver Removed support of reading gpio read using trace hooks in Neo pinctrl driver, as it is not used anymore in Kernel Platform 3.0. Change-Id: If367c8e98a51aaa2c8ca52425b2b2da80177c059 Signed-off-by: Asit Shah --- drivers/pinctrl/qcom/pinctrl-neo.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/pinctrl/qcom/pinctrl-neo.c b/drivers/pinctrl/qcom/pinctrl-neo.c index d938e1bea2ba..a6c17a884609 100644 --- a/drivers/pinctrl/qcom/pinctrl-neo.c +++ b/drivers/pinctrl/qcom/pinctrl-neo.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "pinctrl-msm.h" #include "pinctrl-neo.h" @@ -27,26 +26,14 @@ static const struct msm_pinctrl_soc_data neo_pinctrl = { .nwakeirq_map = ARRAY_SIZE(neo_pdc_map), }; -static void qcom_trace_gpio_read(void *unused, - struct gpio_device *gdev, - bool *block_gpio_read) -{ - *block_gpio_read = true; -} - static int neo_pinctrl_probe(struct platform_device *pdev) { const struct msm_pinctrl_soc_data *pinctrl_data; - struct device *dev = &pdev->dev; pinctrl_data = of_device_get_match_data(&pdev->dev); if (!pinctrl_data) return -EINVAL; - if (of_device_is_compatible(dev->of_node, "qcom,neo-vm-pinctrl")) - register_trace_android_vh_gpio_block_read(qcom_trace_gpio_read, - NULL); - return 
msm_pinctrl_probe(pdev, pinctrl_data); } From 247d81e484862335d37b43e99ed5af4ff4b3371b Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Wed, 17 Jul 2024 11:23:16 +0530 Subject: [PATCH 027/117] arm64: defconfig: Enable pinctrl for neo platform Enable TLMM pinctrl driver for neo platform in GKI build. Change-Id: I5e9a2427e7ab96e7a56da6c0f8fbe80796d8b1d1 Signed-off-by: Asit Shah --- arch/arm64/configs/vendor/neo_la_GKI.config | 1 + neo_la.bzl | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index bca6d11710ef..84493d57adcf 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -30,6 +30,7 @@ CONFIG_MSM_PERFORMANCE=m CONFIG_MSM_SYSSTATS=m CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000 CONFIG_PINCTRL_MSM=m +CONFIG_PINCTRL_NEO=m CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m diff --git a/neo_la.bzl b/neo_la.bzl index 2433fe9bcf6e..5c627dc6207d 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -20,6 +20,7 @@ def define_neo_la(): "drivers/irqchip/msm_show_resume_irq.ko", "drivers/perf/qcom_llcc_pmu.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", + "drivers/pinctrl/qcom/pinctrl-neo.ko", "drivers/power/reset/qcom-dload-mode.ko", "drivers/power/reset/qcom-reboot-reason.ko", "drivers/soc/qcom/boot_stats.ko", From d58c8c798440d5f6850346b26f315a27ad35877f Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Wed, 17 Jul 2024 11:34:08 +0530 Subject: [PATCH 028/117] modules.list: neo: Add neo pinctrl related module to first stage Added pinctrl module for neo platform in first stage module list. 
Change-Id: I9131e3a89ef905d283e2bcc3fecd9d479c6fb693 Signed-off-by: Asit Shah --- modules.list.msm.neo-la | 1 + 1 file changed, 1 insertion(+) diff --git a/modules.list.msm.neo-la b/modules.list.msm.neo-la index d3c7e01b4024..fc592862b893 100644 --- a/modules.list.msm.neo-la +++ b/modules.list.msm.neo-la @@ -51,6 +51,7 @@ msm_rtb.ko mem_buf.ko mem_buf_dev.ko pinctrl-msm.ko +pinctrl-neo.ko phy-generic.ko phy-qcom-emu.ko qcom_dma_heaps.ko From 0222920c9e617b42b462090721a3a186010f2690 Mon Sep 17 00:00:00 2001 From: Dhaval Radiya Date: Mon, 22 Jul 2024 11:43:41 +0530 Subject: [PATCH 029/117] arm64: defconfig: Enable PDC, CPUidle goverrnor, stats driver for neo This change enables PDC, CPUidle governor and various stats drivers. Change-Id: If2e61f913a2fc350c13ae8c9c0cbde926a4c48b7 Signed-off-by: Dhaval Radiya --- arch/arm64/configs/vendor/neo_la_GKI.config | 4 ++++ modules.list.msm.neo-la | 1 + neo_la.bzl | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index bca6d11710ef..227c0ab4d5da 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -2,6 +2,7 @@ CONFIG_ARCH_NEO=y CONFIG_ARM_SMMU=m CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_SELFTEST=y +CONFIG_CPU_IDLE_GOV_QCOM_LPM=m CONFIG_EDAC_KRYO_ARM64=m # CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y @@ -35,6 +36,7 @@ CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y CONFIG_QCOM_COMMAND_DB=m +CONFIG_QCOM_CPUSS_SLEEP_STATS=m CONFIG_QCOM_DMABUF_HEAPS=m CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y CONFIG_QCOM_DMABUF_HEAPS_CMA=y @@ -54,6 +56,7 @@ CONFIG_QCOM_MEM_BUF_DEV=m CONFIG_QCOM_MEM_HOOKS=m CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT=y CONFIG_QCOM_PANIC_ON_PDR_NOTIF_TIMEOUT=y +CONFIG_QCOM_PDC=m CONFIG_QCOM_RAMDUMP=m CONFIG_QCOM_RPMH=m CONFIG_QCOM_RUN_QUEUE_STATS=m 
@@ -63,6 +66,7 @@ CONFIG_QCOM_SHOW_RESUME_IRQ=m CONFIG_QCOM_SMEM=m CONFIG_QCOM_SOCINFO=m CONFIG_QCOM_SOC_WATCHDOG=m +CONFIG_QCOM_STATS=m CONFIG_QCOM_WATCHDOG_BARK_TIME=11000 CONFIG_QCOM_WATCHDOG_IPI_PING=y CONFIG_QCOM_WATCHDOG_PET_TIME=9360 diff --git a/modules.list.msm.neo-la b/modules.list.msm.neo-la index d3c7e01b4024..3d9591ab58fc 100644 --- a/modules.list.msm.neo-la +++ b/modules.list.msm.neo-la @@ -88,3 +88,4 @@ crypto-qti-common.ko crypto-qti-hwkm.ko hwkm.ko tmecom-intf.ko +qcom-pdc.ko diff --git a/neo_la.bzl b/neo_la.bzl index 2433fe9bcf6e..d73a15d4d5df 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -7,6 +7,7 @@ target_name = "neo-la" def define_neo_la(): _neo_in_tree_modules = [ # keep sorted + "drivers/cpuidle/governors/qcom_lpm.ko", "drivers/dma-buf/heaps/qcom_dma_heaps.ko", "drivers/edac/kryo_arm64_edac.ko", "drivers/edac/qcom_edac.ko", @@ -18,6 +19,7 @@ def define_neo_la(): "drivers/iommu/qcom_iommu_debug.ko", "drivers/iommu/qcom_iommu_util.ko", "drivers/irqchip/msm_show_resume_irq.ko", + "drivers/irqchip/qcom-pdc.ko", "drivers/perf/qcom_llcc_pmu.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/power/reset/qcom-dload-mode.ko", @@ -32,9 +34,11 @@ def define_neo_la(): "drivers/soc/qcom/mem_buf/mem_buf_dev.ko", "drivers/soc/qcom/memory_dump_v2.ko", "drivers/soc/qcom/msm_performance.ko", + "drivers/soc/qcom/qcom_cpuss_sleep_stats.ko", "drivers/soc/qcom/qcom_ramdump.ko", "drivers/soc/qcom/qcom_rpmh.ko", "drivers/soc/qcom/qcom_soc_wdt.ko", + "drivers/soc/qcom/qcom_stats.ko", "drivers/soc/qcom/qcom_wdt_core.ko", "drivers/soc/qcom/rq_stats.ko", "drivers/soc/qcom/secure_buffer.ko", From 9fd9595d1164e37111b5064934b2c838d89b46dc Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Fri, 2 Aug 2024 18:15:33 +0530 Subject: [PATCH 030/117] bindings: clock: qcom: Snapshot for clock bindings on NEO Add snapshot of clock handles for CAMCC/DISPCC/GCC/GPUCC/VIDEOCC/TCSRCC on NEO so that clients can request on the clock ids,from msm-5.10 branch commit e42f1c7f152e 
("bindings: clock: qcom: Add support for clock IDs for Neo"). Change-Id: If1ed852cb3e7cf91da1953beaaeeda309288ab14 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- include/dt-bindings/clock/qcom,camcc-neo.h | 131 ++++++++++++++ include/dt-bindings/clock/qcom,dispcc-neo.h | 97 +++++++++++ include/dt-bindings/clock/qcom,gcc-neo.h | 173 +++++++++++++++++++ include/dt-bindings/clock/qcom,gpucc-neo.h | 48 +++++ include/dt-bindings/clock/qcom,tcsrcc-neo.h | 15 ++ include/dt-bindings/clock/qcom,videocc-neo.h | 39 +++++ 6 files changed, 503 insertions(+) create mode 100644 include/dt-bindings/clock/qcom,camcc-neo.h create mode 100644 include/dt-bindings/clock/qcom,dispcc-neo.h create mode 100644 include/dt-bindings/clock/qcom,gcc-neo.h create mode 100644 include/dt-bindings/clock/qcom,gpucc-neo.h create mode 100644 include/dt-bindings/clock/qcom,tcsrcc-neo.h create mode 100644 include/dt-bindings/clock/qcom,videocc-neo.h diff --git a/include/dt-bindings/clock/qcom,camcc-neo.h b/include/dt-bindings/clock/qcom,camcc-neo.h new file mode 100644 index 000000000000..d0a62f7a08a7 --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-neo.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_NEO_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_NEO_H + +/* CAM_CC clocks */ +#define CAM_CC_PLL0 0 +#define CAM_CC_PLL0_OUT_EVEN 1 +#define CAM_CC_PLL0_OUT_ODD 2 +#define CAM_CC_PLL1 3 +#define CAM_CC_PLL1_OUT_EVEN 4 +#define CAM_CC_PLL2 5 +#define CAM_CC_PLL2_OUT_EVEN 6 +#define CAM_CC_PLL3 7 +#define CAM_CC_PLL3_OUT_EVEN 8 +#define CAM_CC_PLL4 9 +#define CAM_CC_PLL4_OUT_EVEN 10 +#define CAM_CC_PLL5 11 +#define CAM_CC_PLL5_OUT_EVEN 12 +#define CAM_CC_PLL6 13 +#define CAM_CC_PLL6_OUT_EVEN 14 +#define CAM_CC_PLL6_OUT_ODD 15 +#define CAM_CC_BPS_AHB_CLK 16 +#define CAM_CC_BPS_CLK 17 +#define CAM_CC_BPS_CLK_SRC 18 +#define CAM_CC_BPS_FAST_AHB_CLK 19 +#define CAM_CC_CAMNOC_AHB_CLK 20 +#define CAM_CC_CAMNOC_AXI_CLK 21 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 22 +#define CAM_CC_CAMNOC_DCD_XO_CLK 23 +#define CAM_CC_CAMNOC_XO_CLK 24 +#define CAM_CC_CCI_0_CLK 25 +#define CAM_CC_CCI_0_CLK_SRC 26 +#define CAM_CC_CCI_1_CLK 27 +#define CAM_CC_CCI_1_CLK_SRC 28 +#define CAM_CC_CCI_2_CLK 29 +#define CAM_CC_CCI_2_CLK_SRC 30 +#define CAM_CC_CCI_3_CLK 31 +#define CAM_CC_CCI_3_CLK_SRC 32 +#define CAM_CC_CORE_AHB_CLK 33 +#define CAM_CC_CPAS_AHB_CLK 34 +#define CAM_CC_CPAS_BPS_CLK 35 +#define CAM_CC_CPAS_FAST_AHB_CLK 36 +#define CAM_CC_CPAS_IFE_0_CLK 37 +#define CAM_CC_CPAS_IFE_1_CLK 38 +#define CAM_CC_CPAS_IFE_LITE_CLK 39 +#define CAM_CC_CPAS_IPE_NPS_CLK 40 +#define CAM_CC_CPHY_RX_CLK_SRC 41 +#define CAM_CC_CSI0PHYTIMER_CLK 42 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 43 +#define CAM_CC_CSI1PHYTIMER_CLK 44 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 45 +#define CAM_CC_CSI2PHYTIMER_CLK 46 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 47 +#define CAM_CC_CSI3PHYTIMER_CLK 48 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 49 +#define CAM_CC_CSID_CLK 50 +#define CAM_CC_CSID_CLK_SRC 51 +#define CAM_CC_CSID_CSIPHY_RX_CLK 52 +#define CAM_CC_CSIPHY0_CLK 53 +#define CAM_CC_CSIPHY1_CLK 54 +#define CAM_CC_CSIPHY2_CLK 55 +#define CAM_CC_CSIPHY3_CLK 56 +#define CAM_CC_DRV_AHB_CLK 57 +#define 
CAM_CC_DRV_XO_CLK 58 +#define CAM_CC_FAST_AHB_CLK_SRC 59 +#define CAM_CC_GDSC_CLK 60 +#define CAM_CC_ICP_AHB_CLK 61 +#define CAM_CC_ICP_CLK 62 +#define CAM_CC_ICP_CLK_SRC 63 +#define CAM_CC_IFE_0_CLK 64 +#define CAM_CC_IFE_0_CLK_SRC 65 +#define CAM_CC_IFE_0_DSP_CLK 66 +#define CAM_CC_IFE_0_FAST_AHB_CLK 67 +#define CAM_CC_IFE_1_CLK 68 +#define CAM_CC_IFE_1_CLK_SRC 69 +#define CAM_CC_IFE_1_DSP_CLK 70 +#define CAM_CC_IFE_1_FAST_AHB_CLK 71 +#define CAM_CC_IFE_LITE_AHB_CLK 72 +#define CAM_CC_IFE_LITE_CLK 73 +#define CAM_CC_IFE_LITE_CLK_SRC 74 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 75 +#define CAM_CC_IFE_LITE_CSID_CLK 76 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 77 +#define CAM_CC_IPE_NPS_AHB_CLK 78 +#define CAM_CC_IPE_NPS_CLK 79 +#define CAM_CC_IPE_NPS_CLK_SRC 80 +#define CAM_CC_IPE_NPS_FAST_AHB_CLK 81 +#define CAM_CC_IPE_PPS_CLK 82 +#define CAM_CC_IPE_PPS_FAST_AHB_CLK 83 +#define CAM_CC_JPEG_1_CLK 84 +#define CAM_CC_JPEG_2_CLK 85 +#define CAM_CC_JPEG_CLK 86 +#define CAM_CC_JPEG_CLK_SRC 87 +#define CAM_CC_MCLK0_CLK 88 +#define CAM_CC_MCLK0_CLK_SRC 89 +#define CAM_CC_MCLK1_CLK 90 +#define CAM_CC_MCLK1_CLK_SRC 91 +#define CAM_CC_MCLK2_CLK 92 +#define CAM_CC_MCLK2_CLK_SRC 93 +#define CAM_CC_MCLK3_CLK 94 +#define CAM_CC_MCLK3_CLK_SRC 95 +#define CAM_CC_MCLK4_CLK 96 +#define CAM_CC_MCLK4_CLK_SRC 97 +#define CAM_CC_MCLK5_CLK 98 +#define CAM_CC_MCLK5_CLK_SRC 99 +#define CAM_CC_MCLK6_CLK 100 +#define CAM_CC_MCLK6_CLK_SRC 101 +#define CAM_CC_MCLK7_CLK 102 +#define CAM_CC_MCLK7_CLK_SRC 103 +#define CAM_CC_SLEEP_CLK 107 +#define CAM_CC_SLEEP_CLK_SRC 108 +#define CAM_CC_SLOW_AHB_CLK_SRC 109 +#define CAM_CC_XO_CLK_SRC 110 +#define CAM_CC_QDSS_DEBUG_CLK 111 +#define CAM_CC_QDSS_DEBUG_CLK_SRC 112 +#define CAM_CC_QDSS_DEBUG_XO_CLK 113 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_DRV_BCR 1 +#define CAM_CC_ICP_BCR 2 +#define CAM_CC_IFE_0_BCR 3 +#define CAM_CC_IFE_1_BCR 4 +#define CAM_CC_IPE_0_BCR 5 +#define CAM_CC_QDSS_DEBUG_BCR 6 + +#endif diff --git 
a/include/dt-bindings/clock/qcom,dispcc-neo.h b/include/dt-bindings/clock/qcom,dispcc-neo.h new file mode 100644 index 000000000000..2559492aac59 --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-neo.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_NEO_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_NEO_H + +/* DISP_CC clocks */ +#define DISP_CC_PLL0 0 +#define DISP_CC_PLL1 1 +#define DISP_CC_MDSS_ACCU_CLK 2 +#define DISP_CC_MDSS_AHB1_CLK 3 +#define DISP_CC_MDSS_AHB_CLK 4 +#define DISP_CC_MDSS_AHB_CLK_SRC 5 +#define DISP_CC_MDSS_BYTE0_CLK 6 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 7 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 8 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 9 +#define DISP_CC_MDSS_BYTE1_CLK 10 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 11 +#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 12 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 13 +#define DISP_CC_MDSS_DPTX0_AUX_CLK 14 +#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 15 +#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 16 +#define DISP_CC_MDSS_DPTX0_LINK_CLK 17 +#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 18 +#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 19 +#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 20 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 21 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 22 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 23 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 24 +#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 25 +#define DISP_CC_MDSS_DPTX1_AUX_CLK 26 +#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 27 +#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 28 +#define DISP_CC_MDSS_DPTX1_LINK_CLK 29 +#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 30 +#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 31 +#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 32 +#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 33 +#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 34 +#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 35 +#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 36 
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 37 +#define DISP_CC_MDSS_DPTX2_AUX_CLK 38 +#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 39 +#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 40 +#define DISP_CC_MDSS_DPTX2_LINK_CLK 41 +#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 42 +#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 43 +#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 44 +#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 45 +#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 46 +#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 47 +#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 48 +#define DISP_CC_MDSS_DPTX3_AUX_CLK 49 +#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 50 +#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 51 +#define DISP_CC_MDSS_DPTX3_LINK_CLK 52 +#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 53 +#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 54 +#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 55 +#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 56 +#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 57 +#define DISP_CC_MDSS_ESC0_CLK 58 +#define DISP_CC_MDSS_ESC0_CLK_SRC 59 +#define DISP_CC_MDSS_ESC1_CLK 60 +#define DISP_CC_MDSS_ESC1_CLK_SRC 61 +#define DISP_CC_MDSS_MDP1_CLK 62 +#define DISP_CC_MDSS_MDP_CLK 63 +#define DISP_CC_MDSS_MDP_CLK_SRC 64 +#define DISP_CC_MDSS_MDP_LUT1_CLK 65 +#define DISP_CC_MDSS_MDP_LUT_CLK 66 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 67 +#define DISP_CC_MDSS_PCLK0_CLK 68 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 69 +#define DISP_CC_MDSS_PCLK1_CLK 70 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 71 +#define DISP_CC_MDSS_RSCC_AHB_CLK 72 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73 +#define DISP_CC_MDSS_VSYNC1_CLK 74 +#define DISP_CC_MDSS_VSYNC_CLK 75 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 76 +#define DISP_CC_SLEEP_CLK 77 +#define DISP_CC_SLEEP_CLK_SRC 78 +#define DISP_CC_XO_CLK 79 +#define DISP_CC_XO_CLK_SRC 80 + +/* DISP_CC resets */ +#define DISP_CC_MDSS_CORE_BCR 0 +#define DISP_CC_MDSS_CORE_INT2_BCR 1 +#define DISP_CC_MDSS_RSCC_BCR 2 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-neo.h b/include/dt-bindings/clock/qcom,gcc-neo.h new file mode 
100644 index 000000000000..aca87cf4f470 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-neo.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_NEO_H +#define _DT_BINDINGS_CLK_QCOM_GCC_NEO_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL1 2 +#define GCC_GPLL9 3 +#define GCC_GPLL9_OUT_EVEN 4 +#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 5 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6 +#define GCC_BOOT_ROM_AHB_CLK 7 +#define GCC_CAMERA_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CAMERA_XO_CLK 11 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 12 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13 +#define GCC_DDRSS_GPU_AXI_CLK 14 +#define GCC_DDRSS_PCIE_SF_CLK 15 +#define GCC_DISP_AHB_CLK 16 +#define GCC_DISP_HF_AXI_CLK 17 +#define GCC_GP1_CLK 18 +#define GCC_GP1_CLK_SRC 19 +#define GCC_GP2_CLK 20 +#define GCC_GP2_CLK_SRC 21 +#define GCC_GP3_CLK 22 +#define GCC_GP3_CLK_SRC 23 +#define GCC_GPU_CFG_AHB_CLK 24 +#define GCC_GPU_GPLL0_CLK_SRC 25 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 26 +#define GCC_GPU_MEMNOC_GFX_CLK 27 +#define GCC_GPU_SNOC_DVM_GFX_CLK 28 +#define GCC_IRIS_SS_HF_AXI1_CLK 29 +#define GCC_IRIS_SS_SPD_AXI1_CLK 30 +#define GCC_PCIE_0_AUX_CLK 31 +#define GCC_PCIE_0_AUX_CLK_SRC 32 +#define GCC_PCIE_0_CFG_AHB_CLK 33 +#define GCC_PCIE_0_MSTR_AXI_CLK 34 +#define GCC_PCIE_0_PHY_RCHNG_CLK 35 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 36 +#define GCC_PCIE_0_PIPE_CLK 37 +#define GCC_PCIE_0_PIPE_CLK_SRC 38 +#define GCC_PCIE_0_SLV_AXI_CLK 39 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 40 +#define GCC_PCIE_1_AUX_CLK 41 +#define GCC_PCIE_1_AUX_CLK_SRC 42 +#define GCC_PCIE_1_CFG_AHB_CLK 43 +#define GCC_PCIE_1_MSTR_AXI_CLK 44 +#define GCC_PCIE_1_PHY_RCHNG_CLK 45 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 46 +#define GCC_PCIE_1_PIPE_CLK 47 +#define GCC_PCIE_1_PIPE_CLK_SRC 
48 +#define GCC_PCIE_1_SLV_AXI_CLK 49 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 50 +#define GCC_PDM2_CLK 51 +#define GCC_PDM2_CLK_SRC 52 +#define GCC_PDM_AHB_CLK 53 +#define GCC_PDM_XO4_CLK 54 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 55 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 56 +#define GCC_QMIP_GPU_AHB_CLK 57 +#define GCC_QMIP_PCIE_AHB_CLK 58 +#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 59 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 60 +#define GCC_QMIP_VIDEO_LSR_AHB_CLK 61 +#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 62 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 63 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 64 +#define GCC_QUPV3_WRAP0_CORE_CLK 65 +#define GCC_QUPV3_WRAP0_S0_CLK 66 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 67 +#define GCC_QUPV3_WRAP0_S1_CLK 68 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 69 +#define GCC_QUPV3_WRAP0_S2_CLK 70 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S3_CLK 72 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S4_CLK 74 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S5_CLK 76 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 77 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 78 +#define GCC_QUPV3_WRAP1_CORE_CLK 79 +#define GCC_QUPV3_WRAP1_S0_CLK 80 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 81 +#define GCC_QUPV3_WRAP1_S1_CLK 82 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 83 +#define GCC_QUPV3_WRAP1_S2_CLK 84 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 85 +#define GCC_QUPV3_WRAP1_S3_CLK 86 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 87 +#define GCC_QUPV3_WRAP1_S4_CLK 88 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S5_CLK 90 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 91 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95 +#define GCC_SDCC1_AHB_CLK 96 +#define GCC_SDCC1_APPS_CLK 97 +#define GCC_SDCC1_APPS_CLK_SRC 98 +#define GCC_SDCC1_ICE_CORE_CLK 99 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 100 +#define GCC_USB30_PRIM_MASTER_CLK 101 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 102 +#define 
GCC_USB30_PRIM_MOCK_UTMI_CLK 103 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 104 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 105 +#define GCC_USB30_PRIM_SLEEP_CLK 106 +#define GCC_USB3_PRIM_PHY_AUX_CLK 107 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 108 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 109 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 110 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 111 +#define GCC_VIDEO_AHB_CLK 112 +#define GCC_VIDEO_AXI0_CLK 113 +#define GCC_VIDEO_AXI1_CLK 114 +#define GCC_VIDEO_XO_CLK 115 +#define GCC_GPLL4 116 +#define GCC_GPLL5 117 +#define GCC_GPLL7 118 +#define GCC_DDRSS_SPAD_CLK 119 +#define GCC_DDRSS_SPAD_CLK_SRC 120 +#define GCC_VIDEO_AXI0_SREG 121 +#define GCC_VIDEO_AXI1_SREG 122 +#define GCC_IRIS_SS_HF_AXI1_SREG 123 +#define GCC_IRIS_SS_SPD_AXI1_SREG 124 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_PCIE_0_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_1_BCR 8 +#define GCC_PCIE_1_LINK_DOWN_BCR 9 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_PHY_BCR 11 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12 +#define GCC_PCIE_PHY_BCR 13 +#define GCC_PCIE_PHY_CFG_AHB_BCR 14 +#define GCC_PCIE_PHY_COM_BCR 15 +#define GCC_PDM_BCR 16 +#define GCC_QUPV3_WRAPPER_0_BCR 17 +#define GCC_QUPV3_WRAPPER_1_BCR 18 +#define GCC_QUSB2PHY_PRIM_BCR 19 +#define GCC_QUSB2PHY_SEC_BCR 20 +#define GCC_SDCC1_BCR 21 +#define GCC_USB30_PRIM_BCR 22 +#define GCC_USB3_DP_PHY_PRIM_BCR 23 +#define GCC_USB3_DP_PHY_SEC_BCR 24 +#define GCC_USB3_PHY_PRIM_BCR 25 +#define GCC_USB3_PHY_SEC_BCR 26 +#define GCC_USB3PHY_PHY_PRIM_BCR 27 +#define GCC_USB3PHY_PHY_SEC_BCR 28 +#define GCC_VIDEO_AXI0_CLK_ARES 29 +#define GCC_VIDEO_AXI1_CLK_ARES 30 +#define GCC_VIDEO_BCR 31 +#define GCC_IRIS_SS_HF_AXI_CLK_ARES 32 +#define GCC_IRIS_SS_SPD_AXI_CLK_ARES 33 +#define GCC_DDRSS_SPAD_CLK_ARES 34 + +#endif diff 
--git a/include/dt-bindings/clock/qcom,gpucc-neo.h b/include/dt-bindings/clock/qcom,gpucc-neo.h new file mode 100644 index 000000000000..eb8451dbaaf8 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-neo.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_NEO_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_NEO_H + +/* GPU_CC clocks */ +#define GPU_CC_PLL0 0 +#define GPU_CC_PLL1 1 +#define GPU_CC_AHB_CLK 2 +#define GPU_CC_CB_CLK 3 +#define GPU_CC_CRC_AHB_CLK 4 +#define GPU_CC_CX_FF_CLK 5 +#define GPU_CC_CX_GMU_CLK 6 +#define GPU_CC_CXO_AON_CLK 7 +#define GPU_CC_CXO_CLK 8 +#define GPU_CC_DEMET_CLK 9 +#define GPU_CC_DEMET_DIV_CLK_SRC 10 +#define GPU_CC_FF_CLK_SRC 11 +#define GPU_CC_FREQ_MEASURE_CLK 12 +#define GPU_CC_GMU_CLK_SRC 13 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14 +#define GPU_CC_HUB_AON_CLK 15 +#define GPU_CC_HUB_CLK_SRC 16 +#define GPU_CC_HUB_CX_INT_CLK 17 +#define GPU_CC_MEMNOC_GFX_CLK 18 +#define GPU_CC_MND1X_0_GFX3D_CLK 19 +#define GPU_CC_MND1X_1_GFX3D_CLK 20 +#define GPU_CC_SLEEP_CLK 21 +#define GPU_CC_XO_CLK_SRC 22 +#define GPU_CC_XO_DIV_CLK_SRC 23 +#define GPU_CC_GX_GMU_CLK 24 + +/*GPU_CC resets */ +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CB_BCR 1 +#define GPUCC_GPU_CC_CX_BCR 2 +#define GPUCC_GPU_CC_FAST_HUB_BCR 3 +#define GPUCC_GPU_CC_FF_BCR 4 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 5 +#define GPUCC_GPU_CC_GMU_BCR 6 +#define GPUCC_GPU_CC_GX_BCR 7 +#define GPUCC_GPU_CC_RBCPR_BCR 8 +#define GPUCC_GPU_CC_XO_BCR 9 + +#endif diff --git a/include/dt-bindings/clock/qcom,tcsrcc-neo.h b/include/dt-bindings/clock/qcom,tcsrcc-neo.h new file mode 100644 index 000000000000..2f381cb47ad3 --- /dev/null +++ b/include/dt-bindings/clock/qcom,tcsrcc-neo.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. 
All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_H +#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_H + +/* TCSR_CC clocks */ +#define TCSR_PCIE_0_CLKREF_EN 0 +#define TCSR_PCIE_1_CLKREF_EN 1 +#define TCSR_USB2_CLKREF_EN 2 +#define TCSR_USB3_CLKREF_EN 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-neo.h b/include/dt-bindings/clock/qcom,videocc-neo.h new file mode 100644 index 000000000000..b985264ddcfd --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-neo.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_NEO_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_NEO_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_PLL0 0 +#define VIDEO_CC_PLL1 1 +#define VIDEO_CC_AHB_CLK 2 +#define VIDEO_CC_AHB_CLK_SRC 3 +#define VIDEO_CC_MVS0_CLK 4 +#define VIDEO_CC_MVS0_CLK_SRC 5 +#define VIDEO_CC_MVS0_DIV_CLK_SRC 6 +#define VIDEO_CC_MVS0C_CLK 7 +#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8 +#define VIDEO_CC_MVS1_CLK 9 +#define VIDEO_CC_MVS1_CLK_SRC 10 +#define VIDEO_CC_MVS1_DIV_CLK_SRC 11 +#define VIDEO_CC_MVS1C_CLK 12 +#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 13 +#define VIDEO_CC_SLEEP_CLK 14 +#define VIDEO_CC_SLEEP_CLK_SRC 15 +#define VIDEO_CC_XO_CLK 16 +#define VIDEO_CC_XO_CLK_SRC 17 + +/* VIDEO_CC resets */ +#define CVP_VIDEO_CC_INTERFACE_BCR 0 +#define CVP_VIDEO_CC_MVS0_BCR 1 +#define VIDEO_CC_MVS0C_CLK_ARES 2 +#define CVP_VIDEO_CC_MVS0C_BCR 3 +#define CVP_VIDEO_CC_MVS1_BCR 4 +#define VIDEO_CC_MVS1C_CLK_ARES 5 +#define CVP_VIDEO_CC_MVS1C_BCR 6 +#define VIDEO_CC_XO_CLK_ARES 7 + +#endif From 56f042d8ed51c604d9043769daad0be2e4387fd0 Mon Sep 17 00:00:00 2001 From: Asit Shah Date: Wed, 31 Jul 2024 20:02:11 +0530 Subject: [PATCH 031/117] soc: qcom: socinfo: Add neo-la soc-id in socinfo list Add Neo-LA msm-id to the list of soc-ids supported. 
Change-Id: I9601135db35026067fc15116879950ece40d9fbe Signed-off-by: Asit Shah --- drivers/soc/qcom/socinfo.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 15f83883662a..894324a59acc 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -593,6 +593,7 @@ static const struct soc_id soc_id[] = { { 642, "CLIFFSP" }, { 643, "CLIFFS7P" }, { 549, "ANORAK" }, + { 554, "NEO-LA" }, }; static struct attribute *msm_custom_socinfo_attrs[MAX_SOCINFO_ATTRS]; From bb0fd5f452bf372243e6d024a5c400fd6fb07947 Mon Sep 17 00:00:00 2001 From: Jeyaprabu J Date: Tue, 6 Aug 2024 15:55:43 +0530 Subject: [PATCH 032/117] Revert "soc: qcom: hgsl: enable memflag for hgsl allocation" This reverts commit 4cacf643195bab0d186e7ce9e912327a76d827f2. Reason for revert: Regressing Performance. Change-Id: Ib69607b76cca91fc525cbc5cd86b4bc547b80253 Signed-off-by: Jeyaprabu J Signed-off-by: Vishakh --- drivers/soc/qcom/hgsl/hgsl.c | 5 ----- drivers/soc/qcom/hgsl/hgsl_types.h | 8 -------- 2 files changed, 13 deletions(-) diff --git a/drivers/soc/qcom/hgsl/hgsl.c b/drivers/soc/qcom/hgsl/hgsl.c index c2b3d3c79b4a..c20609edbf2e 100644 --- a/drivers/soc/qcom/hgsl/hgsl.c +++ b/drivers/soc/qcom/hgsl/hgsl.c @@ -2351,10 +2351,6 @@ static int hgsl_ioctl_mem_alloc(struct file *filep, unsigned long arg) goto out; } - /* let the back end aware that this is HGSL allocation */ - params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK; - params.flags |= GSL_MEMFLAGS_USERMEM_HGSL_ALLOC; - mem_node->flags = params.flags; ret = hgsl_sharedmem_alloc(hgsl->dev, params.sizebytes, params.flags, mem_node); @@ -2544,7 +2540,6 @@ static int hgsl_ioctl_mem_map_smmu(struct file *filep, unsigned long arg) } params.size = PAGE_ALIGN(params.size); - params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK; mem_node->flags = params.flags; mem_node->fd = params.fd; mem_node->memtype = params.memtype; diff --git a/drivers/soc/qcom/hgsl/hgsl_types.h 
b/drivers/soc/qcom/hgsl/hgsl_types.h index 5f3c1500ad4d..154db78ac305 100644 --- a/drivers/soc/qcom/hgsl/hgsl_types.h +++ b/drivers/soc/qcom/hgsl/hgsl_types.h @@ -69,14 +69,6 @@ #define GSL_MEMFLAGS_GPUIOCOHERENT 0x80000000 #define GSL_MEMFLAGS_CACHEMODE_MASK 0x0C000000 -/* external or internal buffer */ -#define GSL_MEMFLAGS_USERMEM_HGSL_ALLOC 0x00000020 -#define GSL_MEMFLAGS_USERMEM_ASHMEM 0x00000040 -#define GSL_MEMFLAGS_USERMEM_ADDR 0x00000060 -#define GSL_MEMFLAGS_USERMEM_ION 0x00000080 -#define GSL_MEMFLAGS_USERMEM_SHIFT 5 -#define GSL_MEMFLAGS_USERMEM_MASK 0x000000e0 - /****************************************************************************/ /* cache flags */ /****************************************************************************/ From 4ab95acc04c4ee19a7961a2d10477ca99519b194 Mon Sep 17 00:00:00 2001 From: Ram Kumar Dwivedi Date: Wed, 17 Jul 2024 19:31:40 +0530 Subject: [PATCH 033/117] ufs: qcom: Create sysfs node "ufs_pm_mode" In some HQX targets (where Linux is running as guest OS), when S2R is triggered from host OS, the GVM is sent to Deep Sleep state. Since Deep Sleep is getting triggered from Guest point of view, ufs spm level is set to 5, Power OFF SSU command is sent to the ufs device and all regulators are turned off from ufs. But since it is actually an S2R event from Host PoV, the regulators are not turned off by the PMIC. This is causing power leakage. To fix this issue, create a sysfs node "ufs_pm_mode". The Host OS will write to this node which the event (S2R or Deep Sleep) is getting triggered at the Host. UFS will set the correct spm level accordingly. For S2R case, the default spm level is retained while for Deep Sleep it is set to 5. The default value for this node is 0( which is set during ufs init). This means it will not set anything unless "S2R" or "DEEPSLEEP" is explicitly written to the node. ufs_pm_mode can have three values: "NONE","S2R","DEEPSLEEP": 1. 
When "NONE" is written to sysfs node: the spm level is set to 5 for deep sleep and existing spm value left unchanged for s2idle case. 2. When "S2R" is written to the node: the default spm level value is retained. 3. When "DEEPSLEEP" is written to the node: the spm level is set to 5. Change-Id: I232449cf93a3b37652897e74a621bf219302b76b Signed-off-by: Ram Kumar Dwivedi --- drivers/ufs/host/ufs-qcom.c | 80 +++++++++++++++++++++++++++++++++++-- drivers/ufs/host/ufs-qcom.h | 1 + 2 files changed, 78 insertions(+), 3 deletions(-) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 535f40c02ea0..7551409c51e0 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -87,6 +87,12 @@ enum { UFS_QCOM_CMD_COMPL, }; +enum { + UFS_QCOM_SYSFS_NONE, + UFS_QCOM_SYSFS_S2R, + UFS_QCOM_SYSFS_DEEPSLEEP, +}; + static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; static DEFINE_PER_CPU(struct freq_qos_request, qos_min_req); @@ -5571,6 +5577,59 @@ static ssize_t irq_affinity_support_show(struct device *dev, static DEVICE_ATTR_RW(irq_affinity_support); +static ssize_t ufs_pm_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = -1; + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + switch (host->ufs_pm_mode) { + case 0: + ret = scnprintf(buf, 6, "NONE\n"); + break; + case 1: + ret = scnprintf(buf, 5, "S2R\n"); + break; + case 2: + ret = scnprintf(buf, 12, "DEEPSLEEP\n"); + break; + default: + break; + } + + return ret; +} + +static ssize_t ufs_pm_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char kbuff[12] = {0}; + + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!buf) + return -EINVAL; + + strscpy(kbuff, buf, 11); + + if (!strncasecmp(kbuff, "NONE", 4)) + host->ufs_pm_mode = 0; + else if (!strncasecmp(kbuff, "S2R", 3)) + host->ufs_pm_mode = 1; 
+ else if (!strncasecmp(kbuff, "DEEPSLEEP", 9)) + host->ufs_pm_mode = 2; + else + dev_err(hba->dev, "Invalid entry for ufs_pm_mode\n"); + + return count; +} + + +static DEVICE_ATTR_RW(ufs_pm_mode); + static struct attribute *ufs_qcom_sysfs_attrs[] = { &dev_attr_err_state.attr, &dev_attr_power_mode.attr, @@ -5582,6 +5641,7 @@ static struct attribute *ufs_qcom_sysfs_attrs[] = { &dev_attr_hibern8_count.attr, &dev_attr_ber_th_exceeded.attr, &dev_attr_irq_affinity_support.attr, + &dev_attr_ufs_pm_mode.attr, NULL }; @@ -6052,10 +6112,24 @@ static int ufs_qcom_suspend_prepare(struct device *dev) * regulators is turned off in DS. For other senerios * like s2idle, retain the default spm level. */ - if (pm_suspend_target_state == PM_SUSPEND_MEM) - hba->spm_lvl = UFS_PM_LVL_5; - else + switch (host->ufs_pm_mode) { + case UFS_QCOM_SYSFS_NONE: + if (pm_suspend_target_state == PM_SUSPEND_MEM) + hba->spm_lvl = UFS_PM_LVL_5; + else + hba->spm_lvl = host->spm_lvl_default; + break; + case UFS_QCOM_SYSFS_S2R: hba->spm_lvl = host->spm_lvl_default; + break; + case UFS_QCOM_SYSFS_DEEPSLEEP: + hba->spm_lvl = UFS_PM_LVL_5; + break; + default: + break; + } + + dev_info(dev, "spm level is set to %d\n", hba->spm_lvl); return ufshcd_suspend_prepare(dev); } diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index f038e5fb51cd..775670b26d9f 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -592,6 +592,7 @@ struct ufs_qcom_host { bool bypass_g4_cfgready; bool is_dt_pm_level_read; u32 spm_lvl_default; + u32 ufs_pm_mode; bool is_phy_pwr_on; /* Protect the usage of is_phy_pwr_on against racing */ struct mutex phy_mutex; From 5f276b69543351a0d31b4b816de1078706cc8443 Mon Sep 17 00:00:00 2001 From: Abdul Salam Date: Thu, 8 Aug 2024 17:40:17 +0530 Subject: [PATCH 034/117] clk: qcom: Adding SSC_QUP clocks Adding SSC_QUP clocks for slpi. 
Change-Id: Id2f5f14bd5e40eed0c8bfa30235d3702733f5f22 Signed-off-by: Abdul Salam --- drivers/clk/qcom/virtio_clk_sm6150.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/clk/qcom/virtio_clk_sm6150.c b/drivers/clk/qcom/virtio_clk_sm6150.c index 2b93e06ef825..eae9e188c982 100644 --- a/drivers/clk/qcom/virtio_clk_sm6150.c +++ b/drivers/clk/qcom/virtio_clk_sm6150.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -91,6 +91,8 @@ static const struct virtio_clk_init_data sm6150_scc_virtio_clocks[] = { [SCC_QUPV3_SE1_CLK] = {.name = "scc_qupv3_se1_clk",}, [SCC_QUPV3_SE2_CLK] = {.name = "scc_qupv3_se2_clk",}, [SCC_QUPV3_SE3_CLK] = {.name = "scc_qupv3_se3_clk",}, + [SCC_QUPV3_SE4_CLK] = {.name = "scc_qupv3_se4_clk",}, + [SCC_QUPV3_SE5_CLK] = {.name = "scc_qupv3_se5_clk",}, [SCC_QUPV3_M_HCLK_CLK] = {.name = "scc_qupv3_m_hclk_clk",}, [SCC_QUPV3_S_HCLK_CLK] = {.name = "scc_qupv3_s_hclk_clk",}, }; From b4d55a4b49c407448dc422c46c3d4a9e7f0cd676 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Wed, 31 Aug 2022 17:59:20 +0530 Subject: [PATCH 035/117] clk: qcom: clk-branch: Add support for sreg branch ops Add support for SREG branch ops as sreg register operations are needed as part of ASSERT/DEASSERT sequence. 
Change-Id: Ic0cc76d5160cd34130afc460b07d5ec4e9ed85eb Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/clk-branch.c | 66 +++++++++++++++++++++++++++++++++++ drivers/clk/qcom/clk-branch.h | 4 +++ 2 files changed, 70 insertions(+) diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index f66db9e751f3..83ef97bf386a 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -211,6 +211,12 @@ static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw) {"MEM_ENABLE_ACK_MASK", 0x0}, }; + static struct clk_register_data data3[] = { + {"SREG_ENABLE_REG", 0x0}, + {"SREG_CORE_ACK_MASK", 0x0}, + {"SREG_PERIPH_ACK_MASK", 0x0}, + }; + size = ARRAY_SIZE(data); for (i = 0; i < size; i++) { @@ -243,6 +249,16 @@ static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw) clock_debug_output(f, "%20s: 0x%.8x\n", data2[2].name, br->mem_enable_ack_bit); } + + if (br->sreg_enable_reg) { + regmap_read(br->clkr.regmap, br->sreg_enable_reg + + data3[0].offset, &val); + clock_debug_output(f, "%20s: 0x%.8x\n", data3[0].name, val); + clock_debug_output(f, "%20s: 0x%.8x\n", data3[1].name, + br->sreg_core_ack_bit); + clock_debug_output(f, "%20s: 0x%.8x\n", data3[2].name, + br->sreg_periph_ack_bit); + } } static int clk_branch2_set_flags(struct clk_hw *hw, unsigned long flags) @@ -323,6 +339,29 @@ static int clk_branch2_mem_enable(struct clk_hw *hw) return -EBUSY; } +static int clk_branch2_sreg_enable(struct clk_hw *hw) +{ + struct clk_branch *br = to_clk_branch(hw); + u32 val; + int count = 200; + int ret; + + ret = clk_enable_regmap(hw); + if (ret) + return -EINVAL; + + regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val); + + while (count-- > 0) { + if (!(val & br->sreg_core_ack_bit)) + return 0; + udelay(1); + regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val); + } + + return -EBUSY; +} + static void clk_branch2_mem_disable(struct clk_hw *hw) { struct clk_branch *br = 
to_clk_branch(hw); @@ -332,6 +371,24 @@ static void clk_branch2_mem_disable(struct clk_hw *hw) return clk_branch2_disable(hw); } +static void clk_branch2_sreg_disable(struct clk_hw *hw) +{ + struct clk_branch *br = to_clk_branch(hw); + u32 val; + int count = 200; + + clk_disable_regmap(hw); + + regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val); + + while (count-- > 0) { + if (val & br->sreg_periph_ack_bit) + return; + udelay(1); + regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val); + } +} + static void clk_branch_restore_context_aon(struct clk_hw *hw) { if (clk_enable_regmap(hw)) @@ -395,6 +452,15 @@ const struct clk_ops clk_branch2_mem_ops = { }; EXPORT_SYMBOL(clk_branch2_mem_ops); +const struct clk_ops clk_branch2_sreg_ops = { + .enable = clk_branch2_sreg_enable, + .disable = clk_branch2_sreg_disable, + .is_enabled = clk_is_enabled_regmap, + .init = clk_branch2_init, + .debug_init = clk_branch_debug_init, +}; +EXPORT_SYMBOL_GPL(clk_branch2_sreg_ops); + static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h index 0c3584e2a0c3..b211e588f5ec 100644 --- a/drivers/clk/qcom/clk-branch.h +++ b/drivers/clk/qcom/clk-branch.h @@ -26,9 +26,12 @@ struct clk_branch { u32 halt_reg; u32 mem_enable_reg; u32 mem_ack_reg; + u32 sreg_enable_reg; u8 hwcg_bit; u8 halt_bit; u8 mem_enable_ack_bit; + u32 sreg_core_ack_bit; + u32 sreg_periph_ack_bit; u8 halt_check; #define BRANCH_VOTED BIT(7) /* Delay on disable */ #define BRANCH_HALT 0 /* pol: 1 = halt */ @@ -50,6 +53,7 @@ extern const struct clk_ops clk_branch2_aon_ops; extern const struct clk_ops clk_branch2_force_off_ops; extern const struct clk_ops clk_branch2_mem_ops; extern const struct clk_ops clk_branch2_crm_ops; +extern const struct clk_ops clk_branch2_sreg_ops; #define to_clk_branch(_hw) \ container_of(to_clk_regmap(_hw), struct clk_branch, clkr) From c787ac5c9f4f5705f517bfeb36c98a1ec63c8b12 Mon 
Sep 17 00:00:00 2001 From: Wasim Nazir Date: Thu, 8 Aug 2024 16:50:03 +0530 Subject: [PATCH 036/117] firmware: scm: Add checks for NULL pointer dereference Add macro to check for __scm pointer before accessing. Also add check for device pointer. Change-Id: Ib3ef303fd9574bedd87077dcd62a480066d7a7d8 Signed-off-by: Wasim Nazir --- drivers/firmware/qcom_scm-legacy.c | 5 +- drivers/firmware/qcom_scm-smc.c | 8 +- drivers/firmware/qcom_scm.c | 429 +++++++++++++++++++++++++---- 3 files changed, 380 insertions(+), 62 deletions(-) diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c index 0e192271d22c..f9f15cea7daf 100644 --- a/drivers/firmware/qcom_scm-legacy.c +++ b/drivers/firmware/qcom_scm-legacy.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -137,6 +137,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, __le32 *arg_buf; const __le32 *res_buf; + if (!dev) + return -EPROBE_DEFER; + cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); if (!cmd) return -ENOMEM; diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c index 9c6a05843c57..a2e7de0b6d80 100644 --- a/drivers/firmware/qcom_scm-smc.c +++ b/drivers/firmware/qcom_scm-smc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -212,6 +212,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, struct arm_smccc_res smc_res; struct arm_smccc_args smc = {0}; + if (!dev) + return -EPROBE_DEFER; + smc.args[0] = ARM_SMCCC_CALL_VAL( smccc_call_type, qcom_smccc_convention, @@ -222,9 +225,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { - if (!dev) - return -EPROBE_DEFER; - alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); use_qtee_shmbridge = qtee_shmbridge_is_enabled(); if (use_qtee_shmbridge) { diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index fc828d74874d..9c015d4c3adc 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -109,6 +109,8 @@ static const char * const qcom_scm_convention_names[] = { static struct qcom_scm *__scm; +#define SCM_NOT_INITIALIZED() (unlikely(!__scm) ? pr_err("SCM not initialized\n") : 0) + static int qcom_scm_clk_enable(void) { int ret; @@ -212,7 +214,7 @@ static enum qcom_scm_convention __get_convention(void) * needed to dma_map_single to secure world */ probed_convention = SMC_CONVENTION_ARM_64; - ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); + ret = __scm_smc_call(__scm->dev, &desc, probed_convention, &res, true); if (!ret && res.result[0] == 1) goto found; @@ -223,14 +225,14 @@ static enum qcom_scm_convention __get_convention(void) * early calls into the firmware on these SoCs so the device pointer * will be valid here to check if the compatible matches. */ - if (of_device_is_compatible(__scm ? 
__scm->dev->of_node : NULL, "qcom,scm-sc7180")) { + if (of_device_is_compatible(__scm->dev->of_node, "qcom,scm-sc7180")) { forced = true; goto found; } #endif probed_convention = SMC_CONVENTION_ARM_32; - ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); + ret = __scm_smc_call(__scm->dev, &desc, probed_convention, &res, true); if (!ret && res.result[0] == 1) goto found; @@ -377,6 +379,8 @@ int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) .arginfo = QCOM_SCM_ARGS(2), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; /* * Reassign only if we are switching from hotplug entry point * to cpuidle entry point or vice versa. @@ -429,8 +433,8 @@ int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) .owner = ARM_SMCCC_OWNER_SIP, }; - if (!__scm) - return -EINVAL; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; if (!cpus || cpumask_empty(cpus)) return -EINVAL; @@ -467,7 +471,10 @@ void qcom_scm_cpu_power_down(u32 flags) .owner = ARM_SMCCC_OWNER_SIP, }; - qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); + if (SCM_NOT_INITIALIZED()) + return; + + qcom_scm_call_atomic(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_cpu_power_down); @@ -484,6 +491,9 @@ int qcom_scm_sec_wdog_deactivate(void) .arginfo = QCOM_SCM_ARGS(1), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_sec_wdog_deactivate); @@ -503,6 +513,9 @@ int qcom_scm_sec_wdog_trigger(void) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -524,10 +537,8 @@ void qcom_scm_disable_sdi(void) .arginfo = QCOM_SCM_ARGS(2), }; - if (!__scm) { - pr_err("No scm device available\n"); + if (SCM_NOT_INITIALIZED()) return; - } ret = qcom_scm_call_atomic(__scm->dev, &desc, NULL); if (ret) @@ -548,6 +559,9 @@ int qcom_scm_set_remote_state(u32 state, u32 id) struct qcom_scm_res res; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -564,6 +578,9 @@ int qcom_scm_spin_cpu(void) .arginfo = QCOM_SCM_ARGS(1), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_spin_cpu); @@ -580,15 +597,20 @@ static int __qcom_scm_set_dload_mode(struct device *dev, enum qcom_download_mode desc.args[1] = 0; - return qcom_scm_call_atomic(__scm->dev, &desc, NULL); + return qcom_scm_call_atomic(dev, &desc, NULL); } void qcom_scm_set_download_mode(enum qcom_download_mode mode, phys_addr_t tcsr_boot_misc) { int ret = 0; - struct device *dev = __scm ? __scm->dev : NULL; + struct device *dev = NULL; - if (tcsr_boot_misc || (__scm && __scm->dload_mode_addr)) { + if (SCM_NOT_INITIALIZED()) + return; + + dev = __scm->dev; + + if (tcsr_boot_misc || __scm->dload_mode_addr) { ret = qcom_scm_io_writel(tcsr_boot_misc ? : __scm->dload_mode_addr, mode); } else if (__qcom_scm_is_call_available(dev, QCOM_SCM_SVC_BOOT, @@ -607,9 +629,14 @@ EXPORT_SYMBOL(qcom_scm_set_download_mode); int qcom_scm_get_download_mode(unsigned int *mode, phys_addr_t tcsr_boot_misc) { int ret = -EINVAL; - struct device *dev = __scm ? __scm->dev : NULL; + struct device *dev = NULL; - if (tcsr_boot_misc || (__scm && __scm->dload_mode_addr)) { + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + dev = __scm->dev; + + if (tcsr_boot_misc || __scm->dload_mode_addr) { ret = qcom_scm_io_readl(tcsr_boot_misc ? 
: __scm->dload_mode_addr, mode); } else { dev_err(dev, @@ -632,6 +659,9 @@ int qcom_scm_config_cpu_errata(void) .arginfo = 0xffffffff, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_config_cpu_errata); @@ -645,10 +675,8 @@ void qcom_scm_phy_update_scm_level_shifter(u32 val) .owner = ARM_SMCCC_OWNER_SIP }; - if (!__scm) { - pr_err("No scm device available\n"); + if (SCM_NOT_INITIALIZED()) return; - } desc.args[0] = val; desc.args[1] = 0; @@ -685,6 +713,9 @@ int qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_clk_enable(); if (ret) return ret; @@ -727,6 +758,9 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_clk_enable(); if (ret) return ret; @@ -762,6 +796,9 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_clk_enable(); if (ret) return ret; @@ -796,6 +833,9 @@ int qcom_scm_pas_shutdown(u32 peripheral) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_clk_enable(); if (ret) return ret; @@ -854,6 +894,9 @@ bool qcom_scm_pas_supported(u32 peripheral) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return false; + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PIL_PAS_IS_SUPPORTED)) return false; @@ -877,7 +920,7 @@ static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) struct qcom_scm_res res; int ret; - ret = qcom_scm_call(__scm->dev, &desc, &res); + ret = qcom_scm_call(dev, &desc, &res); return ret ? 
: res.result[0]; } @@ -888,6 +931,9 @@ static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, if (idx != 0) return -EINVAL; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_pas_mss_reset(__scm->dev, 1); } @@ -897,6 +943,9 @@ static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev, if (idx != 0) return -EINVAL; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_pas_mss_reset(__scm->dev, 0); } @@ -915,7 +964,10 @@ int qcom_scm_get_sec_dump_state(u32 *dump_state) }; struct qcom_scm_res res; - ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + ret = qcom_scm_call(__scm->dev, &desc, &res); if (dump_state) *dump_state = res.result[0]; @@ -936,6 +988,9 @@ int qcom_scm_assign_dump_table_region(bool is_assign, phys_addr_t addr, size_t s .args[2] = size, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_assign_dump_table_region); @@ -953,6 +1008,9 @@ int qcom_scm_tz_blsp_modify_owner(int food, u64 subsystem, int *out) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (out) @@ -974,6 +1032,8 @@ int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) struct qcom_scm_res res; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; ret = qcom_scm_call_atomic(__scm->dev, &desc, &res); if (ret >= 0) @@ -994,6 +1054,9 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) .owner = ARM_SMCCC_OWNER_SIP, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_io_writel); @@ -1010,12 +1073,18 @@ int qcom_scm_io_reset(void) .arginfo = QCOM_SCM_ARGS(2), }; - return qcom_scm_call_atomic(__scm ? 
__scm->dev : NULL, &desc, NULL); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_io_reset); bool qcom_scm_is_secure_wdog_trigger_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SEC_WDOG_TRIGGER); } @@ -1023,6 +1092,9 @@ EXPORT_SYMBOL(qcom_scm_is_secure_wdog_trigger_available); bool qcom_scm_is_mode_switch_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SWITCH_MODE); } @@ -1040,6 +1112,9 @@ int __qcom_scm_get_feat_version(struct device *dev, u64 feat_id, u64 *version) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (version) @@ -1050,7 +1125,10 @@ int __qcom_scm_get_feat_version(struct device *dev, u64 feat_id, u64 *version) int qcom_scm_get_jtag_etm_feat_id(u64 *version) { - return __qcom_scm_get_feat_version(__scm ? __scm->dev : NULL, + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return __qcom_scm_get_feat_version(__scm->dev, QCOM_SCM_TZ_DBG_ETM_FEAT_ID, version); } EXPORT_SYMBOL(qcom_scm_get_jtag_etm_feat_id); @@ -1074,6 +1152,9 @@ void qcom_scm_halt_spmi_pmic_arbiter(void) .arginfo = QCOM_SCM_ARGS(1), }; + if (SCM_NOT_INITIALIZED()) + return; + ret = qcom_scm_call_atomic(__scm->dev, &desc, NULL); if (ret) pr_debug("Failed to halt_spmi_pmic_arbiter=0x%x\n", ret); @@ -1098,10 +1179,8 @@ void qcom_scm_deassert_ps_hold(void) .arginfo = QCOM_SCM_ARGS(1), }; - if (!__scm) { - pr_err("No scm device available\n"); + if (SCM_NOT_INITIALIZED()) return; - } ret = qcom_scm_call_atomic(__scm->dev, &desc, NULL); if (ret) @@ -1184,20 +1263,29 @@ int qcom_scm_paravirt_smmu_attach(u64 sid, u64 asid, u64 ste_pa, u64 ste_size, u64 cd_pa, u64 cd_size) { - return __qcom_scm_paravirt_smmu_attach(__scm ? 
__scm->dev : NULL, sid, asid, + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return __qcom_scm_paravirt_smmu_attach(__scm->dev, sid, asid, ste_pa, ste_size, cd_pa, cd_size); } EXPORT_SYMBOL_GPL(qcom_scm_paravirt_smmu_attach); int qcom_scm_paravirt_tlb_inv(u64 asid, u64 sid) { - return __qcom_scm_paravirt_tlb_inv(__scm ? __scm->dev : NULL, asid, sid); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return __qcom_scm_paravirt_tlb_inv(__scm->dev, asid, sid); } EXPORT_SYMBOL_GPL(qcom_scm_paravirt_tlb_inv); int qcom_scm_paravirt_smmu_detach(u64 sid) { - return __qcom_scm_paravirt_smmu_detach(__scm ? __scm->dev : NULL, sid); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return __qcom_scm_paravirt_smmu_detach(__scm->dev, sid); } EXPORT_SYMBOL_GPL(qcom_scm_paravirt_smmu_detach); @@ -1212,10 +1300,8 @@ void qcom_scm_mmu_sync(bool sync) .arginfo = QCOM_SCM_ARGS(1), }; - if (!__scm) { - pr_err("No scm device available\n"); + if (SCM_NOT_INITIALIZED()) return; - } ret = qcom_scm_call_atomic(__scm->dev, &desc, NULL); @@ -1232,6 +1318,9 @@ EXPORT_SYMBOL(qcom_scm_mmu_sync); */ bool qcom_scm_restore_sec_cfg_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, QCOM_SCM_MP_RESTORE_SEC_CFG); } @@ -1250,6 +1339,9 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) struct qcom_scm_res res; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -1268,6 +1360,9 @@ int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) struct qcom_scm_res res; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (size) @@ -1291,6 +1386,9 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ @@ -1319,6 +1417,9 @@ int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1336,6 +1437,9 @@ int qcom_scm_mem_protect_region_id(phys_addr_t paddr, size_t size) .arginfo = QCOM_SCM_ARGS(2), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_mem_protect_region_id); @@ -1359,6 +1463,9 @@ int qcom_scm_mem_protect_lock_id2_flat(phys_addr_t list_addr, QCOM_SCM_VAL, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_mem_protect_lock_id2_flat); @@ -1387,6 +1494,9 @@ int qcom_scm_iommu_secure_map(phys_addr_t sg_list_addr, size_t num_sg, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1410,6 +1520,9 @@ int qcom_scm_iommu_secure_unmap(u64 sec_id, int cbndx, unsigned long iova, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -1475,6 +1588,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, int ret, i, b; u64 srcvm_bits = *srcvm; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + src_sz = hweight64(srcvm_bits) * sizeof(*src); mem_to_map_sz = sizeof(*mem_to_map); dest_sz = dest_cnt * sizeof(*destvm); @@ -1551,7 +1667,10 @@ int qcom_scm_assign_mem_regions(struct qcom_scm_mem_map_info *mem_regions, struct qcom_scm_current_perm_info *newvms, size_t newvms_sz) { - return __qcom_scm_assign_mem(__scm ? __scm->dev : NULL, + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return __qcom_scm_assign_mem(__scm->dev, virt_to_phys(mem_regions), mem_regions_sz, virt_to_phys(srcvms), src_sz, virt_to_phys(newvms), newvms_sz); @@ -1579,6 +1698,9 @@ int qcom_scm_mem_protect_sd_ctrl(u32 devid, phys_addr_t mem_addr, u64 mem_size, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1589,6 +1711,9 @@ bool qcom_scm_kgsl_set_smmu_aperture_available(void) { int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, QCOM_SCM_MP_CP_SMMU_APERTURE_ID); @@ -1611,6 +1736,9 @@ int qcom_scm_kgsl_set_smmu_aperture(unsigned int num_context_bank) .arginfo = QCOM_SCM_ARGS(4), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_kgsl_set_smmu_aperture); @@ -1630,6 +1758,9 @@ int qcom_scm_kgsl_set_smmu_lpac_aperture(unsigned int num_context_bank) .arginfo = QCOM_SCM_ARGS(4), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_kgsl_set_smmu_lpac_aperture); @@ -1644,6 +1775,9 @@ int qcom_scm_kgsl_init_regs(u32 gpu_req) .arginfo = QCOM_SCM_ARGS(1), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_GPU, QCOM_SCM_SVC_GPU_INIT_REGS)) 
return -EOPNOTSUPP; @@ -1662,6 +1796,9 @@ int qcom_scm_enable_shm_bridge(void) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1678,7 +1815,10 @@ int qcom_scm_delete_shm_bridge(u64 handle) .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL), }; - return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_delete_shm_bridge); @@ -1700,6 +1840,9 @@ int qcom_scm_create_shm_bridge(u64 pfn_and_ns_perm_flags, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (handle) @@ -1722,6 +1865,9 @@ int qcom_scm_smmu_prepare_atos_id(u64 dev_id, int cb_num, int operation) QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_smmu_prepare_atos_id); @@ -1746,6 +1892,9 @@ int qcom_mdf_assign_memory_to_subsys(u64 start_addr, u64 end_addr, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1757,7 +1906,12 @@ EXPORT_SYMBOL(qcom_mdf_assign_memory_to_subsys); */ bool qcom_scm_dcvs_core_available(void) { - struct device *dev = __scm ? __scm->dev : NULL; + struct device *dev = NULL; + + if (SCM_NOT_INITIALIZED()) + return false; + + dev = __scm->dev; return __qcom_scm_is_call_available(dev, QCOM_SCM_SVC_DCVS, QCOM_SCM_DCVS_INIT) && @@ -1774,7 +1928,12 @@ EXPORT_SYMBOL(qcom_scm_dcvs_core_available); */ bool qcom_scm_dcvs_ca_available(void) { - struct device *dev = __scm ? 
__scm->dev : NULL; + struct device *dev = NULL; + + if (SCM_NOT_INITIALIZED()) + return false; + + dev = __scm->dev; return __qcom_scm_is_call_available(dev, QCOM_SCM_SVC_DCVS, QCOM_SCM_DCVS_INIT_CA_V2) && @@ -1794,7 +1953,10 @@ int qcom_scm_dcvs_reset(void) .owner = ARM_SMCCC_OWNER_SIP }; - return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL); + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_dcvs_reset); @@ -1811,6 +1973,9 @@ int qcom_scm_dcvs_init_v2(phys_addr_t addr, size_t size, int *version) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (ret >= 0) @@ -1830,6 +1995,9 @@ int qcom_scm_dcvs_init_ca_v2(phys_addr_t addr, size_t size) .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_dcvs_init_ca_v2); @@ -1848,6 +2016,9 @@ int qcom_scm_dcvs_update(int level, s64 total_time, s64 busy_time) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call_atomic(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1868,6 +2039,9 @@ int qcom_scm_dcvs_update_v2(int level, s64 total_time, s64 busy_time) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? : res.result[0]; @@ -1890,6 +2064,9 @@ int qcom_scm_dcvs_update_ca_v2(int level, s64 total_time, s64 busy_time, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -1898,6 +2075,9 @@ EXPORT_SYMBOL(qcom_scm_dcvs_update_ca_v2); int qcom_scm_get_feat_version_cp(u64 *version) { + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_get_feat_version(__scm->dev, QCOM_SCM_MP_CP_FEAT_ID, version); } @@ -1908,6 +2088,9 @@ EXPORT_SYMBOL(qcom_scm_get_feat_version_cp); */ bool qcom_scm_ocmem_lock_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, QCOM_SCM_OCMEM_LOCK_CMD); } @@ -1935,6 +2118,9 @@ int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, .arginfo = QCOM_SCM_ARGS(4), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_ocmem_lock); @@ -1958,6 +2144,9 @@ int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) .arginfo = QCOM_SCM_ARGS(3), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_ocmem_unlock); @@ -1970,6 +2159,9 @@ EXPORT_SYMBOL(qcom_scm_ocmem_unlock); */ bool qcom_scm_ice_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, QCOM_SCM_ES_INVALIDATE_ICE_KEY) && __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, @@ -1999,6 +2191,9 @@ int qcom_scm_ice_invalidate_key(u32 index) .owner = ARM_SMCCC_OWNER_SIP, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_ice_invalidate_key); @@ -2043,6 +2238,9 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, dma_addr_t key_phys; int ret; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + /* * 'key' may point to vmalloc()'ed memory, but we need to pass a * physical address that's been properly flushed. 
The sanctioned way to @@ -2088,6 +2286,9 @@ int qcom_scm_config_set_ice_key(uint32_t index, phys_addr_t paddr, size_t size, QCOM_SCM_VAL, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_config_set_ice_key); @@ -2103,6 +2304,9 @@ int qcom_scm_clear_ice_key(uint32_t index, unsigned int ce) .arginfo = QCOM_SCM_ARGS(2), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_clear_ice_key); @@ -2116,6 +2320,9 @@ int qcom_scm_derive_raw_secret(phys_addr_t paddr_key, size_t size_key, .owner = ARM_SMCCC_OWNER_SIP }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + desc.args[0] = paddr_key; desc.args[1] = size_key; desc.args[2] = paddr_secret; @@ -2134,10 +2341,14 @@ EXPORT_SYMBOL(qcom_scm_derive_raw_secret); bool qcom_scm_hdcp_available(void) { bool avail; - int ret = qcom_scm_clk_enable(); + int ret; + if (SCM_NOT_INITIALIZED()) + return false; + + ret = qcom_scm_clk_enable(); if (ret) - return ret; + return false; avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_HDCP_INVOKE); @@ -2179,6 +2390,9 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) return -ERANGE; @@ -2197,6 +2411,9 @@ EXPORT_SYMBOL(qcom_scm_hdcp_req); bool qcom_scm_is_lmh_debug_set_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_DEBUG_SET); } @@ -2204,6 +2421,9 @@ EXPORT_SYMBOL(qcom_scm_is_lmh_debug_set_available); bool qcom_scm_is_lmh_debug_read_buf_size_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_DEBUG_READ_BUF_SIZE); } @@ -2211,6 +2431,9 @@ 
EXPORT_SYMBOL(qcom_scm_is_lmh_debug_read_buf_size_available); bool qcom_scm_is_lmh_debug_read_buf_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_DEBUG_READ); } @@ -2218,6 +2441,9 @@ EXPORT_SYMBOL(qcom_scm_is_lmh_debug_read_buf_available); bool qcom_scm_is_lmh_debug_get_type_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_DEBUG_GET_TYPE); } @@ -2233,6 +2459,9 @@ int qcom_scm_lmh_read_buf_size(int *size) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (size) @@ -2259,6 +2488,9 @@ int qcom_scm_lmh_limit_dcvsh(phys_addr_t payload, uint32_t payload_size, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_lmh_limit_dcvsh); @@ -2276,6 +2508,9 @@ int qcom_scm_lmh_debug_read(phys_addr_t payload, uint32_t size) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -2300,6 +2535,9 @@ int __qcom_scm_lmh_debug_config_write(struct device *dev, u64 cmd_id, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + if (buf_size < 3) return -EINVAL; @@ -2309,6 +2547,9 @@ int __qcom_scm_lmh_debug_config_write(struct device *dev, u64 cmd_id, int qcom_scm_lmh_debug_set_config_write(phys_addr_t payload, int payload_size, uint32_t *buf, int buf_size) { + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_lmh_debug_config_write(__scm->dev, QCOM_SCM_LMH_DEBUG_SET, payload, payload_size, buf, buf_size); @@ -2332,6 +2573,9 @@ int qcom_scm_lmh_get_type(phys_addr_t payload, u64 payload_size, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (size) @@ -2355,6 +2599,9 @@ int qcom_scm_lmh_fetch_data(u32 node_id, u32 debug_type, uint32_t *peak, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_DEBUG_FETCH_DATA); if (ret <= 0) @@ -2384,6 +2631,9 @@ int qcom_scm_smmu_change_pgtbl_format(u64 dev_id, int cbndx) QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_smmu_change_pgtbl_format); @@ -2399,6 +2649,8 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en) .owner = ARM_SMCCC_OWNER_SIP, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } @@ -2415,6 +2667,9 @@ int qcom_scm_smmu_notify_secure_lut(u64 dev_id, bool secure) .arginfo = QCOM_SCM_ARGS(2), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_smmu_notify_secure_lut); @@ -2432,6 +2687,9 @@ int qcom_scm_qdss_invoke(phys_addr_t paddr, size_t size, u64 *out) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, 
&desc, &res); if (out) @@ -2452,6 +2710,9 @@ int qcom_scm_camera_protect_all(uint32_t protect, uint32_t param) .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_camera_protect_all); @@ -2467,6 +2728,9 @@ int qcom_scm_camera_protect_phy_lanes(bool protect, u64 regmask) .arginfo = QCOM_SCM_ARGS(2), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_camera_protect_phy_lanes); @@ -2488,6 +2752,9 @@ int qcom_scm_camera_update_camnoc_qos(uint32_t use_case_id, .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + if ((cam_qos_cnt > QCOM_SCM_CAMERA_MAX_QOS_CNT) || (cam_qos_cnt && !cam_qos)) { pr_err("Invalid input SmartQoS count: %d\n", cam_qos_cnt); return -EINVAL; @@ -2526,6 +2793,9 @@ int qcom_scm_tsens_reinit(int *tsens_ret) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (tsens_ret) *tsens_ret = res.result[0]; @@ -2553,12 +2823,18 @@ int qcom_scm_ice_restore_cfg(void) .owner = ARM_SMCCC_OWNER_TRUSTED_OS }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_ice_restore_cfg); bool qcom_scm_lmh_dcvsh_available(void) { + if (SCM_NOT_INITIALIZED()) + return false; + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); } EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available); @@ -2573,6 +2849,9 @@ int qcom_scm_lmh_profile_change(u32 profile_id) .owner = ARM_SMCCC_OWNER_SIP, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_lmh_profile_change); @@ -2596,6 +2875,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, .owner = 
ARM_SMCCC_OWNER_SIP, }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL); if (!payload_buf) return -ENOMEM; @@ -2625,12 +2907,18 @@ int qcom_scm_prefetch_tgt_ctrl(bool en) .arginfo = QCOM_SCM_ARGS(1), }; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } EXPORT_SYMBOL(qcom_scm_prefetch_tgt_ctrl); int qcom_scm_get_tz_log_feat_id(u64 *version) { + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_get_feat_version(__scm->dev, QCOM_SCM_FEAT_LOG_ID, version); } @@ -2638,6 +2926,9 @@ EXPORT_SYMBOL(qcom_scm_get_tz_log_feat_id); int qcom_scm_get_tz_feat_id_version(u64 feat_id, u64 *version) { + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + return __qcom_scm_get_feat_version(__scm->dev, feat_id, version); } @@ -2656,6 +2947,9 @@ int qcom_scm_register_qsee_log_buf(phys_addr_t buf, size_t len) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); return ret ? 
: res.result[0]; @@ -2672,6 +2966,9 @@ int qcom_scm_query_encrypted_log_feature(u64 *enabled) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call(__scm->dev, &desc, &res); if (enabled) *enabled = res.result[0]; @@ -2697,6 +2994,9 @@ int qcom_scm_request_encrypted_log(phys_addr_t buf, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + if (is_full_tz_logs_supported) { if (is_full_tz_logs_enabled) { /* requesting full logs */ @@ -2735,6 +3035,9 @@ int qcom_scm_invoke_smc_legacy(phys_addr_t in_buf, size_t in_buf_size, struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call_noretry(__scm->dev, &desc, &res); if (result) @@ -2769,6 +3072,9 @@ int qcom_scm_invoke_smc(phys_addr_t in_buf, size_t in_buf_size, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call_noretry(__scm->dev, &desc, &res); if (result) @@ -2800,6 +3106,9 @@ int qcom_scm_invoke_callback_response(phys_addr_t out_buf, }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + ret = qcom_scm_call_noretry(__scm->dev, &desc, &res); if (result) @@ -2818,7 +3127,7 @@ EXPORT_SYMBOL(qcom_scm_invoke_callback_response); int qcom_scm_qseecom_call(u32 cmd_id, struct qseecom_scm_desc *desc, bool retry) { int ret; - struct device *dev = __scm ? 
__scm->dev : NULL; + struct device *dev = NULL; struct qcom_scm_desc _desc = { .svc = (cmd_id & 0xff00) >> 8, .cmd = (cmd_id & 0xff), @@ -2837,6 +3146,11 @@ int qcom_scm_qseecom_call(u32 cmd_id, struct qseecom_scm_desc *desc, bool retry) }; struct qcom_scm_res res; + if (SCM_NOT_INITIALIZED()) + return -ENODEV; + + dev = __scm->dev; + if (retry) ret = qcom_scm_call(dev, &_desc, &res); else @@ -2914,7 +3228,7 @@ static int qcom_scm_query_wq_queue_info(struct qcom_scm *scm) struct qcom_scm_res res; scm->waitq.wq_feature = QCOM_SCM_SINGLE_SMC_ALLOW; - ret = qcom_scm_call_atomic(__scm->dev, &desc, &res); + ret = qcom_scm_call_atomic(scm->dev, &desc, &res); if (ret) { pr_err("%s: Failed to get wq queue info: %d\n", __func__, ret); return ret; @@ -2934,6 +3248,9 @@ bool qcom_scm_multi_call_allow(struct device *dev, bool multicall_allowed) { struct qcom_scm *scm; + if (!dev) + return false; + scm = dev_get_drvdata(dev); if (multicall_allowed && scm->waitq.wq_feature == QCOM_SCM_MULTI_SMC_WHITE_LIST_ALLOW) @@ -3031,42 +3348,42 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *p) return IRQ_HANDLED; } -static int __qcom_multi_smc_init(struct qcom_scm *__scm, +static int __qcom_multi_smc_init(struct qcom_scm *scm, struct platform_device *pdev) { int ret = 0, irq; - spin_lock_init(&__scm->waitq.idr_lock); - idr_init(&__scm->waitq.idr); - if (of_device_is_compatible(__scm->dev->of_node, "qcom,scm-v1.1")) { - INIT_WORK(&__scm->waitq.scm_irq_work, scm_irq_work); + spin_lock_init(&scm->waitq.idr_lock); + idr_init(&scm->waitq.idr); + if (of_device_is_compatible(scm->dev->of_node, "qcom,scm-v1.1")) { + INIT_WORK(&scm->waitq.scm_irq_work, scm_irq_work); irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(__scm->dev, "WQ IRQ is not specified: %d\n", irq); + dev_err(scm->dev, "WQ IRQ is not specified: %d\n", irq); return irq; } - ret = devm_request_irq(__scm->dev, irq, + ret = devm_request_irq(scm->dev, irq, qcom_scm_irq_handler, - IRQF_ONESHOT, "qcom-scm", __scm); + 
IRQF_ONESHOT, "qcom-scm", scm); if (ret < 0) { - dev_err(__scm->dev, "Failed to request qcom-scm irq: %d\n", ret); + dev_err(scm->dev, "Failed to request qcom-scm irq: %d\n", ret); return ret; } /* Return success if "no-multi-smc-support" property is present */ - if (of_property_read_bool(__scm->dev->of_node, + if (of_property_read_bool(scm->dev->of_node, "qcom,no-multi-smc-support")) { - dev_info(__scm->dev, "Multi smc is not supported\n"); + dev_info(scm->dev, "Multi smc is not supported\n"); return 0; } /* Detect Multi SMC support present or not */ - ret = qcom_scm_query_wq_queue_info(__scm); + ret = qcom_scm_query_wq_queue_info(scm); if (!ret) sema_init(&qcom_scm_sem_lock, - (int)__scm->waitq.call_ctx_cnt); + (int)scm->waitq.call_ctx_cnt); } return ret; @@ -3101,10 +3418,8 @@ int scm_mem_protection_init_do(void) struct qcom_scm_res res; - if (!__scm) { - pr_err("SCM dev is not initialized\n"); - return -1; - } + if (SCM_NOT_INITIALIZED()) + return -ENODEV; /* * Fetching offset of PID and task_name from task_struct. @@ -3121,7 +3436,7 @@ int scm_mem_protection_init_do(void) desc.args[0] = pid_offset, desc.args[1] = task_name_offset, - ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res); + ret = qcom_scm_call(__scm->dev, &desc, &res); resp = res.result[0]; pr_debug("SCM call values: ret %d, resp %d\n", From 8a01c3ed0fc0f8e3a47b1ccd0a831740810cf4b8 Mon Sep 17 00:00:00 2001 From: yingdeng Date: Fri, 9 Aug 2024 16:59:09 +0800 Subject: [PATCH 037/117] coresight: tmc: etf: Free its buffer and set NULL when etf can't be enabled In function tmc_enable_etf_sink_sysfs, the local variable buf was allocated and assigned to drvdata->buffer, and buf will be freed because etf can't be enabled, but drvdata->buffer still point to the address. So in function tmc_read_unprepare_etb, beacause drvdata->mode is CS_MODE_DISABLED, local variable buf was assigned by drvdata->buf and free again. 
Change-Id: I7d25e7db0a983fc134e8d8dfb954936233f0d167 Signed-off-by: yingdeng --- drivers/hwtracing/coresight/coresight-tmc-etf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 286e08135e15..a093e7687253 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -247,7 +247,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) atomic_inc(csdev->refcnt); } else { /* Free up the buffer if we failed to enable */ - used = false; + kfree(drvdata->buf); + drvdata->buf = NULL; } out: spin_unlock_irqrestore(&drvdata->spinlock, flags); From ebd0bcb25c02b40fa5be8defac43ef547d4f2afb Mon Sep 17 00:00:00 2001 From: Wasim Nazir Date: Mon, 12 Aug 2024 11:57:59 +0530 Subject: [PATCH 038/117] soc: qcom: sysmon: Add null check for add_delta_time sysmon_smem_power_stats_extended pointer will be NULL in case of invalid dsp_id. Add NULL check for invalid dsp_id cases. Change-Id: If4fe03051ff6388307ff137b55d4e15b2e0f88f1 Signed-off-by: Wasim Nazir --- drivers/soc/qcom/sysmon_subsystem_stats.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/soc/qcom/sysmon_subsystem_stats.c b/drivers/soc/qcom/sysmon_subsystem_stats.c index a6c903b2bd93..4ebef13bc22f 100644 --- a/drivers/soc/qcom/sysmon_subsystem_stats.c +++ b/drivers/soc/qcom/sysmon_subsystem_stats.c @@ -193,6 +193,9 @@ static int add_delta_time( ptr = g_sysmon_stats.sysmon_power_stats_slpi; } + if (ptr == NULL) + return -EINVAL; + if (ver >= 2) { powerstats_ticks = (u64)(((u64)ptr->last_update_time_powerstats_msb << 32) | ptr->last_update_time_powerstats_lsb); From ff2b9fdf51007bff5c614a14f501d8594ad01b7b Mon Sep 17 00:00:00 2001 From: Shashank Shekhar Date: Thu, 8 Aug 2024 00:53:15 +0530 Subject: [PATCH 039/117] net: stmmac: Enable Deep sleep support Add support for Deep sleep in ethernet driver. 
Change-Id: I6255580b909088c0908674e446ca9d7599051498 Signed-off-by: Shashank Shekhar --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 49 ++++++++++++++++--- .../stmicro/stmmac/dwmac-qcom-serdes.c | 2 +- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 23ae369487a3..14b98ce9db25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -19,7 +19,7 @@ #include #include #include - +#include #include #include #include @@ -171,6 +171,8 @@ void *ipc_emac_log_ctxt; struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0}; +static int qcom_ethqos_hib_restore(struct device *dev); +static int qcom_ethqos_hib_freeze(struct device *dev); struct plat_stmmacenet_data *plat_dat; struct qcom_ethqos *pethqos; @@ -2456,6 +2458,9 @@ static int qcom_ethqos_suspend(struct device *dev) return 0; } + if (pm_suspend_target_state == PM_SUSPEND_MEM) + return qcom_ethqos_hib_freeze(dev); + ethqos = get_stmmac_bsp_priv(dev); if (!ethqos) return -ENODEV; @@ -2491,6 +2496,9 @@ static int qcom_ethqos_resume(struct device *dev) if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded")) return 0; + if (pm_suspend_target_state == PM_SUSPEND_MEM) + return qcom_ethqos_hib_restore(dev); + ethqos = get_stmmac_bsp_priv(dev); if (!ethqos) @@ -2567,8 +2575,32 @@ static int qcom_ethqos_enable_clks(struct qcom_ethqos *ethqos, struct device *de goto error_rgmii_get; } } + ethqos->sgmiref_clk = devm_clk_get(dev, "sgmi_ref"); + if (IS_ERR(ethqos->sgmiref_clk)) { + dev_warn(dev, "Failed sgmi_ref\n"); + ret = PTR_ERR(ethqos->sgmiref_clk); + goto error_sgmi_ref; + } else { + ret = clk_prepare_enable(ethqos->sgmiref_clk); + if (ret) + goto error_sgmi_ref; + } + ethqos->phyaux_clk = devm_clk_get(dev, "phyaux"); + if (IS_ERR(ethqos->phyaux_clk)) { + dev_warn(dev, "Failed phyaux\n"); + ret = 
PTR_ERR(ethqos->phyaux_clk); + goto error_phyaux_ref; + } else { + ret = clk_prepare_enable(ethqos->phyaux_clk); + if (ret) + goto error_phyaux_ref; + } return 0; +error_phyaux_ref: + clk_disable_unprepare(ethqos->sgmiref_clk); +error_sgmi_ref: + clk_disable_unprepare(ethqos->rgmii_clk); error_rgmii_get: clk_disable_unprepare(priv->plat->pclk); error_pclk_get: @@ -2591,6 +2623,12 @@ static void qcom_ethqos_disable_clks(struct qcom_ethqos *ethqos, struct device * if (ethqos->rgmii_clk) clk_disable_unprepare(ethqos->rgmii_clk); + if (priv->plat->has_gmac4 && ethqos->phyaux_clk) + clk_disable_unprepare(ethqos->phyaux_clk); + + if (priv->plat->has_gmac4 && ethqos->sgmiref_clk) + clk_disable_unprepare(ethqos->sgmiref_clk); + ETHQOSINFO("Exit\n"); } @@ -2622,7 +2660,7 @@ static int qcom_ethqos_hib_restore(struct device *dev) ret = ethqos_init_gpio(ethqos); if (ret) - return ret; + ETHQOSINFO("GPIO init failed\n"); ret = qcom_ethqos_enable_clks(ethqos, dev); if (ret) @@ -2652,11 +2690,6 @@ static int qcom_ethqos_hib_restore(struct device *dev) #endif /* end of DWC_ETH_QOS_CONFIG_PTP */ /* issue software reset to device */ - ret = stmmac_reset(priv, priv->ioaddr); - if (ret) { - dev_err(priv->device, "Failed to reset\n"); - return ret; - } if (!netif_running(ndev)) { rtnl_lock(); @@ -2710,6 +2743,8 @@ static int qcom_ethqos_hib_freeze(struct device *dev) ethqos_free_gpios(ethqos); + ethqos->curr_serdes_speed = 0; + ETHQOSINFO("end\n"); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-serdes.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-serdes.c index 34862beaa850..8e618937d80a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-serdes.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-serdes.c @@ -1188,7 +1188,7 @@ static int qcom_ethqos_serdes_update_sgmii(struct qcom_ethqos *ethqos, switch (speed) { case SPEED_1000: - if (ethqos->curr_serdes_speed == SPEED_2500) + if (ethqos->curr_serdes_speed != SPEED_1000) ret = 
qcom_ethqos_serdes_sgmii_1Gb(ethqos); ethqos->curr_serdes_speed = SPEED_1000; From a94a1330a160465b19f316b168980b2d8658f25f Mon Sep 17 00:00:00 2001 From: Srinath Pandey Date: Sun, 11 Aug 2024 16:38:44 +0530 Subject: [PATCH 040/117] net: stmmac: Add support for driver remove operation Add driver remove feature for making rmmod dwmac-qcom-eth successful. Change-Id: If6334979a1f603fdd6d99a8d189ed84aa3a3afc7 Signed-off-by: Srinath Pandey --- .../ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 23ae369487a3..3aeab5b440a5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2422,6 +2422,11 @@ static int qcom_ethqos_remove(struct platform_device *pdev) int ret; struct stmmac_priv *priv; + if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded")) { + of_platform_depopulate(&pdev->dev); + return 0; + } + ethqos = get_stmmac_bsp_priv(&pdev->dev); if (!ethqos) return -ENODEV; @@ -2430,6 +2435,13 @@ static int qcom_ethqos_remove(struct platform_device *pdev) ret = stmmac_pltfr_remove(pdev); + if (ethqos->rgmii_clk) + clk_disable_unprepare(ethqos->rgmii_clk); + if (priv->plat->has_gmac4 && ethqos->phyaux_clk) + clk_disable_unprepare(ethqos->phyaux_clk); + if (priv->plat->has_gmac4 && ethqos->sgmiref_clk) + clk_disable_unprepare(ethqos->sgmiref_clk); + if (priv->plat->phy_intr_en_extn_stm) free_irq(ethqos->phy_intr, ethqos); priv->phy_irq_enabled = false; @@ -2441,6 +2453,8 @@ static int qcom_ethqos_remove(struct platform_device *pdev) ethqos_disable_regulators(ethqos); ethqos_clks_config(ethqos, false); + platform_set_drvdata(pdev, NULL); + of_platform_depopulate(&pdev->dev); return ret; } From 34b67414cf15dcaf0c351aa62d0e4fd644ba328a Mon Sep 17 00:00:00 2001 From: Kamati Srinivas Date: Tue, 6 Aug 2024 
21:09:56 +0530 Subject: [PATCH 041/117] soccp: pas: vote for interconnects in D transition Vote for interconnect when in D0, unvote when in D3. Change-Id: I52f6170b1651b3d6bf508482d4ab35165fd395ca Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5_pas.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 97ca79aa115d..e54b9558b49a 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -957,6 +957,12 @@ int rproc_set_state(struct rproc *rproc, bool state) goto soccp_out; } + ret = do_bus_scaling(adsp, true); + if (ret) { + dev_err(adsp->q6v5.dev, "failed to set bandwidth request\n"); + goto soccp_out; + } + ret = clk_prepare_enable(adsp->xo); if (ret) { dev_err(adsp->dev, "failed to enable clks\n"); @@ -1014,6 +1020,12 @@ int rproc_set_state(struct rproc *rproc, bool state) } disable_regulators(adsp); clk_disable_unprepare(adsp->xo); + ret = do_bus_scaling(adsp, false); + if (ret < 0) { + dev_err(adsp->q6v5.dev, "failed to set bandwidth request\n"); + goto soccp_out; + } + adsp->current_users = 0; } } From 085040c09ff0d028b2128879e1332b9ffcc7e22a Mon Sep 17 00:00:00 2001 From: Odelu Kukatla Date: Mon, 15 Jul 2024 10:58:44 +0530 Subject: [PATCH 042/117] icc: dt-bindings: Add snapshot of endpoint IDs for interconnects for Neo Add master and slave ID constants for all Qualcomm Technologies, Inc. Neo interconnect providers which consumers can use to set bandwidth constraints and find paths in the NoC (Network-On-Chip) topology. This is a snapshot taken from 5.10 kernel. commit c1275fdbd5e2 ("icc: dt-bindings: add endpoint IDs for interconnects for Neo"). 
Change-Id: I2c0e62f96ba6288b6903f0dd13a2d68aa7b3ec49 Signed-off-by: Odelu Kukatla Signed-off-by: Chintan Kothari --- include/dt-bindings/interconnect/qcom,neo.h | 136 ++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 include/dt-bindings/interconnect/qcom,neo.h diff --git a/include/dt-bindings/interconnect/qcom,neo.h b/include/dt-bindings/interconnect/qcom,neo.h new file mode 100644 index 000000000000..fc08be406c7c --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,neo.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + * + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_NEO_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_NEO_H + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_LLCC 3 +#define MASTER_CNOC_LPASS_AG_NOC 4 +#define MASTER_GIC_AHB 5 +#define MASTER_CDSP_NOC_CFG 6 +#define MASTER_QDSS_BAM 7 +#define MASTER_QSPI_0 8 +#define MASTER_QUP_0 9 +#define MASTER_QUP_1 10 +#define MASTER_A2NOC_SNOC 11 +#define MASTER_CAMNOC_HF 12 +#define MASTER_CAMNOC_ICP 13 +#define MASTER_CAMNOC_SF 14 +#define MASTER_CNOC_DATAPATH 15 +#define MASTER_GEM_NOC_CNOC 16 +#define MASTER_GEM_NOC_PCIE_SNOC 17 +#define MASTER_GFX3D 18 +#define MASTER_LPASS_ANOC 19 +#define MASTER_LSR 20 +#define MASTER_MDP 21 +#define MASTER_CNOC_MNOC_CFG 22 +#define MASTER_MNOC_HF_MEM_NOC 23 +#define MASTER_MNOC_SF_MEM_NOC 24 +#define MASTER_COMPUTE_NOC 25 +#define MASTER_ANOC_PCIE_GEM_NOC 26 +#define MASTER_SNOC_CFG 27 +#define MASTER_SNOC_GC_MEM_NOC 28 +#define MASTER_SNOC_SF_MEM_NOC 29 +#define MASTER_VIDEO 30 +#define MASTER_VIDEO_CV_PROC 31 +#define MASTER_VIDEO_PROC 32 +#define MASTER_VIDEO_V_PROC 33 +#define MASTER_QUP_CORE_0 34 +#define MASTER_QUP_CORE_1 35 +#define MASTER_CRYPTO 36 +#define MASTER_LPASS_PROC 37 +#define MASTER_CDSP_PROC 38 
+#define MASTER_PIMEM 39 +#define MASTER_WLAN_Q6 40 +#define MASTER_GIC 41 +#define MASTER_PCIE_0 42 +#define MASTER_PCIE_1 43 +#define MASTER_QDSS_DAP 44 +#define MASTER_QDSS_ETR 45 +#define MASTER_QDSS_ETR_1 46 +#define MASTER_SDCC_1 47 +#define MASTER_USB3_0 48 +#define SLAVE_EBI1 512 +#define SLAVE_AHB2PHY_SOUTH 513 +#define SLAVE_AOSS 514 +#define SLAVE_CAMERA_CFG 515 +#define SLAVE_CLK_CTL 516 +#define SLAVE_CDSP_CFG 517 +#define SLAVE_RBCPR_CX_CFG 518 +#define SLAVE_RBCPR_MMCX_CFG 519 +#define SLAVE_RBCPR_MXA_CFG 520 +#define SLAVE_RBCPR_MXC_CFG 521 +#define SLAVE_CPR_NSPCX 522 +#define SLAVE_CRYPTO_0_CFG 523 +#define SLAVE_CX_RDPM 524 +#define SLAVE_DISPLAY_CFG 525 +#define SLAVE_GFX3D_CFG 526 +#define SLAVE_IMEM_CFG 527 +#define SLAVE_IPC_ROUTER_CFG 528 +#define SLAVE_LPASS 529 +#define SLAVE_LPASS_CORE_CFG 530 +#define SLAVE_LPASS_LPI_CFG 531 +#define SLAVE_LPASS_MPU_CFG 532 +#define SLAVE_LPASS_TOP_CFG 533 +#define SLAVE_MX_RDPM 534 +#define SLAVE_PCIE_0_CFG 535 +#define SLAVE_PCIE_1_CFG 536 +#define SLAVE_PDM 537 +#define SLAVE_PIMEM_CFG 538 +#define SLAVE_PRNG 539 +#define SLAVE_QDSS_CFG 540 +#define SLAVE_QSPI_0 541 +#define SLAVE_QUP_0 542 +#define SLAVE_QUP_1 543 +#define SLAVE_SDCC_1 544 +#define SLAVE_TCSR 545 +#define SLAVE_TLMM 546 +#define SLAVE_TME_CFG 547 +#define SLAVE_USB3_0 548 +#define SLAVE_VENUS_CFG 549 +#define SLAVE_VSENSE_CTRL_CFG 550 +#define SLAVE_WLAN_Q6_CFG 551 +#define SLAVE_A2NOC_SNOC 552 +#define SLAVE_DDRSS_CFG 553 +#define SLAVE_GEM_NOC_CNOC 554 +#define SLAVE_SNOC_GEM_NOC_GC 555 +#define SLAVE_SNOC_GEM_NOC_SF 556 +#define SLAVE_LLCC 557 +#define SLAVE_MNOC_HF_MEM_NOC 558 +#define SLAVE_MNOC_SF_MEM_NOC 559 +#define SLAVE_CNOC_MNOC_CFG 560 +#define SLAVE_CDSP_MEM_NOC 561 +#define SLAVE_MEM_NOC_PCIE_SNOC 562 +#define SLAVE_ANOC_PCIE_GEM_NOC 563 +#define SLAVE_SNOC_CFG 564 +#define SLAVE_LPASS_SNOC 565 +#define SLAVE_QUP_CORE_0 566 +#define SLAVE_QUP_CORE_1 567 +#define SLAVE_IMEM 568 +#define SLAVE_PIMEM 569 +#define 
SLAVE_SERVICE_NSP_NOC 570 +#define SLAVE_SERVICE_CNOC 571 +#define SLAVE_SERVICE_MNOC 572 +#define SLAVE_SERVICES_LPASS_AML_NOC 573 +#define SLAVE_SERVICE_LPASS_AG_NOC 574 +#define SLAVE_SERVICE_SNOC 575 +#define SLAVE_PCIE_0 576 +#define SLAVE_PCIE_1 577 +#define SLAVE_QDSS_STM 578 +#define SLAVE_TCU 579 +#define MASTER_LLCC_DISP 1000 +#define MASTER_MDP_DISP 1001 +#define MASTER_MNOC_HF_MEM_NOC_DISP 1002 +#define MASTER_ANOC_PCIE_GEM_NOC_DISP 1003 +#define SLAVE_EBI1_DISP 1512 +#define SLAVE_LLCC_DISP 1513 +#define SLAVE_MNOC_HF_MEM_NOC_DISP 1514 + +#endif From 3399cbff16a7457ddc61f826866359e56b74ac32 Mon Sep 17 00:00:00 2001 From: Wasim Nazir Date: Mon, 12 Aug 2024 11:04:52 +0530 Subject: [PATCH 043/117] power: reboot-reason: Check nvmen write return value Check return value to get proper error logs. Change-Id: I77b33d2ac618ff029921cd751f2d2dde226b8064 Signed-off-by: Wasim Nazir --- drivers/power/reset/qcom-reboot-reason.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/power/reset/qcom-reboot-reason.c b/drivers/power/reset/qcom-reboot-reason.c index be5a2ed6c4f3..673a043e7f15 100644 --- a/drivers/power/reset/qcom-reboot-reason.c +++ b/drivers/power/reset/qcom-reboot-reason.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ +#define pr_fmt(fmt) "qcom-reboot-reason: %s: " fmt, __func__ #include #include @@ -38,6 +40,7 @@ static struct poweroff_reason reasons[] = { static int qcom_reboot_reason_reboot(struct notifier_block *this, unsigned long event, void *ptr) { + int rc; char *cmd = ptr; struct qcom_reboot_reason *reboot = container_of(this, struct qcom_reboot_reason, reboot_nb); @@ -47,9 +50,11 @@ static int qcom_reboot_reason_reboot(struct notifier_block *this, return NOTIFY_OK; for (reason = reasons; reason->cmd; reason++) { if (!strcmp(cmd, reason->cmd)) { - nvmem_cell_write(reboot->nvmem_cell, + rc = nvmem_cell_write(reboot->nvmem_cell, &reason->pon_reason, sizeof(reason->pon_reason)); + if (rc < 0) + pr_err("PON reason store failed, rc=%d\n", rc); break; } } From 7d4a6cb9fbc766c7656ec146522f44ef31b4f7f2 Mon Sep 17 00:00:00 2001 From: Odelu Kukatla Date: Mon, 15 Jul 2024 11:09:57 +0530 Subject: [PATCH 044/117] interconnect: qcom: Add snapshot of interconnect provider driver for neo Add interconnect providers for config_noc, dc_noc, gem_noc, lpass_ag_noc, mc_virt_noc, mmss_noc, nsp_noc and system_noc. This is interconnect provider driver snapshot from msm-5.10 branch commit 11aa9ca0c2da ("interconnect: qcom: Add interconnect stubs for Neo"). Change-Id: If8ed95f84dc70a7040c0dda28dd8adf3df3678cd Signed-off-by: Odelu Kukatla Signed-off-by: Chintan Kothari --- drivers/interconnect/qcom/Kconfig | 13 + drivers/interconnect/qcom/Makefile | 2 + drivers/interconnect/qcom/neo.c | 2298 ++++++++++++++++++++++++++++ 3 files changed, 2313 insertions(+) create mode 100644 drivers/interconnect/qcom/neo.c diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 39e3e53db7eb..f920aec7c3f7 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -153,6 +153,19 @@ config INTERCONNECT_QCOM_ANORAK for setting bandwidth between two endpoints (path). It also used to configure NOC QoS settings (Quality of Service). 
+config INTERCONNECT_QCOM_NEO + tristate "NEO interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_RPMH && QCOM_COMMAND_DB && OF + select INTERCONNECT_QCOM_BCM_VOTER + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_QOS + help + This is a driver for the Qualcomm Technologies, Inc. Network-on-Chip + on Neo-based platforms. Interconnect driver provides interfaces + for setting bandwidth between two endpoints (path). It also used to + configure NOC QoS settings (Quality of Service). + config INTERCONNECT_QCOM_SDX55 tristate "Qualcomm SDX55 interconnect driver" depends on INTERCONNECT_QCOM_RPMH_POSSIBLE diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index 59c1858c1f8b..43e4bc2f56e6 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -26,6 +26,7 @@ qnoc-sm6150-objs := sm6150.o qnoc-lemans-objs := lemans.o qnoc-monaco-auto-objs := monaco_auto.o qnoc-anorak-objs := anorak.o +qnoc-neo-objs := neo.o qnoc-sm8150-objs := sm8150.o qnoc-sm8250-objs := sm8250.o qnoc-sm8350-objs := sm8350.o @@ -77,6 +78,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_HOLI) += qnoc-holi.o obj-$(CONFIG_INTERCONNECT_QCOM_PITTI) += qnoc-pitti.o obj-$(CONFIG_INTERCONNECT_QCOM_MONACO_AUTO) += qnoc-monaco-auto.o obj-$(CONFIG_INTERCONNECT_QCOM_ANORAK) += qnoc-anorak.o +obj-$(CONFIG_INTERCONNECT_QCOM_NEO) += qnoc-neo.o obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o obj-$(CONFIG_INTERCONNECT_QCOM_QOS) += qnoc-qos.o obj-$(CONFIG_INTERCONNECT_QCOM_QOS_RPM) += qnoc-qos-rpm.o diff --git a/drivers/interconnect/qcom/neo.c b/drivers/interconnect/qcom/neo.c new file mode 100644 index 000000000000..fc0acbd6abd3 --- /dev/null +++ b/drivers/interconnect/qcom/neo.c @@ -0,0 +1,2298 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "icc-rpmh.h" +#include "qnoc-qos.h" + +#define MSM_ID_SMEM 137 + +enum { + VOTER_IDX_HLOS, + VOTER_IDX_DISP, +}; + +enum target_msm_id { + NEO_LE = 525, + NEO_LA_V1 = 554, + NEO_LA_V2 = 579, +}; + +static const struct regmap_config icc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_QUP_CORE_0 }, +}; + +static struct qcom_icc_node qup1_core_master = { + .name = "qup1_core_master", + .id = MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_QUP_CORE_1 }, +}; + +static struct qcom_icc_node qnm_gemnoc_cnoc = { + .name = "qnm_gemnoc_cnoc", + .id = MASTER_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 43, + .links = { SLAVE_AHB2PHY_SOUTH, SLAVE_AOSS, + SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, + SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, + SLAVE_RBCPR_MMCX_CFG, SLAVE_RBCPR_MXA_CFG, + SLAVE_RBCPR_MXC_CFG, SLAVE_CPR_NSPCX, + SLAVE_CRYPTO_0_CFG, SLAVE_CX_RDPM, + SLAVE_DISPLAY_CFG, SLAVE_GFX3D_CFG, + SLAVE_IMEM_CFG, SLAVE_IPC_ROUTER_CFG, + SLAVE_LPASS, SLAVE_MX_RDPM, + SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, + SLAVE_PDM, SLAVE_PIMEM_CFG, + SLAVE_PRNG, SLAVE_QDSS_CFG, + SLAVE_QSPI_0, SLAVE_QUP_0, + SLAVE_QUP_1, SLAVE_SDCC_1, + SLAVE_TCSR, SLAVE_TLMM, + SLAVE_TME_CFG, SLAVE_USB3_0, + SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, + SLAVE_WLAN_Q6_CFG, SLAVE_DDRSS_CFG, + SLAVE_CNOC_MNOC_CFG, SLAVE_SNOC_CFG, + SLAVE_IMEM, SLAVE_PIMEM, + SLAVE_SERVICE_CNOC, SLAVE_QDSS_STM, + SLAVE_TCU }, +}; + +static struct qcom_icc_node qnm_gemnoc_pcie = { + .name = "qnm_gemnoc_pcie", + .id = MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth 
= 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 2, + .links = { SLAVE_PCIE_0, SLAVE_PCIE_1 }, +}; + +static struct qcom_icc_node xm_qdss_dap = { + .name = "xm_qdss_dap", + .id = MASTER_QDSS_DAP, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 43, + .links = { SLAVE_AHB2PHY_SOUTH, SLAVE_AOSS, + SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, + SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, + SLAVE_RBCPR_MMCX_CFG, SLAVE_RBCPR_MXA_CFG, + SLAVE_RBCPR_MXC_CFG, SLAVE_CPR_NSPCX, + SLAVE_CRYPTO_0_CFG, SLAVE_CX_RDPM, + SLAVE_DISPLAY_CFG, SLAVE_GFX3D_CFG, + SLAVE_IMEM_CFG, SLAVE_IPC_ROUTER_CFG, + SLAVE_LPASS, SLAVE_MX_RDPM, + SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, + SLAVE_PDM, SLAVE_PIMEM_CFG, + SLAVE_PRNG, SLAVE_QDSS_CFG, + SLAVE_QSPI_0, SLAVE_QUP_0, + SLAVE_QUP_1, SLAVE_SDCC_1, + SLAVE_TCSR, SLAVE_TLMM, + SLAVE_TME_CFG, SLAVE_USB3_0, + SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, + SLAVE_WLAN_Q6_CFG, SLAVE_DDRSS_CFG, + SLAVE_CNOC_MNOC_CFG, SLAVE_SNOC_CFG, + SLAVE_IMEM, SLAVE_PIMEM, + SLAVE_SERVICE_CNOC, SLAVE_QDSS_STM, + SLAVE_TCU }, +}; + +static struct qcom_icc_qosbox alm_gpu_tcu_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x9e000 }, + .config = &(struct qos_config) { + .prio = 1, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node alm_gpu_tcu = { + .name = "alm_gpu_tcu", + .id = MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &alm_gpu_tcu_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox alm_sys_tcu_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x9f000 }, + .config = &(struct qos_config) { + .prio = 6, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node alm_sys_tcu = { + .name = "alm_sys_tcu", + .id = MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &alm_sys_tcu_qos, + 
.num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_node chm_apps = { + .name = "chm_apps", + .id = MASTER_APPSS_PROC, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 3, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC, + SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_qosbox qnm_gpu_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0xe000, 0x4e000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = MASTER_GFX3D, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_gpu_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_mnoc_hf_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0xf000, 0x4f000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_mnoc_hf = { + .name = "qnm_mnoc_hf", + .id = MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_mnoc_hf_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_mnoc_sf_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x9d000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_mnoc_sf = { + .name = "qnm_mnoc_sf", + .id = MASTER_MNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_mnoc_sf_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0x10000, 0x50000 }, + .config = 
&(struct qos_config) { + .prio = 0, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qnm_nsp_gemnoc = { + .name = "qnm_nsp_gemnoc", + .id = MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_nsp_gemnoc_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_pcie_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0xa2000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_pcie = { + .name = "qnm_pcie", + .id = MASTER_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_pcie_qos, + .num_links = 2, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_snoc_gc_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0xa0000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_snoc_gc = { + .name = "qnm_snoc_gc", + .id = MASTER_SNOC_GC_MEM_NOC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_snoc_gc_qos, + .num_links = 1, + .links = { SLAVE_LLCC }, +}; + +static struct qcom_icc_qosbox qnm_snoc_sf_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0xa1000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_snoc_sf = { + .name = "qnm_snoc_sf", + .id = MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_snoc_sf_qos, + .num_links = 3, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC, + SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qxm_wlan_q6 = { + .name = "qxm_wlan_q6", + .id = MASTER_WLAN_Q6, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + 
.num_links = 3, + .links = { SLAVE_GEM_NOC_CNOC, SLAVE_LLCC, + SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qhm_config_noc = { + .name = "qhm_config_noc", + .id = MASTER_CNOC_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 6, + .links = { SLAVE_LPASS_CORE_CFG, SLAVE_LPASS_LPI_CFG, + SLAVE_LPASS_MPU_CFG, SLAVE_LPASS_TOP_CFG, + SLAVE_SERVICES_LPASS_AML_NOC, SLAVE_SERVICE_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qxm_lpass_dsp = { + .name = "qxm_lpass_dsp", + .id = MASTER_LPASS_PROC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 4, + .links = { SLAVE_LPASS_TOP_CFG, SLAVE_LPASS_SNOC, + SLAVE_SERVICES_LPASS_AML_NOC, SLAVE_SERVICE_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node llcc_mc = { + .name = "llcc_mc", + .id = MASTER_LLCC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_EBI1 }, +}; + +static struct qcom_icc_qosbox qnm_camnoc_hf_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1c000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_camnoc_hf = { + .name = "qnm_camnoc_hf", + .id = MASTER_CAMNOC_HF, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_camnoc_hf_qos, + .num_links = 1, + .links = { SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_camnoc_icp_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1c080 }, + .config = &(struct qos_config) { + .prio = 4, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_camnoc_icp = { + .name = "qnm_camnoc_icp", + .id = MASTER_CAMNOC_ICP, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_camnoc_icp_qos, + .num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_camnoc_sf_qos = { + .regs = 
icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1c100 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_camnoc_sf = { + .name = "qnm_camnoc_sf", + .id = MASTER_CAMNOC_SF, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_camnoc_sf_qos, + .num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_lsr_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0x1f000, 0x1f080 }, + .config = &(struct qos_config) { + .prio = 3, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_lsr = { + .name = "qnm_lsr", + .id = MASTER_LSR, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_lsr_qos, + .num_links = 1, + .links = { SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_mdp_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0x1d000, 0x1d080 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_mdp = { + .name = "qnm_mdp", + .id = MASTER_MDP, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_mdp_qos, + .num_links = 1, + .links = { SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mnoc_cfg = { + .name = "qnm_mnoc_cfg", + .id = MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_SERVICE_MNOC }, +}; + +static struct qcom_icc_qosbox qnm_video_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 2, + .offsets = { 0x1e000, 0x1e080 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_video = { + .name = "qnm_video", + .id = MASTER_VIDEO, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_video_qos, + 
.num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_video_cv_cpu_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1e100 }, + .config = &(struct qos_config) { + .prio = 4, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_video_cv_cpu = { + .name = "qnm_video_cv_cpu", + .id = MASTER_VIDEO_CV_PROC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_video_cv_cpu_qos, + .num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_video_cvp_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1e180 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_video_cvp = { + .name = "qnm_video_cvp", + .id = MASTER_VIDEO_PROC, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_video_cvp_qos, + .num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_qosbox qnm_video_v_cpu_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1e200 }, + .config = &(struct qos_config) { + .prio = 4, + .urg_fwd = 1, + }, +}; + +static struct qcom_icc_node qnm_video_v_cpu = { + .name = "qnm_video_v_cpu", + .id = MASTER_VIDEO_V_PROC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_video_v_cpu_qos, + .num_links = 1, + .links = { SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qhm_nsp_noc_config = { + .name = "qhm_nsp_noc_config", + .id = MASTER_CDSP_NOC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_SERVICE_NSP_NOC }, +}; + +static struct qcom_icc_node qxm_nsp = { + .name = "qxm_nsp", + .id = MASTER_CDSP_PROC, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_CDSP_MEM_NOC }, +}; + 
+static struct qcom_icc_qosbox xm_pcie3_0_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x9000 }, + .config = &(struct qos_config) { + .prio = 3, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_pcie3_0 = { + .name = "xm_pcie3_0", + .id = MASTER_PCIE_0, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_pcie3_0_qos, + .num_links = 1, + .links = { SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_qosbox xm_pcie3_1_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0xa000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_pcie3_1 = { + .name = "xm_pcie3_1", + .id = MASTER_PCIE_1, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_pcie3_1_qos, + .num_links = 1, + .links = { SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_qosbox qhm_gic_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1d000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qhm_gic = { + .name = "qhm_gic", + .id = MASTER_GIC_AHB, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qhm_gic_qos, + .num_links = 1, + .links = { SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_qosbox qhm_qdss_bam_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x22000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qhm_qdss_bam = { + .name = "qhm_qdss_bam", + .id = MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qhm_qdss_bam_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct 
qcom_icc_qosbox qhm_qspi_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x23000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qhm_qspi = { + .name = "qhm_qspi", + .id = MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qhm_qspi_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox qhm_qup0_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x24000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qhm_qup0 = { + .name = "qhm_qup0", + .id = MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qhm_qup0_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox qhm_qup1_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x25000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qhm_qup1 = { + .name = "qhm_qup1", + .id = MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qhm_qup1_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_aggre2_noc = { + .name = "qnm_aggre2_noc", + .id = MASTER_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_qosbox qnm_cnoc_datapath_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x26000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qnm_cnoc_datapath = { + .name = 
"qnm_cnoc_datapath", + .id = MASTER_CNOC_DATAPATH, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_cnoc_datapath_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox qnm_lpass_noc_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1e000 }, + .config = &(struct qos_config) { + .prio = 0, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qnm_lpass_noc = { + .name = "qnm_lpass_noc", + .id = MASTER_LPASS_ANOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qnm_lpass_noc_qos, + .num_links = 1, + .links = { SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_snoc_cfg = { + .name = "qnm_snoc_cfg", + .id = MASTER_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_SERVICE_SNOC }, +}; + +static struct qcom_icc_qosbox qxm_crypto_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x27000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qxm_crypto = { + .name = "qxm_crypto", + .id = MASTER_CRYPTO, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qxm_crypto_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox qxm_pimem_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1f000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node qxm_pimem = { + .name = "qxm_pimem", + .id = MASTER_PIMEM, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &qxm_pimem_qos, + .num_links = 1, + .links = { SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_qosbox xm_gic_qos = { + .regs = 
icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x21000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_gic = { + .name = "xm_gic", + .id = MASTER_GIC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_gic_qos, + .num_links = 1, + .links = { SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_qosbox xm_qdss_etr_0_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1b000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_qdss_etr_0 = { + .name = "xm_qdss_etr_0", + .id = MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_qdss_etr_0_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox xm_qdss_etr_1_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x1c000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_qdss_etr_1 = { + .name = "xm_qdss_etr_1", + .id = MASTER_QDSS_ETR_1, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_qdss_etr_1_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox xm_sdc1_qos = { + .regs = icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x29000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_sdc1 = { + .name = "xm_sdc1", + .id = MASTER_SDCC_1, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_sdc1_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_qosbox xm_usb3_0_qos = { + .regs = 
icc_qnoc_qos_regs[ICC_QNOC_QOSGEN_TYPE_RPMH], + .num_ports = 1, + .offsets = { 0x28000 }, + .config = &(struct qos_config) { + .prio = 2, + .urg_fwd = 0, + .prio_fwd_disable = 1, + }, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = MASTER_USB3_0, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .qosbox = &xm_usb3_0_qos, + .num_links = 1, + .links = { SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_mnoc_hf_disp = { + .name = "qnm_mnoc_hf_disp", + .id = MASTER_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node qnm_pcie_disp = { + .name = "qnm_pcie_disp", + .id = MASTER_ANOC_PCIE_GEM_NOC_DISP, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node llcc_mc_disp = { + .name = "llcc_mc_disp", + .id = MASTER_LLCC_DISP, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_EBI1_DISP }, +}; + +static struct qcom_icc_node qnm_mdp_disp = { + .name = "qnm_mdp_disp", + .id = MASTER_MDP_DISP, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { SLAVE_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qup1_core_slave = { + .name = "qup1_core_slave", + .id = SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy0 = { + .name = "qhs_ahb2phy0", + .id = SLAVE_AHB2PHY_SOUTH, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_aoss = { + .name = "qhs_aoss", + .id = 
SLAVE_AOSS, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_camera_cfg = { + .name = "qhs_camera_cfg", + .id = SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_compute_cfg = { + .name = "qhs_compute_cfg", + .id = SLAVE_CDSP_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_CDSP_NOC_CFG }, +}; + +static struct qcom_icc_node qhs_cpr_cx = { + .name = "qhs_cpr_cx", + .id = SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mmcx = { + .name = "qhs_cpr_mmcx", + .id = SLAVE_RBCPR_MMCX_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxa = { + .name = "qhs_cpr_mxa", + .id = SLAVE_RBCPR_MXA_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxc = { + .name = "qhs_cpr_mxc", + .id = SLAVE_RBCPR_MXC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_nspcx = { + .name = "qhs_cpr_nspcx", + .id = SLAVE_CPR_NSPCX, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cx_rdpm = { + .name = "qhs_cx_rdpm", + .id = SLAVE_CX_RDPM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 
0, +}; + +static struct qcom_icc_node qhs_display_cfg = { + .name = "qhs_display_cfg", + .id = SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_gpuss_cfg = { + .name = "qhs_gpuss_cfg", + .id = SLAVE_GFX3D_CFG, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipc_router = { + .name = "qhs_ipc_router", + .id = SLAVE_IPC_ROUTER_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_cfg = { + .name = "qhs_lpass_cfg", + .id = SLAVE_LPASS, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_CNOC_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qhs_mx_rdpm = { + .name = "qhs_mx_rdpm", + .id = SLAVE_MX_RDPM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie0_cfg = { + .name = "qhs_pcie0_cfg", + .id = SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie1_cfg = { + .name = "qhs_pcie1_cfg", + .id = SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = SLAVE_PDM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pimem_cfg = { + .name = "qhs_pimem_cfg", + .id = SLAVE_PIMEM_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_prng = { + .name = "qhs_prng", + .id = SLAVE_PRNG, + .channels 
= 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qspi = { + .name = "qhs_qspi", + .id = SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup0 = { + .name = "qhs_qup0", + .id = SLAVE_QUP_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup1 = { + .name = "qhs_qup1", + .id = SLAVE_QUP_1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc1 = { + .name = "qhs_sdc1", + .id = SLAVE_SDCC_1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = SLAVE_TCSR, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = SLAVE_TLMM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tme_cfg = { + .name = "qhs_tme_cfg", + .id = SLAVE_TME_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_0 = { + .name = "qhs_usb3_0", + .id = SLAVE_USB3_0, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_cfg = { + .name = "qhs_vsense_ctrl_cfg", + .id = SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4, + 
.noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_wlan_q6 = { + .name = "qhs_wlan_q6", + .id = SLAVE_WLAN_Q6_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_ddrss_cfg = { + .name = "qns_ddrss_cfg", + .id = SLAVE_DDRSS_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mnoc_cfg = { + .name = "qns_mnoc_cfg", + .id = SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_CNOC_MNOC_CFG }, +}; + +static struct qcom_icc_node qns_snoc_cfg = { + .name = "qns_snoc_cfg", + .id = SLAVE_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_SNOC_CFG }, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = SLAVE_IMEM, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qxs_pimem = { + .name = "qxs_pimem", + .id = SLAVE_PIMEM, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node srvc_cnoc = { + .name = "srvc_cnoc", + .id = SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_0 = { + .name = "xs_pcie_0", + .id = SLAVE_PCIE_0, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_1 = { + .name = "xs_pcie_1", + .id = SLAVE_PCIE_1, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = 
"xs_sys_tcu_cfg", + .id = SLAVE_TCU, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gem_noc_cnoc = { + .name = "qns_gem_noc_cnoc", + .id = SLAVE_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_GEM_NOC_CNOC }, +}; + +static struct qcom_icc_node qns_llcc = { + .name = "qns_llcc", + .id = SLAVE_LLCC, + .channels = 2, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_LLCC }, +}; + +static struct qcom_icc_node qns_pcie = { + .name = "qns_pcie", + .id = SLAVE_MEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_GEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qhs_lpass_core = { + .name = "qhs_lpass_core", + .id = SLAVE_LPASS_CORE_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_lpi = { + .name = "qhs_lpass_lpi", + .id = SLAVE_LPASS_LPI_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_mpu = { + .name = "qhs_lpass_mpu", + .id = SLAVE_LPASS_MPU_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_lpass_top = { + .name = "qhs_lpass_top", + .id = SLAVE_LPASS_TOP_CFG, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_sysnoc = { + .name = "qns_sysnoc", + .id = SLAVE_LPASS_SNOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_LPASS_ANOC }, +}; + +static struct qcom_icc_node srvc_niu_aml_noc = { + .name = "srvc_niu_aml_noc", + .id = SLAVE_SERVICES_LPASS_AML_NOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct 
qcom_icc_node srvc_niu_lpass_agnoc = { + .name = "srvc_niu_lpass_agnoc", + .id = SLAVE_SERVICE_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = SLAVE_EBI1, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf = { + .name = "qns_mem_noc_hf", + .id = SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_mem_noc_sf = { + .name = "qns_mem_noc_sf", + .id = SLAVE_MNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_mnoc = { + .name = "srvc_mnoc", + .id = SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_nsp_gemnoc = { + .name = "qns_nsp_gemnoc", + .id = SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_COMPUTE_NOC }, +}; + +static struct qcom_icc_node service_nsp_noc = { + .name = "service_nsp_noc", + .id = SLAVE_SERVICE_NSP_NOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_pcie_mem_noc = { + .name = "qns_pcie_mem_noc", + .id = SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node qns_a2noc_snoc = { + .name = "qns_a2noc_snoc", + .id = SLAVE_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qns_gemnoc_gc = { + .name = "qns_gemnoc_gc", + .id = 
SLAVE_SNOC_GEM_NOC_GC, + .channels = 1, + .buswidth = 8, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_SNOC_GC_MEM_NOC }, +}; + +static struct qcom_icc_node qns_gemnoc_sf = { + .name = "qns_gemnoc_sf", + .id = SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_SNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_snoc = { + .name = "srvc_snoc", + .id = SLAVE_SERVICE_SNOC, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_llcc_disp = { + .name = "qns_llcc_disp", + .id = SLAVE_LLCC_DISP, + .channels = 2, + .buswidth = 16, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_LLCC_DISP }, +}; + +static struct qcom_icc_node ebi_disp = { + .name = "ebi_disp", + .id = SLAVE_EBI1_DISP, + .channels = 1, + .buswidth = 4, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf_disp = { + .name = "qns_mem_noc_hf_disp", + .id = SLAVE_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .noc_ops = &qcom_qnoc4_ops, + .num_links = 1, + .links = { MASTER_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x8, + .perf_mode_mask = 0x2, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .voter_idx = VOTER_IDX_HLOS, + .num_nodes = 1, + .nodes = { &qxm_crypto }, +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x1, + .keepalive = true, + .num_nodes = 48, + .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie, + &xm_qdss_dap, &qhs_ahb2phy0, + &qhs_aoss, &qhs_camera_cfg, + &qhs_clk_ctl, &qhs_compute_cfg, + &qhs_cpr_cx, &qhs_cpr_mmcx, + &qhs_cpr_mxa, &qhs_cpr_mxc, + &qhs_cpr_nspcx, &qhs_crypto0_cfg, + &qhs_cx_rdpm, &qhs_display_cfg, + &qhs_gpuss_cfg, &qhs_imem_cfg, + 
&qhs_ipc_router, &qhs_lpass_cfg, + &qhs_mx_rdpm, &qhs_pcie0_cfg, + &qhs_pcie1_cfg, &qhs_pdm, + &qhs_pimem_cfg, &qhs_prng, + &qhs_qdss_cfg, &qhs_qspi, + &qhs_qup0, &qhs_qup1, + &qhs_sdc1, &qhs_tcsr, + &qhs_tlmm, &qhs_tme_cfg, + &qhs_usb3_0, &qhs_venus_cfg, + &qhs_vsense_ctrl_cfg, &qhs_wlan_q6, + &qns_ddrss_cfg, &qns_mnoc_cfg, + &qns_snoc_cfg, &qxs_imem, + &qxs_pimem, &srvc_cnoc, + &xs_pcie_0, &xs_pcie_1, + &xs_qdss_stm, &xs_sys_tcu_cfg }, +}; + +static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x1, + .num_nodes = 2, + .nodes = { &qxm_nsp, &qns_nsp_gemnoc }, +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .voter_idx = VOTER_IDX_HLOS, + .keepalive = true, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .voter_idx = VOTER_IDX_HLOS, + .keepalive_early = true, + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf }, +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x1, + .num_nodes = 11, + .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp, + &qnm_camnoc_sf, &qnm_lsr, + &qnm_mdp, &qnm_mnoc_cfg, + &qnm_video, &qnm_video_cv_cpu, + &qnm_video_cvp, &qnm_video_v_cpu, + &qns_mem_noc_sf }, +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .voter_idx = VOTER_IDX_HLOS, + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup0_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup1 = { + .name = "QUP1", + .voter_idx = VOTER_IDX_HLOS, + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup1_core_slave }, +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .voter_idx = VOTER_IDX_HLOS, + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_llcc }, +}; + +static struct qcom_icc_bcm bcm_sh1 = { + .name = "SH1", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x1, + .num_nodes = 13, + .nodes = { &alm_gpu_tcu, &alm_sys_tcu, + &chm_apps, 
&qnm_gpu, + &qnm_mnoc_hf, &qnm_mnoc_sf, + &qnm_nsp_gemnoc, &qnm_pcie, + &qnm_snoc_gc, &qnm_snoc_sf, + &qxm_wlan_q6, &qns_gem_noc_cnoc, + &qns_pcie }, +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .voter_idx = VOTER_IDX_HLOS, + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_gemnoc_sf }, +}; + +static struct qcom_icc_bcm bcm_sn1 = { + .name = "SN1", + .voter_idx = VOTER_IDX_HLOS, + .enable_mask = 0x1, + .num_nodes = 4, + .nodes = { &qhm_gic, &qxm_pimem, + &xm_gic, &qns_gemnoc_gc }, +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .voter_idx = VOTER_IDX_HLOS, + .num_nodes = 1, + .nodes = { &qnm_aggre2_noc }, +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .voter_idx = VOTER_IDX_HLOS, + .num_nodes = 1, + .nodes = { &qnm_lpass_noc }, +}; + +static struct qcom_icc_bcm bcm_sn7 = { + .name = "SN7", + .voter_idx = VOTER_IDX_HLOS, + .num_nodes = 1, + .nodes = { &qns_pcie_mem_noc }, +}; + +static struct qcom_icc_bcm bcm_acv_disp = { + .name = "ACV", + .voter_idx = VOTER_IDX_DISP, + .enable_mask = 0x1, + .perf_mode_mask = 0x2, + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mc0_disp = { + .name = "MC0", + .voter_idx = VOTER_IDX_DISP, + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mm0_disp = { + .name = "MM0", + .voter_idx = VOTER_IDX_DISP, + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf_disp }, +}; + +static struct qcom_icc_bcm bcm_mm1_disp = { + .name = "MM1", + .voter_idx = VOTER_IDX_DISP, + .enable_mask = 0x1, + .num_nodes = 1, + .nodes = { &qnm_mdp_disp }, +}; + +static struct qcom_icc_bcm bcm_sh0_disp = { + .name = "SH0", + .voter_idx = VOTER_IDX_DISP, + .num_nodes = 1, + .nodes = { &qns_llcc_disp }, +}; + +static struct qcom_icc_bcm bcm_sh1_disp = { + .name = "SH1", + .voter_idx = VOTER_IDX_DISP, + .enable_mask = 0x1, + .num_nodes = 2, + .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp }, +}; + +static struct qcom_icc_bcm *clk_virt_bcms[] = { + 
&bcm_qup0, + &bcm_qup1, +}; + +static struct qcom_icc_node *clk_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &qup0_core_master, + [MASTER_QUP_CORE_1] = &qup1_core_master, + [SLAVE_QUP_CORE_0] = &qup0_core_slave, + [SLAVE_QUP_CORE_1] = &qup1_core_slave, +}; + +static char *clk_virt_voters[] = { + [VOTER_IDX_HLOS] = "hlos", +}; + +static struct qcom_icc_desc neo_clk_virt = { + .config = &icc_regmap_config, + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .bcms = clk_virt_bcms, + .num_bcms = ARRAY_SIZE(clk_virt_bcms), + .voters = clk_virt_voters, + .num_voters = ARRAY_SIZE(clk_virt_voters), +}; + +static struct qcom_icc_bcm *config_noc_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_node *config_noc_nodes[] = { + [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, + [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, + [MASTER_QDSS_DAP] = &xm_qdss_dap, + [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CDSP_CFG] = &qhs_compute_cfg, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx, + [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa, + [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc, + [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_CX_RDPM] = &qhs_cx_rdpm, + [SLAVE_DISPLAY_CFG] = &qhs_display_cfg, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, + [SLAVE_LPASS] = &qhs_lpass_cfg, + [SLAVE_MX_RDPM] = &qhs_mx_rdpm, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QSPI_0] = &qhs_qspi, + [SLAVE_QUP_0] = &qhs_qup0, + [SLAVE_QUP_1] = &qhs_qup1, + [SLAVE_SDCC_1] = &qhs_sdc1, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_TME_CFG] = &qhs_tme_cfg, + [SLAVE_USB3_0] = 
&qhs_usb3_0, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_WLAN_Q6_CFG] = &qhs_wlan_q6, + [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg, + [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg, + [SLAVE_SNOC_CFG] = &qns_snoc_cfg, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_PIMEM] = &qxs_pimem, + [SLAVE_SERVICE_CNOC] = &srvc_cnoc, + [SLAVE_PCIE_0] = &xs_pcie_0, + [SLAVE_PCIE_1] = &xs_pcie_1, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static char *config_noc_voters[] = { + "hlos", +}; + +static struct qcom_icc_desc neo_config_noc = { + .config = &icc_regmap_config, + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .bcms = config_noc_bcms, + .num_bcms = ARRAY_SIZE(config_noc_bcms), + .voters = config_noc_voters, + .num_voters = ARRAY_SIZE(config_noc_voters), +}; + +static struct qcom_icc_bcm *gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh1, + &bcm_sh0_disp, + &bcm_sh1_disp, +}; + +static struct qcom_icc_node *gem_noc_nodes[] = { + [MASTER_GPU_TCU] = &alm_gpu_tcu, + [MASTER_SYS_TCU] = &alm_sys_tcu, + [MASTER_APPSS_PROC] = &chm_apps, + [MASTER_GFX3D] = &qnm_gpu, + [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc, + [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, + [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [MASTER_WLAN_Q6] = &qxm_wlan_q6, + [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie, + [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp, + [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp, + [SLAVE_LLCC_DISP] = &qns_llcc_disp, +}; + +static char *gem_noc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", + [VOTER_IDX_DISP] = "disp", +}; + +static struct qcom_icc_desc neo_gem_noc = { + .config = &icc_regmap_config, + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = 
ARRAY_SIZE(gem_noc_bcms), + .voters = gem_noc_voters, + .num_voters = ARRAY_SIZE(gem_noc_voters), +}; + +static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = { +}; + +static struct qcom_icc_node *lpass_ag_noc_nodes[] = { + [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc, + [MASTER_LPASS_PROC] = &qxm_lpass_dsp, + [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, + [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi, + [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu, + [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top, + [SLAVE_LPASS_SNOC] = &qns_sysnoc, + [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc, + [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, +}; + +static char *lpass_ag_noc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", +}; + +static struct qcom_icc_desc neo_lpass_ag_noc = { + .config = &icc_regmap_config, + .nodes = lpass_ag_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), + .bcms = lpass_ag_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), + .voters = lpass_ag_noc_voters, + .num_voters = ARRAY_SIZE(lpass_ag_noc_voters), +}; + +static struct qcom_icc_bcm *mc_virt_bcms[] = { + &bcm_acv, + &bcm_mc0, + &bcm_acv_disp, + &bcm_mc0_disp, +}; + +static struct qcom_icc_node *mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + [SLAVE_EBI1] = &ebi, + [MASTER_LLCC_DISP] = &llcc_mc_disp, + [SLAVE_EBI1_DISP] = &ebi_disp, +}; + +static char *mc_virt_voters[] = { + [VOTER_IDX_HLOS] = "hlos", + [VOTER_IDX_DISP] = "disp", +}; + +static struct qcom_icc_desc neo_mc_virt = { + .config = &icc_regmap_config, + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), + .voters = mc_virt_voters, + .num_voters = ARRAY_SIZE(mc_virt_voters), +}; + +static struct qcom_icc_bcm *mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, + &bcm_mm0_disp, + &bcm_mm1_disp, +}; + +static struct qcom_icc_node *mmss_noc_nodes[] = { + [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, + [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, + [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, + [MASTER_LSR] 
= &qnm_lsr, + [MASTER_MDP] = &qnm_mdp, + [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg, + [MASTER_VIDEO] = &qnm_video, + [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu, + [MASTER_VIDEO_PROC] = &qnm_video_cvp, + [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, + [SLAVE_SERVICE_MNOC] = &srvc_mnoc, + [MASTER_MDP_DISP] = &qnm_mdp_disp, + [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp, +}; + +static char *mmss_noc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", + [VOTER_IDX_DISP] = "disp", +}; + +static struct qcom_icc_desc neo_mmss_noc = { + .config = &icc_regmap_config, + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), + .voters = mmss_noc_voters, + .num_voters = ARRAY_SIZE(mmss_noc_voters), +}; + +static struct qcom_icc_bcm *nsp_noc_bcms[] = { + &bcm_co0, +}; + +static struct qcom_icc_node *nsp_noc_nodes[] = { + [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, + [MASTER_CDSP_PROC] = &qxm_nsp, + [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, + [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, +}; + +static char *nsp_noc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", +}; + +static struct qcom_icc_desc neo_nsp_noc = { + .config = &icc_regmap_config, + .nodes = nsp_noc_nodes, + .num_nodes = ARRAY_SIZE(nsp_noc_nodes), + .bcms = nsp_noc_bcms, + .num_bcms = ARRAY_SIZE(nsp_noc_bcms), + .voters = nsp_noc_voters, + .num_voters = ARRAY_SIZE(nsp_noc_voters), +}; + +static struct qcom_icc_bcm *pcie_anoc_bcms[] = { + &bcm_sn7, +}; + +static struct qcom_icc_node *pcie_anoc_nodes[] = { + [MASTER_PCIE_0] = &xm_pcie3_0, + [MASTER_PCIE_1] = &xm_pcie3_1, + [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc, +}; + +static char *pcie_anoc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", +}; + +static struct qcom_icc_desc neo_pcie_anoc = { + .config = &icc_regmap_config, + .nodes = pcie_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_anoc_nodes), + .bcms = pcie_anoc_bcms, + 
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms), + .voters = pcie_anoc_voters, + .num_voters = ARRAY_SIZE(pcie_anoc_voters), +}; + +static struct qcom_icc_bcm *system_noc_bcms[] = { + &bcm_ce0, + &bcm_sn0, + &bcm_sn1, + &bcm_sn3, + &bcm_sn4, +}; + +static struct qcom_icc_node *system_noc_nodes[] = { + [MASTER_GIC_AHB] = &qhm_gic, + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QSPI_0] = &qhm_qspi, + [MASTER_QUP_0] = &qhm_qup0, + [MASTER_QUP_1] = &qhm_qup1, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [MASTER_CNOC_DATAPATH] = &qnm_cnoc_datapath, + [MASTER_LPASS_ANOC] = &qnm_lpass_noc, + [MASTER_SNOC_CFG] = &qnm_snoc_cfg, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_PIMEM] = &qxm_pimem, + [MASTER_GIC] = &xm_gic, + [MASTER_QDSS_ETR] = &xm_qdss_etr_0, + [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1, + [MASTER_SDCC_1] = &xm_sdc1, + [MASTER_USB3_0] = &xm_usb3_0, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, + [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc, + [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, + [SLAVE_SERVICE_SNOC] = &srvc_snoc, +}; + +static char *system_noc_voters[] = { + [VOTER_IDX_HLOS] = "hlos", +}; + +static struct qcom_icc_desc neo_system_noc = { + .config = &icc_regmap_config, + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), + .voters = system_noc_voters, + .num_voters = ARRAY_SIZE(system_noc_voters), +}; + +static int qnoc_probe(struct platform_device *pdev) +{ + const char *compat = NULL; + int compatlen = 0; + u32 *msm_id; + size_t len; + int ret; + + msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len); + if (IS_ERR(msm_id)) + return PTR_ERR(msm_id); + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if ((enum target_msm_id) *(++msm_id) == NEO_LA_V1) { + if (!strcmp(compat, "qcom,neo-gem_noc")) { + bcm_sh0_disp.voter_idx = VOTER_IDX_HLOS; + bcm_sh1_disp.voter_idx = VOTER_IDX_HLOS; + 
gem_noc_nodes[MASTER_MNOC_HF_MEM_NOC_DISP] = NULL; + gem_noc_nodes[MASTER_ANOC_PCIE_GEM_NOC_DISP] = NULL; + gem_noc_nodes[SLAVE_LLCC_DISP] = NULL; + neo_gem_noc.num_voters = 1; + neo_gem_noc.num_bcms = 2; + } else if (!strcmp(compat, "qcom,neo-mc_virt")) { + bcm_acv_disp.voter_idx = VOTER_IDX_HLOS; + bcm_mc0_disp.voter_idx = VOTER_IDX_HLOS; + mc_virt_nodes[SLAVE_EBI1_DISP] = NULL; + mc_virt_nodes[MASTER_LLCC_DISP] = NULL; + neo_mc_virt.num_voters = 1; + neo_mc_virt.num_bcms = 2; + } else if (!strcmp(compat, "qcom,neo-mmss_noc")) { + bcm_mm0_disp.voter_idx = VOTER_IDX_HLOS; + bcm_mm1_disp.voter_idx = VOTER_IDX_HLOS; + mmss_noc_nodes[MASTER_MDP_DISP] = NULL; + mmss_noc_nodes[SLAVE_MNOC_HF_MEM_NOC_DISP] = NULL; + neo_mmss_noc.num_voters = 1; + neo_mmss_noc.num_bcms = 2; + } + } + + ret = qcom_icc_rpmh_probe(pdev); + + if (ret) + dev_err(&pdev->dev, "failed to register ICC provider\n"); + else + dev_info(&pdev->dev, "Registered NEO ICC\n"); + + return ret; +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,neo-clk_virt", + .data = &neo_clk_virt}, + { .compatible = "qcom,neo-config_noc", + .data = &neo_config_noc}, + { .compatible = "qcom,neo-gem_noc", + .data = &neo_gem_noc}, + { .compatible = "qcom,neo-lpass_ag_noc", + .data = &neo_lpass_ag_noc}, + { .compatible = "qcom,neo-mc_virt", + .data = &neo_mc_virt}, + { .compatible = "qcom,neo-mmss_noc", + .data = &neo_mmss_noc}, + { .compatible = "qcom,neo-nsp_noc", + .data = &neo_nsp_noc}, + { .compatible = "qcom,neo-pcie_anoc", + .data = &neo_pcie_anoc}, + { .compatible = "qcom,neo-system_noc", + .data = &neo_system_noc}, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qcom_icc_rpmh_remove, + .driver = { + .name = "qnoc-neo", + .of_match_table = qnoc_of_match, + .sync_state = qcom_icc_rpmh_sync_state, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); 
+} +core_initcall(qnoc_driver_init); + +MODULE_DESCRIPTION("Neo NoC driver"); +MODULE_LICENSE("GPL"); From cce6ec546eaa85bbd8f7402029e764e1415f34a1 Mon Sep 17 00:00:00 2001 From: Dhaval Radiya Date: Mon, 22 Jul 2024 11:59:31 +0530 Subject: [PATCH 045/117] defconfig: Enable Regulator related modules for neo Enable Regulator related modules for neo platform. Change-Id: I140caa8a7a39e03b18228a5ba9994a76f09edd60 Signed-off-by: Dhaval Radiya --- arch/arm64/configs/vendor/neo_la_GKI.config | 6 ++++++ modules.list.msm.neo-la | 6 +++++- neo_la.bzl | 6 ++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index 227c0ab4d5da..73abe9fcd2ff 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -24,6 +24,7 @@ CONFIG_HWSPINLOCK_QCOM=m CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_IOMMU_IO_PGTABLE_FAST=y CONFIG_LOCALVERSION="-gki" +CONFIG_MFD_I2C_PMIC=m # CONFIG_MODULE_SIG_ALL is not set CONFIG_MSM_BOOT_STATS=m CONFIG_MSM_CORE_HANG_DETECT=m @@ -77,5 +78,10 @@ CONFIG_QCOM_WATCHDOG_WAKEUP_ENABLE=y CONFIG_QCOM_WDT_CORE=m CONFIG_QTEE_SHM_BRIDGE=y CONFIG_QTI_IOMMU_SUPPORT=m +CONFIG_REGULATOR_DEBUG_CONTROL=m +CONFIG_REGULATOR_PROXY_CONSUMER=m +CONFIG_REGULATOR_QCOM_PM8008=m +CONFIG_REGULATOR_QTI_FIXED_VOLTAGE=m +CONFIG_REGULATOR_RPMH=m CONFIG_SCHED_WALT=m CONFIG_VIRT_DRIVERS=y diff --git a/modules.list.msm.neo-la b/modules.list.msm.neo-la index 3d9591ab58fc..2035ea88956e 100644 --- a/modules.list.msm.neo-la +++ b/modules.list.msm.neo-la @@ -76,7 +76,7 @@ dispcc-neo.ko secure_buffer.ko qcom-cpufreq-hw.ko sched-walt-debug.ko -qcom-i2c-pmici.ko +qcom-i2c-pmic.ko qcom-spmi-pmic.ko qcom-reboot-reason.ko qti-regmap-debugfs.ko @@ -88,4 +88,8 @@ crypto-qti-common.ko crypto-qti-hwkm.ko hwkm.ko tmecom-intf.ko +qti-fixed-regulator.ko +qcom_pm8008-regulator.ko +rpmh-regulator.ko +debug-regulator.ko qcom-pdc.ko diff --git a/neo_la.bzl 
b/neo_la.bzl index d73a15d4d5df..508b99f95769 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -20,10 +20,16 @@ def define_neo_la(): "drivers/iommu/qcom_iommu_util.ko", "drivers/irqchip/msm_show_resume_irq.ko", "drivers/irqchip/qcom-pdc.ko", + "drivers/mfd/qcom-i2c-pmic.ko", "drivers/perf/qcom_llcc_pmu.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/power/reset/qcom-dload-mode.ko", "drivers/power/reset/qcom-reboot-reason.ko", + "drivers/regulator/debug-regulator.ko", + "drivers/regulator/proxy-consumer.ko", + "drivers/regulator/qcom_pm8008-regulator.ko", + "drivers/regulator/qti-fixed-regulator.ko", + "drivers/regulator/rpmh-regulator.ko", "drivers/soc/qcom/boot_stats.ko", "drivers/soc/qcom/cmd-db.ko", "drivers/soc/qcom/core_hang_detect.ko", From abe45eaade45df3430bf236950d8e5a55dcc5add Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 13 Aug 2024 10:49:14 +0530 Subject: [PATCH 046/117] clk: qcom: clk-branch: Add support for BRANCH_HALT_POLL flag Usecases where branch clock will be enabled pre-HLOS and expectation from the clock provider is to poll on the clock to ensure it is indeed enabled and not HW gated, thus add the BRANCH_HALT_POLL flag. Change-Id: If41a65c64755d062721266ca7ea97822102115a1 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/clk-branch.c | 7 ++++++- drivers/clk/qcom/clk-branch.h | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index 83ef97bf386a..579001540caa 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013, 2016, 2020-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -104,6 +104,7 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling, udelay(10); } else if (br->halt_check == BRANCH_HALT_ENABLE || br->halt_check == BRANCH_HALT || + br->halt_check == BRANCH_HALT_POLL || (enabling && voted)) { timeout = get_branch_timeout(br); @@ -125,6 +126,10 @@ static int clk_branch_toggle(struct clk_hw *hw, bool en, struct clk_branch *br = to_clk_branch(hw); int ret; + if (br->halt_check == BRANCH_HALT_POLL) { + return clk_branch_wait(br, en, check_halt); + } + if (en) { ret = clk_enable_regmap(hw); if (ret) diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h index b211e588f5ec..fb18b5de6244 100644 --- a/drivers/clk/qcom/clk-branch.h +++ b/drivers/clk/qcom/clk-branch.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2013, 2016, 2020 The Linux Foundation. All rights reserved. */ -/* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. */ +/* Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __QCOM_CLK_BRANCH_H__ #define __QCOM_CLK_BRANCH_H__ @@ -41,6 +41,7 @@ struct clk_branch { #define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */ #define BRANCH_HALT_SKIP 3 /* Don't check halt bit */ #define BRANCH_HALT_INVERT 4 /* Invert logic for halt bit */ +#define BRANCH_HALT_POLL 5 /* Don't enable the clock, poll for halt */ struct clk_regmap clkr; }; From a5b38d6ff687c566f078e33a8b8b3cb4c677d89f Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Tue, 11 Jun 2024 14:26:51 +0530 Subject: [PATCH 047/117] cpuidle: governors: qcom-lpm: Fix runtime PM based cpuidle support In the cpuidle-psci case, runtime PM in combination with the generic PM domain (genpd), may be used when entering/exiting a shared idle state. More precisely, genpd relies on runtime PM to be enabled for the attached device (in this case it belongs to a CPU), to properly manage the reference counting of its PM domain. 
This works fine most of the time, but during system suspend in dpm_suspend_late(), the PM core disables runtime PM for all devices. Beyond this point, calls to pm_runtime_get_sync() to runtime resume a device may fail and therefore it could also mess up the reference counting in genpd. To fix this problem, let's call wake_up_all_idle_cpus() prior to disabling runtime PM. In this way a device that belongs to a CPU, becomes runtime resumed through cpuidle-psci and stays like that because idle governor will stop selecting deeper idle state when suspend is started. Change-Id: I8e046aa9b904ada09c0138499766d6362a9795be Signed-off-by: Maulik Shah --- drivers/cpuidle/governors/qcom-lpm.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/cpuidle/governors/qcom-lpm.c b/drivers/cpuidle/governors/qcom-lpm.c index f0c46e16372b..48c14b30309e 100644 --- a/drivers/cpuidle/governors/qcom-lpm.c +++ b/drivers/cpuidle/governors/qcom-lpm.c @@ -707,6 +707,28 @@ static void lpm_idle_exit(void *unused, int state, struct cpuidle_device *dev) } } +static int suspend_lpm_notify(struct notifier_block *nb, + unsigned long mode, void *_unused) +{ + int cpu; + + switch (mode) { + case PM_SUSPEND_PREPARE: + suspend_in_progress = true; + break; + case PM_POST_SUSPEND: + suspend_in_progress = false; + break; + default: + break; + } + + for_each_online_cpu(cpu) + wake_up_if_idle(cpu); + + return 0; +} + /** * lpm_enable_device() - Initialize the governor's data for the CPU * @drv: cpuidle driver @@ -831,6 +853,10 @@ static struct cpuidle_governor lpm_governor = { .reflect = lpm_reflect, }; +static struct notifier_block suspend_lpm_nb = { + .notifier_call = suspend_lpm_notify, +}; + static int __init qcom_lpm_governor_init(void) { int ret; @@ -856,6 +882,8 @@ static int __init qcom_lpm_governor_init(void) if (ret < 0) goto cpuhp_setup_fail; + register_pm_notifier(&suspend_lpm_nb); + return 0; cpuhp_setup_fail: From 88af4d180a75d7aa876eb3181a82025a1dbd10a9 Mon Sep 
17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 6 Aug 2024 10:38:38 +0530 Subject: [PATCH 048/117] bindings: clock: qcom: Add gcc_pwm0_xo512_div_clk_src clock id Add clock handle for gcc_pwm0_xo512_div_clk_src clock on NIOBE platform. Change-Id: Ib06b737c4fc74a5b2fecd25313ca56fdfe208ed9 Signed-off-by: Kalpak Kawadkar --- include/dt-bindings/clock/qcom,gcc-niobe.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/dt-bindings/clock/qcom,gcc-niobe.h b/include/dt-bindings/clock/qcom,gcc-niobe.h index 1665dae385be..a4d17110508d 100644 --- a/include/dt-bindings/clock/qcom,gcc-niobe.h +++ b/include/dt-bindings/clock/qcom,gcc-niobe.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _DT_BINDINGS_CLK_QCOM_GCC_NIOBE_H @@ -224,6 +224,7 @@ #define GCC_PCIE_1_PHY_AUX_CLK 214 #define GCC_PCIE_1_PHY_AUX_CLK_SRC 215 #define GCC_USB30_PRIM_ATB_CLK 216 +#define GCC_PWM0_XO512_DIV_CLK_SRC 217 /* GCC resets */ #define GCC_CAMERA_BCR 0 From 7dbfb580431bd16a430a85b66cc790612582f2fd Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Wed, 31 Jul 2024 16:45:26 +0530 Subject: [PATCH 049/117] clk: qcom: gcc-niobe: Add pwm clocks to support pdm_pwm Add gcc_pwm0_xo512_div_clk_src as parent of gcc_pwm0_xo512_clk to support the pdm_pwm functionality on NIOBE platform. 
Change-Id: I719ac8405dcc9460d5d3346a1ee52ad4555aba11 Signed-off-by: Kalpak Kawadkar --- drivers/clk/qcom/gcc-niobe.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/clk/qcom/gcc-niobe.c b/drivers/clk/qcom/gcc-niobe.c index 8de723d11700..b564eeeee555 100644 --- a/drivers/clk/qcom/gcc-niobe.c +++ b/drivers/clk/qcom/gcc-niobe.c @@ -1933,6 +1933,20 @@ static struct clk_regmap_div gcc_pcie_2_pipe_div_clk_src = { }, }; +static struct clk_regmap_div gcc_pwm0_xo512_div_clk_src = { + .reg = 0x33030, + .shift = 0, + .width = 9, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pwm0_xo512_div_clk_src", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_regmap_div_ops, + }, +}; + static struct clk_regmap_div gcc_qupv3_wrap1_s1_div_clk_src = { .reg = 0x18148, .shift = 0, @@ -3011,6 +3025,11 @@ static struct clk_branch gcc_pwm0_xo512_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "gcc_pwm0_xo512_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pwm0_xo512_div_clk_src.clkr.hw + }, + .flags = CLK_SET_RATE_PARENT, + .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -4280,6 +4299,7 @@ static struct clk_regmap *gcc_niobe_clocks[] = { [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, [GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr, + [GCC_PWM0_XO512_DIV_CLK_SRC] = &gcc_pwm0_xo512_div_clk_src.clkr, [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, From 19f5ce8875dc3d04353d314992ca7add07019bb6 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Wed, 7 Aug 2024 00:47:59 -0700 Subject: [PATCH 050/117] soc: qcom: smem: Add qcom_smem_bust_hwspin_lock_by_host() Add qcom_smem_bust_hwspin_lock_by_host to enable remoteproc to bust the hwspin_lock owned by smem. 
In the event the remoteproc crashes unexpectedly, the remoteproc driver can invoke this API to try and bust the hwspin_lock and release the lock if still held by the remoteproc device. Change-Id: Ia9b3169586a75908f8f6e7e831b6a42ee585f9f7 Signed-off-by: Chris Lew Reviewed-by: Bjorn Andersson Link: https://lore.kernel.org/r/20240529-hwspinlock-bust-v3-3-c8b924ffa5a2@quicinc.com Signed-off-by: Bjorn Andersson Git-commit: 2e3f0d693875db698891ffe89a18121bda5b95b8 Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [quic_deesin@quicinc.com: Bust function in core hwspinlock module is not available downstream due to KMI freeze, directly use bust function exported by qcom hwspinlock module] Signed-off-by: Deepak Kumar Singh --- drivers/soc/qcom/smem.c | 27 +++++++++++++++++++++++++++ include/linux/soc/qcom/smem.h | 2 ++ 2 files changed, 29 insertions(+) diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 2667487e6c7d..83696107edba 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -370,6 +371,32 @@ static struct qcom_smem *__smem; /* Timeout (ms) for the trylock of remote spinlocks */ #define HWSPINLOCK_TIMEOUT 1000 +/* The qcom hwspinlock id is always plus one from the smem host id */ +#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1) + +/** + * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host + * @host: remote processor id + * + * Busts the hwspin_lock for the given smem host id. This helper is intended + * for remoteproc drivers that manage remoteprocs with an equivalent smem + * driver instance in the remote firmware. Drivers can force a release of the + * smem hwspin_lock if the rproc unexpectedly goes into a bad state. + * + * Context: Process context. + * + * Returns: 0 on success, otherwise negative errno. 
+ */ +int qcom_smem_bust_hwspin_lock_by_host(unsigned int host) +{ + /* This function is for remote procs, so ignore SMEM_HOST_APPS */ + if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT) + return -EINVAL; + + return qcom_hwspinlock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host)); +} +EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host); + static int qcom_smem_alloc_private(struct qcom_smem *smem, struct smem_partition *part, unsigned item, diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index fee6c397c964..2fb00a64e237 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h @@ -17,4 +17,6 @@ int qcom_smem_get_free_space(unsigned host); phys_addr_t qcom_smem_virt_to_phys(void *p); +int qcom_smem_bust_hwspin_lock_by_host(unsigned int host); + #endif From fce407b7affb86ed71ab91adf7b81cd970e56413 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 12:06:06 +0530 Subject: [PATCH 051/117] clk: qcom: rpmh: Add support for PMIC clocks for NEO Add the PMIC clocks required to be requested from clients via RPMH clock driver. 
Change-Id: I862816903386043579203ee2a255945ba5b260d2 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/clk-rpmh.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index f49ed5207f0f..10618b9f2e9e 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -707,6 +707,18 @@ static const struct clk_rpmh_desc clk_rpmh_lemans = { .num_clks = ARRAY_SIZE(lemans_rpmh_clocks), }; +DEFINE_CLK_RPMH_ARC(neo, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 1); + +static struct clk_hw *neo_rpmh_clocks[] = { + [RPMH_CXO_CLK] = &neo_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &neo_bi_tcxo_ao.hw, +}; + +static const struct clk_rpmh_desc clk_rpmh_neo = { + .clks = neo_rpmh_clocks, + .num_clks = ARRAY_SIZE(neo_rpmh_clocks), +}; + DEFINE_CLK_RPMH_VRM(anorak, rf_clk1, rf_clk1_ao, "clka1", 1); DEFINE_CLK_RPMH_VRM(anorak, ln_bb_clk7, ln_bb_clk7_ao, "clka7", 2); DEFINE_CLK_RPMH_VRM(anorak, ln_bb_clk8, ln_bb_clk8_ao, "clka8", 4); @@ -1009,6 +1021,7 @@ static const struct of_device_id clk_rpmh_match_table[] = { { .compatible = "qcom,niobe-rpmh-clk", .data = &clk_rpmh_niobe}, { .compatible = "qcom,volcano-rpmh-clk", .data = &clk_rpmh_volcano}, { .compatible = "qcom,anorak-rpmh-clk", .data = &clk_rpmh_anorak}, + { .compatible = "qcom,neo-rpmh-clk", .data = &clk_rpmh_neo}, { } }; MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); From f2ff920d1caa65681d18e269596023fe18d99ee0 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 12:24:28 +0530 Subject: [PATCH 052/117] clk: qcom: gcc-neo: Snapshot of GCC driver for NEO Add snapshot of support of Global Clock Controller for peripheral clock clients to be able to request for the clocks on NEO from msm-5.10 branch commit 65ec2ca93cf8 ("clk: qcom: gcc-neo: Add support for GCC clock driver"). 
Change-Id: I2a77b55708376d5dda3d3137127267952002a718 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/gcc-neo.c | 2688 ++++++++++++++++++++++++++++++++++++ 3 files changed, 2698 insertions(+) create mode 100644 drivers/clk/qcom/gcc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index a50cd565eedf..7ec24465fc0e 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1490,6 +1490,15 @@ config SM_DEBUGCC_VOLCANO Volcano devices. Say Y if you want to support the debug clocks such as clock measurement functionality. + +config SXR_GCC_NEO + tristate "NEO Global Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on Qualcomm Technologies, Inc. + NEO devices. + Say Y if you want to use peripheral devices such as UART, SPI, I2C, + USB, UFS, SD/eMMC, PCIE, etc. endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index b1946edb5351..6569674d70d8 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -153,6 +153,7 @@ obj-$(CONFIG_SM_GCC_PINEAPPLE) += gcc-pineapple.o obj-$(CONFIG_SM_GCC_PITTI) += gcc-pitti.o obj-$(CONFIG_SM_GCC_VOLCANO) += gcc-volcano.o obj-$(CONFIG_SXR_GCC_ANORAK) += gcc-anorak.o +obj-$(CONFIG_SXR_GCC_NEO) += gcc-neo.o obj-$(CONFIG_SXR_GCC_NIOBE) += gcc-niobe.o obj-$(CONFIG_SM_GPUCC_6150) += gpucc-sm6150.o obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o diff --git a/drivers/clk/qcom/gcc-neo.c b/drivers/clk/qcom/gcc-neo.c new file mode 100644 index 000000000000..ff38ae01acfd --- /dev/null +++ b/drivers/clk/qcom/gcc-neo.c @@ -0,0 +1,2688 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NOMINAL + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxa, VDD_NOMINAL + 1, 1, vdd_corner); + +static struct clk_vdd_class *gcc_neo_regulators[] = { + &vdd_cx, + &vdd_mxa, +}; + +enum { + P_BI_TCXO, + P_GCC_GPLL0_OUT_EVEN, + P_GCC_GPLL0_OUT_MAIN, + P_GCC_GPLL1_OUT_EVEN, + P_GCC_GPLL1_OUT_MAIN, + P_GCC_GPLL4_OUT_MAIN, + P_GCC_GPLL5_OUT_MAIN, + P_GCC_GPLL7_OUT_MAIN, + P_GCC_GPLL9_OUT_EVEN, + P_PCIE_0_PIPE_CLK, + P_PCIE_1_PIPE_CLK, + P_SLEEP_CLK, + P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, +}; + +static struct clk_alpha_pll gcc_gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_gcc_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gpll0_out_even", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0.clkr.hw, + 
}, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static struct clk_alpha_pll gcc_gpll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll4 = { + .offset = 0x4000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll4", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll5 = { + .offset = 0x5000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll5", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static 
struct clk_alpha_pll gcc_gpll7 = { + .offset = 0x7000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll7", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll9 = { + .offset = 0x9000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .enable_reg = 0x62018, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpll9", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gcc_gpll9_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll9_out_even = { + .offset = 0x9000, + .post_div_shift = 10, + .post_div_table = post_div_table_gcc_gpll9_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll9_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gpll9_out_even", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll9.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { 
P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .fw_name = "sleep_clk" }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_GCC_GPLL5_OUT_MAIN, 3 }, + { P_GCC_GPLL1_OUT_MAIN, 4 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll7.clkr.hw }, + { .hw = &gcc_gpll5.clkr.hw }, + { .hw = &gcc_gpll1.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "sleep_clk" }, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_PCIE_0_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_4[] = { + { .fw_name = "pcie_0_pipe_clk" }, + { .fw_name = "bi_tcxo" }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_PCIE_1_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_5[] = { + { .fw_name = "pcie_1_pipe_clk" }, + { .fw_name = "bi_tcxo" }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL9_OUT_EVEN, 2 }, + { P_GCC_GPLL0_OUT_EVEN, 6 
}, +}; + +static const struct clk_parent_data gcc_parent_data_6[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll9_out_even.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL1_OUT_EVEN, 2 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_7[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll1.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_8[] = { + { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" }, + { .fw_name = "bi_tcxo" }, +}; + +static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = { + .reg = 0x7b070, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_4, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; +static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = { + .reg = 0x9d06c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_5, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_pipe_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; +static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { + .reg = 0x4906c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_8, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk_src", + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; +static const struct freq_tbl 
ftbl_gcc_ddrss_spad_clk_src[] = { + F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0), + F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0), + F(426400000, P_GCC_GPLL1_OUT_MAIN, 2.5, 0, 0), + F(500000000, P_GCC_GPLL7_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ddrss_spad_clk_src = { + .cmd_rcgr = 0x70004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ddrss_spad_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_spad_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 403000000, + [VDD_LOW_L1] = 426400000, + [VDD_NOMINAL] = 500000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x74004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x75004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + 
.name = "gcc_gp2_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x76004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0x7b074, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = { + .cmd_rcgr = 0x7b058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = 
ftbl_gcc_pcie_0_phy_rchng_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000}, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x9d070, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = { + .cmd_rcgr = 0x9d054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x43010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"gcc_pdm2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 60000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375), + F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75), + F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x28018, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 120000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s1_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + 
F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x28150, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x28288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = 
"gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x283c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x284f8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x28630, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = 
ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x2e018, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 120000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x2e150, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x2e288, + .mnd_width = 16, + .hid_width = 5, + .parent_map = 
gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x2e3c0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x2e4f8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = 
gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x2e630, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_BI_TCXO, 16, 3, 25), + F(400000, P_BI_TCXO, 12, 1, 4), + F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(192000000, P_GCC_GPLL9_OUT_EVEN, 2, 0, 0), + F(384000000, P_GCC_GPLL9_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x26018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .ops = &clk_rcg2_floor_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW_L1] = 384000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 
0x2603c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .ops = &clk_rcg2_floor_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW] = 150000000, + [VDD_LOW_L1] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0), + F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0x4902c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gcc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gcc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 66666667, + [VDD_LOW] = 133333333, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0x49044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + 
.clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0x49070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0x4905c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = { + .halt_reg = 0x7b094, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x7b094, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_noc_pcie_1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0x4908c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4908c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4908c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk 
= { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x48004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0x36010, + .halt_check = BRANCH_HALT_POLL, + .clkr = { + .enable_reg = 0x36010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_hf_axi_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_sf_axi_clk = { + .halt_reg = 0x36014, + .halt_check = BRANCH_HALT_POLL, + .clkr = { + .enable_reg = 0x36014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_sf_axi_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = { + .halt_reg = 0x20034, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x20034, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_pcie_anoc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0x49088, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x49088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x49088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x81154, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x81154, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x81154, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_pcie_sf_clk = { + .halt_reg = 0x9d098, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9d098, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_pcie_sf_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_spad_clk = { + .halt_reg = 0x70000, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x70000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x70000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_spad_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ddrss_spad_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0x37008, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x37008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x37008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x74000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x74000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x75000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x75000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct 
clk_branch gcc_gp3_clk = { + .halt_reg = 0x76000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x76000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x9b010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9b010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x9b010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_memnoc_gfx_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x9b018, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x9b018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_hf_axi1_clk = { + .halt_reg = 0x42030, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42030, + .hwcg_bit 
= 1, + .clkr = { + .enable_reg = 0x42030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_iris_ss_hf_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_hf_axi1_sreg = { + .sreg_enable_reg = 0x42034, + .sreg_core_ack_bit = BIT(11), + .sreg_periph_ack_bit = BIT(10), + .clkr = { + .enable_reg = 0x42034, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_iris_ss_hf_axi1_sreg", + .ops = &clk_branch2_sreg_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_spd_axi1_clk = { + .halt_reg = 0x70020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x70020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x70020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_iris_ss_spd_axi1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ddrss_spad_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_iris_ss_spd_axi1_sreg = { + .sreg_enable_reg = 0x70024, + .sreg_core_ack_bit = BIT(11), + .sreg_periph_ack_bit = BIT(10), + .clkr = { + .enable_reg = 0x70024, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_iris_ss_spd_axi1_sreg", + .ops = &clk_branch2_sreg_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x7b03c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x7b038, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b038, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + 
.name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x7b02c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x7b02c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_phy_rchng_clk = { + .halt_reg = 0x7b054, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_0_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x7b048, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_0_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x7b020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0x7b01c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x9d038, + .halt_check = BRANCH_HALT_VOTED, + 
.clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(29), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x9d034, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d034, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x9d028, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9d028, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_phy_rchng_clk = { + .halt_reg = 0x9d050, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(23), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_1_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x9d044, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(30), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_1_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x9d01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d01c, + .hwcg_bit = 1, + .clkr = { + 
.enable_reg = 0x62000, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x9d018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x4300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x43004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x43004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x43004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x43008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x43008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0x36008, + .halt_check = BRANCH_HALT_POLL, + .clkr = { + .enable_reg = 0x36008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_nrt_ahb_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0x3600c, + .halt_check = BRANCH_HALT_POLL, + .clkr = { + .enable_reg = 0x3600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name 
= "gcc_qmip_camera_rt_ahb_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_gpu_ahb_clk = { + .halt_reg = 0x9b008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9b008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x9b008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_gpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_pcie_ahb_clk = { + .halt_reg = 0x7b018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7b018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62000, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_pcie_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = { + .halt_reg = 0x42014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_cv_cpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0x42008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_lsr_ahb_clk = { + .halt_reg = 0x4204c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4204c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4204c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_lsr_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = { + .halt_reg = 0x42010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x42010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42010, + .enable_mask = 
BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_v_cpu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0x4200c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4200c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x33034, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x33024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x2800c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x28144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(23), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 
0x2827c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(24), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x283b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x284ec, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x28624, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x3317c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk 
= { + .halt_reg = 0x3316c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x2e00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x2e144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x2e27c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x2e3b4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x2e4ec, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x2e624, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x28004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x28004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x28008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x28008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62008, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x2e004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2e004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x2e008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2e008, + .hwcg_bit = 
1, + .clkr = { + .enable_reg = 0x62010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x26010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x26004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_sdcc1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x26030, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x26030, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_sdcc1_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0x49018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0x49028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_usb30_prim_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0x49024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0x49060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49060, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0x49064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0x49068, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x49068, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x49068, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0x42018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42018, + 
.hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_sreg = { + .sreg_enable_reg = 0x4201C, + .sreg_core_ack_bit = BIT(11), + .sreg_periph_ack_bit = BIT(10), + .clkr = { + .enable_reg = 0x4201C, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi0_sreg", + .ops = &clk_branch2_sreg_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0x42024, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x42024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x42024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_sreg = { + .sreg_enable_reg = 0x42028, + .sreg_core_ack_bit = BIT(11), + .sreg_periph_ack_bit = BIT(10), + .clkr = { + .enable_reg = 0x42028, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi1_sreg", + .ops = &clk_branch2_sreg_ops, + }, + }, +}; + +static struct clk_regmap *gcc_neo_clocks[] = { + [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr, + [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr, + [GCC_DDRSS_SPAD_CLK] = &gcc_ddrss_spad_clk.clkr, + [GCC_DDRSS_SPAD_CLK_SRC] = &gcc_ddrss_spad_clk_src.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + 
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPLL0] = &gcc_gpll0.clkr, + [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr, + [GCC_GPLL1] = &gcc_gpll1.clkr, + [GCC_GPLL4] = &gcc_gpll4.clkr, + [GCC_GPLL5] = &gcc_gpll5.clkr, + [GCC_GPLL7] = &gcc_gpll7.clkr, + [GCC_GPLL9] = &gcc_gpll9.clkr, + [GCC_GPLL9_OUT_EVEN] = &gcc_gpll9_out_even.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_IRIS_SS_HF_AXI1_CLK] = &gcc_iris_ss_hf_axi1_clk.clkr, + [GCC_IRIS_SS_HF_AXI1_SREG] = &gcc_iris_ss_hf_axi1_sreg.clkr, + [GCC_IRIS_SS_SPD_AXI1_CLK] = &gcc_iris_ss_spd_axi1_clk.clkr, + [GCC_IRIS_SS_SPD_AXI1_SREG] = &gcc_iris_ss_spd_axi1_sreg.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr, + [GCC_PCIE_1_PIPE_CLK] = 
&gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr, + [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_LSR_AHB_CLK] = &gcc_qmip_video_lsr_ahb_clk.clkr, + [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = 
&gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + 
[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI0_SREG] = &gcc_video_axi0_sreg.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, + [GCC_VIDEO_AXI1_SREG] = &gcc_video_axi1_sreg.clkr, +}; + +static const struct qcom_reset_map gcc_neo_resets[] = { + [GCC_DISPLAY_BCR] = { 0x37000 }, + [GCC_GPU_BCR] = { 0x9b000 }, + [GCC_PCIE_0_BCR] = { 0x7b000 }, + [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 }, + [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 }, + [GCC_PCIE_0_PHY_BCR] = { 0x7c01c }, + [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 }, + [GCC_PCIE_1_BCR] = { 0x9d000 }, + [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 }, + [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 }, + [GCC_PCIE_1_PHY_BCR] = { 0x9e01c }, + [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e024 }, + [GCC_PCIE_PHY_BCR] = { 0x7f000 }, + [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c }, + [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 }, + [GCC_PDM_BCR] = { 0x43000 }, + [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 }, + [GCC_QUPV3_WRAPPER_1_BCR] = { 0x2e000 }, + [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 }, + [GCC_SDCC1_BCR] = { 0x26000 }, + [GCC_USB30_PRIM_BCR] = { 0x49000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 }, + [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 }, + [GCC_USB3_PHY_SEC_BCR] = { 0x6000c }, + [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 }, + [GCC_VIDEO_AXI0_CLK_ARES] = { 0x42018, 2 }, + [GCC_VIDEO_AXI1_CLK_ARES] = { 0x42024, 2 }, + [GCC_VIDEO_BCR] = { 0x42000 }, + [GCC_IRIS_SS_HF_AXI_CLK_ARES] = { 0x42030, 2 }, + [GCC_IRIS_SS_SPD_AXI_CLK_ARES] = { 0x70020, 2 }, + [GCC_DDRSS_SPAD_CLK_ARES] = { 0x70000, 2 }, +}; + + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + 
DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src), +}; + +static const struct regmap_config gcc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1f1030, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_neo_desc = { + .config = &gcc_neo_regmap_config, + .clks = gcc_neo_clocks, + .num_clks = ARRAY_SIZE(gcc_neo_clocks), + .resets = gcc_neo_resets, + .num_resets = ARRAY_SIZE(gcc_neo_resets), + .clk_regulators = gcc_neo_regulators, + .num_clk_regulators = ARRAY_SIZE(gcc_neo_regulators), +}; + +static const struct of_device_id gcc_neo_match_table[] = { + { .compatible = "qcom,neo-gcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_neo_match_table); + +static int gcc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Keep the clocks always-ON + * GCC_DISP_AHB_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK, + * GCC_GPU_CFG_AHB_CLK + */ + regmap_update_bits(regmap, 0x37004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x42048, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x9b004, BIT(0), BIT(0)); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + + ret = qcom_cc_really_probe(pdev, &gcc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GCC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered GCC clocks\n"); + + return ret; +} + +static 
void gcc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &gcc_neo_desc); +} + +static struct platform_driver gcc_neo_driver = { + .probe = gcc_neo_probe, + .driver = { + .name = "gcc-neo", + .of_match_table = gcc_neo_match_table, + .sync_state = gcc_neo_sync_state, + }, +}; + +static int __init gcc_neo_init(void) +{ + return platform_driver_register(&gcc_neo_driver); +} +subsys_initcall(gcc_neo_init); + +static void __exit gcc_neo_exit(void) +{ + platform_driver_unregister(&gcc_neo_driver); +} +module_exit(gcc_neo_exit); + +MODULE_DESCRIPTION("QTI GCC NEO Driver"); +MODULE_LICENSE("GPL"); From a0c89f0f4ad16c4ce563a0e285c3b29774e08442 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 12:44:28 +0530 Subject: [PATCH 053/117] clk: qcom: gpucc-neo: Snapshot of GPUCC driver for NEO Add snapshot of support of Graphics clock controller for clients to be able to request for the clocks on NEO from msm-5.10 branch commit df5992009e3c ("clk: qcom: gpucc-neo: Add support for GPUCC clock driver"). Change-Id: I5dc73e4d29534e9996c3e87b92ee9a684f72ad90 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/gpucc-neo.c | 535 +++++++++++++++++++++++++++++++++++ 3 files changed, 545 insertions(+) create mode 100644 drivers/clk/qcom/gpucc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 7ec24465fc0e..09d5755f5176 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1499,6 +1499,15 @@ config SXR_GCC_NEO NEO devices. Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, UFS, SD/eMMC, PCIE, etc. + +config SXR_GPUCC_NEO + tristate "NEO Graphics Clock Controller" + select SXR_GCC_NEO + help + Support for the graphics clock controller on Qualcomm Technologies, Inc. + NEO devices. + Say Y if you want to support graphics controller devices and + functionality such as 3D graphics. 
endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 6569674d70d8..4cd8309d59e3 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -182,6 +182,7 @@ obj-$(CONFIG_SM_GPUCC_HOLI) += gpucc-holi.o obj-$(CONFIG_SM_GPUCC_PINEAPPLE) += gpucc-pineapple.o obj-$(CONFIG_SM_GPUCC_VOLCANO) += gpucc-volcano.o obj-$(CONFIG_SXR_GPUCC_ANORAK) += gpucc-anorak.o +obj-$(CONFIG_SXR_GPUCC_NEO) += gpucc-neo.o obj-$(CONFIG_SXR_GPUCC_NIOBE) += gpucc-niobe.o obj-$(CONFIG_SM_VIDEOCC_6150) += videocc-sm6150.o obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o diff --git a/drivers/clk/qcom/gpucc-neo.c b/drivers/clk/qcom/gpucc-neo.c new file mode 100644 index 000000000000..1610876d8e5d --- /dev/null +++ b/drivers/clk/qcom/gpucc-neo.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NOMINAL + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxa, VDD_NOMINAL + 1, 1, vdd_corner); + +static struct clk_vdd_class *gpu_cc_neo_regulators[] = { + &vdd_cx, + &vdd_mxa, +}; + +enum { + P_BI_TCXO, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL1_OUT_MAIN, +}; + +static const struct pll_vco lucid_ole_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 470MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll0_config = { + .l = 0x18, + .cal_l = 0x44, + .alpha = 0x7AAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + 
.user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll gpu_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +/* 440MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x16, + .cal_l = 0x44, + .alpha = 0xEAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_0[] = { + { 
.fw_name = "bi_tcxo" }, + { .fw_name = "gpll0_out_main" }, + { .fw_name = "gpll0_out_main_div" }, +}; + +static const struct parent_map gpu_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_1[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpu_cc_pll0.clkr.hw }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .fw_name = "gpll0_out_main" }, + { .fw_name = "gpll0_out_main_div" }, +}; + +static const struct parent_map gpu_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_2[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .fw_name = "gpll0_out_main" }, + { .fw_name = "gpll0_out_main_div" }, +}; + +static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_ff_clk_src = { + .cmd_rcgr = 0x9474, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ff_clk_src", + .parent_data = gpu_cc_parent_data_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0), + F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x9318, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_1, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + 
.enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gmu_clk_src", + .parent_data = gpu_cc_parent_data_1, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = gpu_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(gpu_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 220000000, + [VDD_LOW] = 550000000}, + }, +}; + +static struct clk_rcg2 gpu_cc_hub_clk_src = { + .cmd_rcgr = 0x93ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_2, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hub_clk_src", + .parent_data = gpu_cc_parent_data_2, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 200000000}, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x911c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x911c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x9120, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9120, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_crc_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_ff_clk = { + .halt_reg = 0x914c, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x914c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_ff_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_ff_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x913c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x913c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gmu_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x9004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x9144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9144, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_clk", + .flags = CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x90bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90bc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gmu_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_aon_clk = { + .halt_reg = 0x93e8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x93e8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hub_aon_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_cx_int_clk = { + .halt_reg = 0x9148, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9148, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hub_cx_int_clk", + .parent_hws = (const struct clk_hw*[]){ + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_DONT_HOLD_STATE, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_memnoc_gfx_clk = { + .halt_reg = 0x9150, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { + .halt_reg = 0x7000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_sleep_clk = { + .halt_reg = 0x9134, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9134, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gpu_cc_neo_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr, 
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr, + [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr, + [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr, + [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr, + [GPU_CC_PLL0] = &gpu_cc_pll0.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr, +}; + +static const struct regmap_config gpu_cc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9988, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_neo_desc = { + .config = &gpu_cc_neo_regmap_config, + .clks = gpu_cc_neo_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_neo_clocks), + .clk_regulators = gpu_cc_neo_regulators, + .num_clk_regulators = ARRAY_SIZE(gpu_cc_neo_regulators), +}; + +static const struct of_device_id gpu_cc_neo_match_table[] = { + { .compatible = "qcom,neo-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_neo_match_table); + +static int gpu_cc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gpu_cc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + /*Keep the clock always-ON + * gpu_cc_demet_clk + */ + regmap_update_bits(regmap, 0x0900C, BIT(0), BIT(0)); + + ret = qcom_cc_really_probe(pdev, &gpu_cc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GPU CC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered GPU CC clocks\n"); + + return ret; +} + +static void gpu_cc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &gpu_cc_neo_desc); +} + +static struct platform_driver gpu_cc_neo_driver = { + .probe = gpu_cc_neo_probe, + .driver = { + .name = "gpu_cc-neo", + .of_match_table = gpu_cc_neo_match_table, + .sync_state = gpu_cc_neo_sync_state, + }, +}; + +static int __init 
gpu_cc_neo_init(void) +{ + return platform_driver_register(&gpu_cc_neo_driver); +} +subsys_initcall(gpu_cc_neo_init); + +static void __exit gpu_cc_neo_exit(void) +{ + platform_driver_unregister(&gpu_cc_neo_driver); +} +module_exit(gpu_cc_neo_exit); + +MODULE_DESCRIPTION("QTI GPU_CC NEO Driver"); +MODULE_LICENSE("GPL"); From a4c4f7b03f3052a5e0dec58f49096025d2512f6e Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 12:54:23 +0530 Subject: [PATCH 054/117] clk: qcom: camcc-neo: Snapshot of CAMCC driver for NEO Add snapshot of support of camera clock controller for camera clients to be able to request for the clocks on NEO from msm-5.10 branch commit 1265980b9a01 ("clk: qcom: camcc-neo: Add support for CAMCC on NEO"). Change-Id: I409cacf7d7dc6691cdccbc505ba805529111b3c9 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/camcc-neo.c | 2788 ++++++++++++++++++++++++++++++++++ 3 files changed, 2798 insertions(+) create mode 100644 drivers/clk/qcom/camcc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 09d5755f5176..3487c5b78e2d 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1508,6 +1508,15 @@ config SXR_GPUCC_NEO NEO devices. Say Y if you want to support graphics controller devices and functionality such as 3D graphics. + +config SXR_CAMCC_NEO + tristate "NEO Camera Clock Controller" + select SXR_GCC_NEO + help + Support for the camera clock controller on Qualcomm Technologies, Inc. + NEO devices. + Say Y if you want to support camera devices and functionality such as + capturing pictures. 
endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 4cd8309d59e3..82f17001b2f4 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -161,6 +161,7 @@ obj-$(CONFIG_SM_GPUCC_PITTI) += gpucc-pitti.o obj-$(CONFIG_SM_CAMCC_CLIFFS) += camcc-cliffs.o obj-$(CONFIG_SM_CAMCC_PINEAPPLE) += camcc-pineapple.o obj-$(CONFIG_SXR_CAMCC_ANORAK) += camcc-anorak.o +obj-$(CONFIG_SXR_CAMCC_NEO) += camcc-neo.o obj-$(CONFIG_SXR_CAMCC_NIOBE) += camcc-niobe.o obj-$(CONFIG_SM_CAMCC_VOLCANO) += camcc-volcano.o obj-$(CONFIG_SM_GCC_KALAMA) += gcc-kalama.o diff --git a/drivers/clk/qcom/camcc-neo.c b/drivers/clk/qcom/camcc-neo.c new file mode 100644 index 000000000000..44c2b0b4e52f --- /dev/null +++ b/drivers/clk/qcom/camcc-neo.c @@ -0,0 +1,2788 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NOMINAL + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxa, VDD_LOW + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxc, VDD_NOMINAL + 1, 1, vdd_corner); + +static struct clk_vdd_class *cam_cc_neo_regulators[] = { + &vdd_mm, + &vdd_mxa, + &vdd_mxc, +}; + +static struct clk_vdd_class *cam_cc_neo_regulators_1[] = { + &vdd_mm, + &vdd_mxc, +}; + +enum { + P_BI_TCXO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL2_OUT_EVEN, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, + P_CAM_CC_PLL5_OUT_EVEN, + P_CAM_CC_PLL5_OUT_MAIN, + P_CAM_CC_PLL6_OUT_EVEN, + P_CAM_CC_PLL6_OUT_ODD, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_ole_vco[] = { + { 249600000, 2000000000, 0 }, +}; + 
+static const struct pll_vco rivian_ole_vco[] = { + { 777000000, 1285000000, 0 }, +}; + +/* 1200Mhz Configuration */ +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3E, + .cal_l = 0x44, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00008401, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { + { 0x2, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv 
cam_cc_pll0_out_odd = { + .offset = 0x0, + .post_div_shift = 14, + .post_div_table = post_div_table_cam_cc_pll0_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +/* 728MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll1_config = { + .l = 0x25, + .cal_l = 0x44, + .alpha = 0xEAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { + .offset = 0x1000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll1_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even), + .width = 4, + .regs = 
clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll1.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +/* 960MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x32, + .cal_l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x10000030, + .config_ctl_hi_val = 0x80890263, + .config_ctl_hi1_val = 0x00000217, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00100000, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .vco_table = rivian_ole_vco, + .num_vco = ARRAY_SIZE(rivian_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_rivian_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOW] = 1285000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll2_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = { + .offset = 0x2000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll2_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll2.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_rivian_evo_ops, + }, +}; + +/* 864MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x2D, + .cal_l = 0x44, + .alpha = 0x0, + 
.config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x3000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll3_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +/* 864MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll4_config = { + .l = 0x2D, + .cal_l = 0x44, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + 
.user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x4000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll4", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { + .offset = 0x4000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll4_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +/* 1200MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll5_config = { + .l = 0x3E, + .cal_l = 0x44, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll5 = { + .offset = 0x5000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = 
clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll5", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = { + .offset = 0x5000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll5_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll5_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll5.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +/* 800MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll6_config = { + .l = 0x29, + .cal_l = 0x44, + .alpha = 0xAAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll cam_cc_pll6 = { + .offset = 0x6000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll6", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = 
&clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = { + .offset = 0x6000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll6_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll6_out_even", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_pll6.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ole_ops, + }, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL6_OUT_ODD, 4 }, + { P_CAM_CC_PLL6_OUT_EVEN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll6.clkr.hw }, + { .hw = &cam_cc_pll6_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EVEN, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_1[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll2_out_even.clkr.hw }, + { .hw = &cam_cc_pll2.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL3_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_2[] = { + { 
.fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll3_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_3[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll4_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL5_OUT_MAIN, 5 }, + { P_CAM_CC_PLL5_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_4[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll5.clkr.hw }, + { .hw = &cam_cc_pll5_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL1_OUT_EVEN, 4 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_5[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &cam_cc_pll1_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_6[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_6[] = { + { .fw_name = "sleep_clk" }, +}; + +static const struct parent_map cam_cc_parent_map_7[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_7_ao[] = { + { .fw_name = "bi_tcxo_ao" }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0x10278, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct 
clk_init_data){ + .name = "cam_cc_bps_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 400000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .cmd_rcgr = 0x13b24, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_axi_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0x13514, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_0_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = 
&vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0x13644, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_1_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static struct clk_rcg2 cam_cc_cci_2_clk_src = { + .cmd_rcgr = 0x13774, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_2_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static struct clk_rcg2 cam_cc_cci_3_clk_src = { + .cmd_rcgr = 0x138a4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_3_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 
0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0x11164, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cphy_rx_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW] = 480000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x15980, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x15ab8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 
400000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x15bec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi2phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x15d20, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000}, + }, +}; + +static struct clk_rcg2 cam_cc_csid_clk_src = { + .cmd_rcgr = 0x139ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW] = 480000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 
6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x10018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fast_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW] = 200000000, + [VDD_NOMINAL] = 400000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0x133d8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW] = 480000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(432000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + 
F(727000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0x11018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk_src", + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 432000000, + [VDD_LOW] = 594000000, + [VDD_NOMINAL] = 727000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(727000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0x12018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_ife_1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 432000000, + [VDD_LOW] = 594000000, + [VDD_NOMINAL] = 727000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL5_OUT_MAIN, 16, 0, 0), + F(100000000, P_CAM_CC_PLL5_OUT_MAIN, 12, 0, 0), + F(150000000, 
P_CAM_CC_PLL5_OUT_MAIN, 8, 0, 0), + F(200000000, P_CAM_CC_PLL5_OUT_MAIN, 6, 0, 0), + F(300000000, P_CAM_CC_PLL5_OUT_MAIN, 4, 0, 0), + F(400000000, P_CAM_CC_PLL5_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL5_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .cmd_rcgr = 0x13000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_ife_lite_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk_src", + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW] = 480000000}, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .cmd_rcgr = 0x1313c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 400000000, + [VDD_LOW] = 480000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(364000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(500000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_nps_clk_src = { + .cmd_rcgr = 0x103cc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = 
cam_cc_parent_map_5, + .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_nps_clk_src", + .parent_data = cam_cc_parent_data_5, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 364000000, + [VDD_LOW] = 500000000, + [VDD_NOMINAL] = 700000000}, + }, +}; + +static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .cmd_rcgr = 0x1327c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 400000000, + [VDD_NOMINAL] = 600000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 5), + F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4), + F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x15000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = 
VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x15130, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x15260, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk2_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x15390, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk4_clk_src = { + .cmd_rcgr = 0x154c0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + 
.clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk4_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk5_clk_src = { + .cmd_rcgr = 0x155f0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk5_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk6_clk_src = { + .cmd_rcgr = 0x15720, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk6_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 68571429}, + }, +}; + +static struct clk_rcg2 cam_cc_mclk7_clk_src = { + .cmd_rcgr = 0x15850, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk7_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mxa, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { 
+ [VDD_LOWER] = 68571429}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_qdss_debug_clk_src = { + .cmd_rcgr = 0x13c6c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_qdss_debug_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 150000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_sleep_clk_src = { + .cmd_rcgr = 0x14148, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_6, + .freq_tbl = ftbl_cam_cc_sleep_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_sleep_clk_src", + .parent_data = cam_cc_parent_data_6, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_6), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 32000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x10148, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + 
.name = "cam_cc_slow_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = cam_cc_neo_regulators_1, + .num_vdd_classes = ARRAY_SIZE(cam_cc_neo_regulators_1), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 80000000}, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_xo_clk_src = { + .cmd_rcgr = 0x14018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_7, + .freq_tbl = ftbl_cam_cc_xo_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_xo_clk_src", + .parent_data = cam_cc_parent_data_7_ao, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_7_ao), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0x10274, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10274, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0x103a4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x103a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_fast_ahb_clk = { + .halt_reg = 0x10144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10144, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_ahb_clk = { + .halt_reg = 0x13c5c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13c5c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0x13c50, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13c50, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_dcd_xo_clk = { + .halt_reg = 0x13c60, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13c60, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_dcd_xo_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_xo_clk = { + .halt_reg = 0x13c64, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13c64, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_xo_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0x13640, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13640, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + 
.name = "cam_cc_cci_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cci_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0x13770, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13770, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cci_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_2_clk = { + .halt_reg = 0x138a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x138a0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_2_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cci_2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_3_clk = { + .halt_reg = 0x139d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x139d0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_3_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cci_3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0x14014, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x14014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_core_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0x139d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x139d4, + .enable_mask = BIT(0), + 
.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_bps_clk = { + .halt_reg = 0x103b0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x103b0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_bps_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_fast_ahb_clk = { + .halt_reg = 0x139e0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x139e0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_0_clk = { + .halt_reg = 0x11150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ife_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_1_clk = { + .halt_reg = 0x12150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ife_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ife_lite_clk = { + .halt_reg = 0x13138, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x13138, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ipe_nps_clk = { + .halt_reg = 0x10504, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10504, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ipe_nps_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x15aac, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15aac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x15be4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15be4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x15d18, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15d18, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi2phytimer_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_csi2phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x15e4c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15e4c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_csi3phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_clk = { + .halt_reg = 0x13b18, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13b18, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csid_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csid_csiphy_rx_clk = { + .halt_reg = 0x15ab4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15ab4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csid_csiphy_rx_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x15ab0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15ab0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy0_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x15be8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15be8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy1_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x15d1c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15d1c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy2_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x15e50, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15e50, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy3_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_drv_ahb_clk = { + .halt_reg = 0x14280, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14280, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_drv_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_drv_xo_clk = { + .halt_reg = 0x1427c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1427c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_drv_xo_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ahb_clk = { + .halt_reg = 0x13510, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13510, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_ahb_clk", 
+ .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0x13504, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13504, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_icp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0x11144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11144, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_dsp_clk = { + .halt_reg = 0x11154, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_dsp_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_fast_ahb_clk = { + .halt_reg = 0x11160, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11160, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0x12144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12144, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "cam_cc_ife_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_dsp_clk = { + .halt_reg = 0x12154, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_dsp_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_fast_ahb_clk = { + .halt_reg = 0x12160, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12160, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_ahb_clk = { + .halt_reg = 0x13278, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13278, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_clk = { + .halt_reg = 0x1312c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1312c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_lite_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = { + .halt_reg = 0x13274, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x13274, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_csid_clk = { + .halt_reg = 0x13268, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13268, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ife_lite_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_ahb_clk = { + .halt_reg = 0x1051c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1051c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_nps_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_clk = { + .halt_reg = 0x104f8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x104f8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_nps_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = { + .halt_reg = 0x10520, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10520, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_nps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_pps_clk = { + .halt_reg = 0x10508, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10508, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_pps_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_ipe_nps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = { + .halt_reg = 0x10524, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10524, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_pps_fast_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_1_clk = { + .halt_reg = 0x133b4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x133b4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_jpeg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_2_clk = { + .halt_reg = 0x133c0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x133c0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_2_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_jpeg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_clk = { + .halt_reg = 0x133a8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x133a8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_jpeg_clk_src.clkr.hw, + }, + 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x1512c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1512c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x1525c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1525c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x1538c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1538c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk2_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x154bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x154bc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk4_clk = { + .halt_reg = 0x155ec, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x155ec, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk4_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&cam_cc_mclk4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk5_clk = { + .halt_reg = 0x1571c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1571c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk5_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk6_clk = { + .halt_reg = 0x1584c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1584c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk6_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk7_clk = { + .halt_reg = 0x1597c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1597c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk7_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_mclk7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_qdss_debug_clk = { + .halt_reg = 0x13d98, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13d98, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_qdss_debug_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_qdss_debug_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_qdss_debug_xo_clk = { + .halt_reg = 0x13d9c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13d9c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"cam_cc_qdss_debug_xo_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sleep_clk = { + .halt_reg = 0x14274, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14274, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_sleep_clk", + .parent_hws = (const struct clk_hw*[]){ + &cam_cc_sleep_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *cam_cc_neo_clocks[] = { + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr, + [CAM_CC_CAMNOC_AHB_CLK] = &cam_cc_camnoc_ahb_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr, + [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr, + [CAM_CC_CAMNOC_XO_CLK] = &cam_cc_camnoc_xo_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr, + [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr, + [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr, + [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr, + [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr, + [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr, + [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr, + [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr, + [CAM_CC_CPAS_IPE_NPS_CLK] = 
&cam_cc_cpas_ipe_nps_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr, + [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr, + [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_DRV_AHB_CLK] = &cam_cc_drv_ahb_clk.clkr, + [CAM_CC_DRV_XO_CLK] = &cam_cc_drv_xo_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr, + [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr, + [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr, + [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr, + [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr, + [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr, + [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr, + [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK] 
= &cam_cc_ife_lite_csid_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr, + [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr, + [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr, + [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr, + [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr, + [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr, + [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr, + [CAM_CC_JPEG_1_CLK] = &cam_cc_jpeg_1_clk.clkr, + [CAM_CC_JPEG_2_CLK] = &cam_cc_jpeg_2_clk.clkr, + [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr, + [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr, + [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr, + [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr, + [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr, + [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr, + [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr, + [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr, + [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr, + 
[CAM_CC_PLL5] = &cam_cc_pll5.clkr, + [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr, + [CAM_CC_PLL6] = &cam_cc_pll6.clkr, + [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr, + [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr, + [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr, + [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr, + [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr, + [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, +}; + +static const struct qcom_reset_map cam_cc_neo_resets[] = { + [CAM_CC_BPS_BCR] = { 0x10000 }, + [CAM_CC_DRV_BCR] = { 0x14278 }, + [CAM_CC_ICP_BCR] = { 0x133d4 }, + [CAM_CC_IFE_0_BCR] = { 0x11000 }, + [CAM_CC_IFE_1_BCR] = { 0x12000 }, + [CAM_CC_IPE_0_BCR] = { 0x103b4 }, + [CAM_CC_QDSS_DEBUG_BCR] = { 0x13c68 }, +}; + +static const struct regmap_config cam_cc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1603c, + .fast_io = true, +}; + +static struct qcom_cc_desc cam_cc_neo_desc = { + .config = &cam_cc_neo_regmap_config, + .clks = cam_cc_neo_clocks, + .num_clks = ARRAY_SIZE(cam_cc_neo_clocks), + .resets = cam_cc_neo_resets, + .num_resets = ARRAY_SIZE(cam_cc_neo_resets), + .clk_regulators = cam_cc_neo_regulators, + .num_clk_regulators = ARRAY_SIZE(cam_cc_neo_regulators), +}; + +static const struct of_device_id cam_cc_neo_match_table[] = { + { .compatible = "qcom,neo-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_neo_match_table); + +static int cam_cc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &cam_cc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_runtime_init(pdev, &cam_cc_neo_desc); + if (ret) + return ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret) + return ret; + + clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, 
&cam_cc_pll0_config); + clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_rivian_ole_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); + clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config); + + /* + * Keep clocks always enabled: + * cam_cc_gdsc_clk + */ + regmap_update_bits(regmap, 0x14144, BIT(0), BIT(0)); + + ret = qcom_cc_really_probe(pdev, &cam_cc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register CAM CC clocks\n"); + return ret; + } + + pm_runtime_put_sync(&pdev->dev); + dev_info(&pdev->dev, "Registered CAM CC clocks\n"); + + return ret; +} + +static void cam_cc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &cam_cc_neo_desc); +} + +static const struct dev_pm_ops cam_cc_neo_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_cc_runtime_suspend, qcom_cc_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static struct platform_driver cam_cc_neo_driver = { + .probe = cam_cc_neo_probe, + .driver = { + .name = "cam_cc-neo", + .of_match_table = cam_cc_neo_match_table, + .sync_state = cam_cc_neo_sync_state, + .pm = &cam_cc_neo_pm_ops, + }, +}; + +static int __init cam_cc_neo_init(void) +{ + return platform_driver_register(&cam_cc_neo_driver); +} +subsys_initcall(cam_cc_neo_init); + +static void __exit cam_cc_neo_exit(void) +{ + platform_driver_unregister(&cam_cc_neo_driver); +} +module_exit(cam_cc_neo_exit); + +MODULE_DESCRIPTION("QTI CAM_CC NEO Driver"); +MODULE_LICENSE("GPL"); From 7e44e7aedc77a8fd6229071f4b1b28769dc02323 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 14:13:39 +0530 Subject: [PATCH 055/117] clk: qcom: dispcc-neo: Snapshot of DISPCC driver for NEO Add snapshot of support for 
display clock controller, so that display clients can request clocks on the NEO platform
endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 82f17001b2f4..4f0302db6d60 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -127,6 +127,7 @@ obj-$(CONFIG_SM_DISPCC_HOLI) += dispcc-holi.o obj-$(CONFIG_SM_DISPCC_PINEAPPLE) += dispcc-pineapple.o obj-$(CONFIG_SM_DISPCC_PITTI) += dispcc-pitti.o obj-$(CONFIG_SXR_DISPCC_ANORAK) += dispcc0-anorak.o dispcc1-anorak.o +obj-$(CONFIG_SXR_DISPCC_NEO) += dispcc-neo.o obj-$(CONFIG_SXR_DISPCC_NIOBE) += dispcc0-niobe.o dispcc1-niobe.o obj-$(CONFIG_SM_DISPCC_VOLCANO) += dispcc-volcano.o obj-$(CONFIG_SM_DEBUGCC_BLAIR) += debugcc-blair.o diff --git a/drivers/clk/qcom/dispcc-neo.c b/drivers/clk/qcom/dispcc-neo.c new file mode 100644 index 000000000000..3dacb83ae30a --- /dev/null +++ b/drivers/clk/qcom/dispcc-neo.c @@ -0,0 +1,1971 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +#define DISP_CC_MISC_CMD 0xF000 + +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NOMINAL + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxa, VDD_NOMINAL + 1, 1, vdd_corner); + +static struct clk_vdd_class *disp_cc_neo_regulators[] = { + &vdd_mm, + &vdd_mxa, +}; + +enum { + P_BI_TCXO, + P_DISP_CC_PLL0_OUT_MAIN, + P_DISP_CC_PLL1_OUT_EVEN, + P_DISP_CC_PLL1_OUT_MAIN, + P_DP0_PHY_PLL_LINK_CLK, + P_DP0_PHY_PLL_VCO_DIV_CLK, + P_DP1_PHY_PLL_LINK_CLK, + P_DP1_PHY_PLL_VCO_DIV_CLK, + P_DP2_PHY_PLL_LINK_CLK, + P_DP2_PHY_PLL_VCO_DIV_CLK, + P_DP3_PHY_PLL_LINK_CLK, + P_DP3_PHY_PLL_VCO_DIV_CLK, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_DSI1_PHY_PLL_OUT_BYTECLK, + P_DSI1_PHY_PLL_OUT_DSICLK, + P_SLEEP_CLK, +}; + +static const struct pll_vco 
lucid_ole_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 600MHz Configuration */ +static const struct alpha_pll_config disp_cc_pll0_config = { + .l = 0x1F, + .cal_l = 0x44, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll disp_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_pll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +/* 600MHz Configuration */ +static const struct alpha_pll_config disp_cc_pll1_config = { + .l = 0x1F, + .cal_l = 0x44, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll disp_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = 
&clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct parent_map disp_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, +}; + +static const struct clk_parent_data disp_cc_parent_data_0_ao[] = { + { .fw_name = "bi_tcxo_ao" }, +}; + +static const struct parent_map disp_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, + { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, + { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_1[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "dp3_phy_pll_vco_div_clk", .name = "dp3_phy_pll_vco_div_clk" }, + { .fw_name = "dp1_phy_pll_vco_div_clk", .name = "dp1_phy_pll_vco_div_clk" }, + { .fw_name = "dp2_phy_pll_vco_div_clk", .name = "dp2_phy_pll_vco_div_clk" }, +}; + +static const struct parent_map disp_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_2[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "dsi0_phy_pll_out_dsiclk", .name = "dsi0_phy_pll_out_dsiclk" }, + { .fw_name = "dsi0_phy_pll_out_byteclk", .name = "dsi0_phy_pll_out_byteclk" }, + { .fw_name = "dsi1_phy_pll_out_dsiclk", .name = "dsi1_phy_pll_out_dsiclk" }, + { .fw_name = "dsi1_phy_pll_out_byteclk", .name = "dsi1_phy_pll_out_byteclk" }, +}; + +static const struct parent_map disp_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_DP1_PHY_PLL_LINK_CLK, 2 }, + { P_DP2_PHY_PLL_LINK_CLK, 3 }, + { P_DP3_PHY_PLL_LINK_CLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_3[] = { + { 
.fw_name = "bi_tcxo" }, + { .fw_name = "dp1_phy_pll_link_clk", .name = "dp1_phy_pll_link_clk" }, + { .fw_name = "dp2_phy_pll_link_clk", .name = "dp2_phy_pll_link_clk" }, + { .fw_name = "dp3_phy_pll_link_clk", .name = "dp3_phy_pll_link_clk" }, +}; + +static const struct parent_map disp_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, + { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, + { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_4[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "dp0_phy_pll_link_clk", .name = "dp0_phy_pll_link_clk" }, + { .fw_name = "dp0_phy_pll_vco_div_clk", .name = "dp0_phy_pll_vco_div_clk" }, + { .fw_name = "dp3_phy_pll_vco_div_clk", .name = "dp3_phy_pll_vco_div_clk" }, + { .fw_name = "dp1_phy_pll_vco_div_clk", .name = "dp1_phy_pll_vco_div_clk" }, + { .fw_name = "dp2_phy_pll_vco_div_clk", .name = "dp2_phy_pll_vco_div_clk" }, +}; + +static const struct parent_map disp_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_5[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "dsi0_phy_pll_out_byteclk", .name = "dsi0_phy_pll_out_byteclk" }, + { .fw_name = "dsi1_phy_pll_out_byteclk", .name = "dsi1_phy_pll_out_byteclk" }, +}; + +static const struct parent_map disp_cc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_6[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &disp_cc_pll1.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_DP0_PHY_PLL_LINK_CLK, 1 }, + { P_DP1_PHY_PLL_LINK_CLK, 2 }, + { P_DP2_PHY_PLL_LINK_CLK, 3 }, + { P_DP3_PHY_PLL_LINK_CLK, 4 }, +}; + +static const struct clk_parent_data 
disp_cc_parent_data_7[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "dp0_phy_pll_link_clk", .name = "dp0_phy_pll_link_clk" }, + { .fw_name = "dp1_phy_pll_link_clk", .name = "dp1_phy_pll_link_clk" }, + { .fw_name = "dp2_phy_pll_link_clk", .name = "dp2_phy_pll_link_clk" }, + { .fw_name = "dp3_phy_pll_link_clk", .name = "dp3_phy_pll_link_clk" }, +}; + +static const struct parent_map disp_cc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_DISP_CC_PLL0_OUT_MAIN, 1 }, + { P_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_8[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &disp_cc_pll0.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_parent_map_9[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_9[] = { + { .fw_name = "sleep_clk" }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), + F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { + .cmd_rcgr = 0x82e8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_6, + .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_ahb_clk_src", + .parent_data = disp_cc_parent_data_6, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_6), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 37500000, + [VDD_NOMINAL] = 75000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { + .cmd_rcgr = 0x8108, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"disp_cc_mdss_byte0_clk_src", + .parent_data = disp_cc_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 187500000, + [VDD_LOW] = 300000000, + [VDD_NOMINAL] = 358000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = { + .cmd_rcgr = 0x8124, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte1_clk_src", + .parent_data = disp_cc_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 187500000, + [VDD_LOW] = 300000000, + [VDD_NOMINAL] = 358000000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { + .cmd_rcgr = 0x81bc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_aux_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = { + F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), + F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), + { } +}; + 
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { + .cmd_rcgr = 0x8170, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_7, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_link_clk_src", + .parent_data = disp_cc_parent_data_7, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 270000, + [VDD_NOMINAL] = 810000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = { + .cmd_rcgr = 0x818c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_pixel0_clk_src", + .parent_data = disp_cc_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = { + .cmd_rcgr = 0x81a4, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_pixel1_clk_src", + .parent_data = disp_cc_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + 
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { + .cmd_rcgr = 0x8220, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_aux_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { + .cmd_rcgr = 0x8204, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_3, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_link_clk_src", + .parent_data = disp_cc_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 270000, + [VDD_NOMINAL] = 810000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = { + .cmd_rcgr = 0x81d4, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_pixel0_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = { + .cmd_rcgr = 0x81ec, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"disp_cc_mdss_dptx1_pixel1_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = { + .cmd_rcgr = 0x8284, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_aux_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { + .cmd_rcgr = 0x8238, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_3, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_link_clk_src", + .parent_data = disp_cc_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 270000, + [VDD_NOMINAL] = 810000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = { + .cmd_rcgr = 0x8254, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_pixel0_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + 
.num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = { + .cmd_rcgr = 0x826c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_pixel1_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = { + .cmd_rcgr = 0x82d0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_aux_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { + .cmd_rcgr = 0x82b4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_3, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_link_clk_src", + .parent_data = disp_cc_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 270000, + [VDD_LOW] = 594000, + [VDD_NOMINAL] = 810000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = { + .cmd_rcgr = 0x829c, + 
.mnd_width = 16, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_pixel0_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_dp_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 337500, + [VDD_NOMINAL] = 675000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { + .cmd_rcgr = 0x8140, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_5, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc0_clk_src", + .parent_data = disp_cc_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { + .cmd_rcgr = 0x8158, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_5, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc1_clk_src", + .parent_data = disp_cc_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { + F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 
disp_cc_mdss_mdp_clk_src = { + .cmd_rcgr = 0x80d8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_8, + .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_clk_src", + .parent_data = disp_cc_parent_data_8, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_8), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 325000000, + [VDD_NOMINAL] = 514000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { + .cmd_rcgr = 0x80a8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk0_clk_src", + .parent_data = disp_cc_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 480000000, + [VDD_NOMINAL] = 625000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = { + .cmd_rcgr = 0x80c0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk1_clk_src", + .parent_data = disp_cc_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, + .clkr.vdd_data = { + .vdd_classes = disp_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(disp_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 
480000000, + [VDD_NOMINAL] = 625000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { + .cmd_rcgr = 0x80f0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_vsync_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_sleep_clk_src = { + .cmd_rcgr = 0xe05c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_9, + .freq_tbl = ftbl_disp_cc_sleep_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_sleep_clk_src", + .parent_data = disp_cc_parent_data_9, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_9), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 32000}, + }, +}; + +static struct clk_rcg2 disp_cc_xo_clk_src = { + .cmd_rcgr = 0xe03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_xo_clk_src", + .parent_data = disp_cc_parent_data_0_ao, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { + .reg = 0x8120, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_byte0_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = { + .reg = 0x813c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_byte1_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = { + .reg = 0x8188, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_dptx0_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = { + .reg = 0x821c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_dptx1_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = { + .reg = 0x8250, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_dptx2_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = { + .reg = 0x82cc, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_dptx3_link_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static 
struct clk_branch disp_cc_mdss_accu_clk = { + .halt_reg = 0xe058, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xe058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_accu_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_ahb1_clk = { + .halt_reg = 0xa020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_ahb1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_DONT_HOLD_STATE | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_ahb_clk = { + .halt_reg = 0x80a4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x80a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_DONT_HOLD_STATE | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_clk = { + .halt_reg = 0x8028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_intf_clk = { + .halt_reg = 0x802c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x802c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte0_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&disp_cc_mdss_byte0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte1_clk = { + .halt_reg = 0x8030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte1_intf_clk = { + .halt_reg = 0x8034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte1_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_byte1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_aux_clk = { + .halt_reg = 0x8058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = { + .halt_reg = 0x804c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x804c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_crypto_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_link_clk = { + .halt_reg = 0x8040, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x8040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_link_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = { + .halt_reg = 0x8048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = { + .halt_reg = 0x8050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_pixel0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = { + .halt_reg = 0x8054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_pixel1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = { + .halt_reg = 0x8044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_aux_clk = { + .halt_reg = 0x8074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8074, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = { + .halt_reg = 0x8070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8070, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_crypto_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_link_clk = { + .halt_reg = 0x8064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_link_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = { + .halt_reg = 0x806c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x806c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = { + .halt_reg = 0x805c, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x805c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_pixel0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = { + .halt_reg = 0x8060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8060, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_pixel1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = { + .halt_reg = 0x8068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8068, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_aux_clk = { + .halt_reg = 0x808c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x808c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = { + .halt_reg = 0x8088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_crypto_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_link_clk = { + .halt_reg = 0x8080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_link_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = { + .halt_reg = 0x8084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = { + .halt_reg = 0x8078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_pixel0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = { + .halt_reg = 0x807c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x807c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx2_pixel1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx3_aux_clk = { + .halt_reg = 0x809c, + 
.halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x809c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = { + .halt_reg = 0x80a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x80a0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_crypto_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx3_link_clk = { + .halt_reg = 0x8094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8094, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_link_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = { + .halt_reg = 0x8098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_link_intf_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = { + .halt_reg = 0x8090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_dptx3_pixel0_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_esc0_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_esc0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_esc1_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_esc1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp1_clk = { + .halt_reg = 0xa004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_clk = { + .halt_reg = 0x800c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x800c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_lut1_clk = { + .halt_reg = 0xa010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa010, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "disp_cc_mdss_mdp_lut1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_lut_clk = { + .halt_reg = 0x8018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_lut_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { + .halt_reg = 0xc004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xc004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_non_gdsc_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_pclk0_clk = { + .halt_reg = 0x8004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk0_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_pclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_pclk1_clk = { + .halt_reg = 0x8008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_pclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { + 
.halt_reg = 0xc00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rscc_ahb_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { + .halt_reg = 0xc008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rscc_vsync_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_vsync1_clk = { + .halt_reg = 0xa01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_vsync1_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_vsync_clk = { + .halt_reg = 0x8024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_vsync_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_sleep_clk = { + .halt_reg = 0xe074, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xe074, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_sleep_clk", + .parent_hws = (const struct clk_hw*[]){ + &disp_cc_sleep_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, 
+ .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *disp_cc_neo_clocks[] = { + [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr, + [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr, + [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr, + [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr, + [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr, + [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr, + [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr, + [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr, + [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr, + [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr, + [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr, + [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr, + [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr, + [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr, + [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr, + [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr, + [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr, + [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr, + [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr, + [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = + &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr, + [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr, + [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = 
&disp_cc_mdss_dptx1_crypto_clk.clkr, + [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr, + [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr, + [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr, + [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr, + [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr, + [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr, + [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr, + [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = + &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr, + [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr, + [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr, + [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr, + [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr, + [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr, + [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr, + [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr, + [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr, + [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr, + [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr, + [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr, + [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr, + [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr, + [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr, + [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = 
&disp_cc_mdss_dptx3_link_div_clk_src.clkr, + [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr, + [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr, + [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr, + [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr, + [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr, + [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr, + [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr, + [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr, + [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr, + [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr, + [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr, + [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr, + [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, + [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr, + [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr, + [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, + [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, + [DISP_CC_PLL0] = &disp_cc_pll0.clkr, + [DISP_CC_PLL1] = &disp_cc_pll1.clkr, + [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr, + [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr, + [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr, +}; + +static const struct qcom_reset_map disp_cc_neo_resets[] = { + [DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, + [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 }, + [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 }, +}; + +static const struct regmap_config disp_cc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride 
= 4, + .val_bits = 32, + .max_register = 0x11008, + .fast_io = true, +}; + +static struct qcom_cc_desc disp_cc_neo_desc = { + .config = &disp_cc_neo_regmap_config, + .clks = disp_cc_neo_clocks, + .num_clks = ARRAY_SIZE(disp_cc_neo_clocks), + .resets = disp_cc_neo_resets, + .num_resets = ARRAY_SIZE(disp_cc_neo_resets), + .clk_regulators = disp_cc_neo_regulators, + .num_clk_regulators = ARRAY_SIZE(disp_cc_neo_regulators), +}; + +static const struct of_device_id disp_cc_neo_match_table[] = { + { .compatible = "qcom,neo-dispcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, disp_cc_neo_match_table); + +static int disp_cc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &disp_cc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_runtime_init(pdev, &disp_cc_neo_desc); + if (ret) + return ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret) + return ret; + + clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + + /* Enable clock gating for MDP clocks */ + regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10); + + /* + * Keep clocks always enabled: + * disp_cc_xo_clk + */ + regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0)); + + ret = qcom_cc_really_probe(pdev, &disp_cc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register DISP CC clocks\n"); + return ret; + } + + pm_runtime_put_sync(&pdev->dev); + dev_info(&pdev->dev, "Registered DISP CC clocks\n"); + + return ret; +} + +static void disp_cc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &disp_cc_neo_desc); +} + +static const struct dev_pm_ops disp_cc_neo_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_cc_runtime_suspend, qcom_cc_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static struct platform_driver disp_cc_neo_driver = { + .probe = 
disp_cc_neo_probe, + .driver = { + .name = "disp_cc-neo", + .of_match_table = disp_cc_neo_match_table, + .sync_state = disp_cc_neo_sync_state, + .pm = &disp_cc_neo_pm_ops, + }, +}; + +static int __init disp_cc_neo_init(void) +{ + return platform_driver_register(&disp_cc_neo_driver); +} +subsys_initcall(disp_cc_neo_init); + +static void __exit disp_cc_neo_exit(void) +{ + platform_driver_unregister(&disp_cc_neo_driver); +} +module_exit(disp_cc_neo_exit); + +MODULE_DESCRIPTION("QTI DISP_CC NEO Driver"); +MODULE_LICENSE("GPL"); From fc3f1111a79f23a3f4934d7f306c5f9531c9e587 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 14:21:58 +0530 Subject: [PATCH 056/117] clk: qcom: videocc-neo: Snapshot of VIDEOCC driver for NEO Add snapshot of support of Video Clock Controller for video clients to be able to request for the clocks on NEO from msm-5.10 branch commit 088fac3b0656 ("clk: qcom: videocc-neo: Add support for VIDEOCC on NEO"). Change-Id: Ic24b7f1a47b32fa8ad3d94cf62e006dc52ef57aa Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/videocc-neo.c | 522 +++++++++++++++++++++++++++++++++ 3 files changed, 532 insertions(+) create mode 100644 drivers/clk/qcom/videocc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 92a266499ee1..542388821660 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1526,6 +1526,15 @@ config SXR_DISPCC_NEO NEO devices. Say Y if you want to support display devices and functionality such as splash screen. + +config SXR_VIDEOCC_NEO + tristate "NEO Video Clock Controller" + select SXR_GCC_NEO + help + Support for the video clock controller on Qualcomm Technologies, Inc. + NEO devices. + Say Y if you want to support video devices and functionality such as + video encode/decode. 
endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 4f0302db6d60..4251854ebd15 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -192,6 +192,7 @@ obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o obj-$(CONFIG_SM_VIDEOCC_PINEAPPLE) += videocc-pineapple.o obj-$(CONFIG_SM_VIDEOCC_VOLCANO) += videocc-volcano.o obj-$(CONFIG_SXR_VIDEOCC_ANORAK) += videocc-anorak.o +obj-$(CONFIG_SXR_VIDEOCC_NEO) += videocc-neo.o obj-$(CONFIG_SXR_VIDEOCC_NIOBE) += videocc-niobe.o obj-$(CONFIG_SM_TCSRCC_PINEAPPLE) += tcsrcc-pineapple.o obj-$(CONFIG_SXR_TCSRCC_NIOBE) += tcsrcc-niobe.o diff --git a/drivers/clk/qcom/videocc-neo.c b/drivers/clk/qcom/videocc-neo.c new file mode 100644 index 000000000000..aa520fe151b5 --- /dev/null +++ b/drivers/clk/qcom/videocc-neo.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NOMINAL + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mxc, VDD_NOMINAL + 1, 1, vdd_corner); + +static struct clk_vdd_class *video_cc_neo_regulators[] = { + &vdd_mm, + &vdd_mxc, +}; + +enum { + P_BI_TCXO, + P_SLEEP_CLK, + P_VIDEO_CC_PLL0_OUT_MAIN, + P_VIDEO_CC_PLL1_OUT_MAIN, +}; + +static const struct pll_vco lucid_ole_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static const struct alpha_pll_config video_cc_pll0_config = { + .l = 0x25, + .cal_l = 0x44, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, 
+ .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll video_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "video_cc_pll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct alpha_pll_config video_cc_pll1_config = { + .l = 0x36, + .cal_l = 0x44, + .alpha = 0xB000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x82AA299C, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000003, + .test_ctl_hi1_val = 0x00009000, + .test_ctl_hi2_val = 0x00000034, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000005, +}; + +static struct clk_alpha_pll video_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_ole_vco, + .num_vco = ARRAY_SIZE(lucid_ole_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "video_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ole_ops, + }, + .vdd_data = { + .vdd_class = &vdd_mxc, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER_D1] = 615000000, + [VDD_LOW] = 1100000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct parent_map video_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_VIDEO_CC_PLL0_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data video_cc_parent_data_1[] = { + { .fw_name = 
"bi_tcxo" }, + { .hw = &video_cc_pll0.clkr.hw }, +}; + +static const struct parent_map video_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_VIDEO_CC_PLL1_OUT_MAIN, 1 }, +}; + +static const struct clk_parent_data video_cc_parent_data_2[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &video_cc_pll1.clkr.hw }, +}; + +static const struct parent_map video_cc_parent_map_3[] = { + { P_SLEEP_CLK, 0 }, +}; + +static const struct clk_parent_data video_cc_parent_data_3[] = { + { .fw_name = "sleep_clk" }, +}; + +static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = { + F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_mvs0_clk_src = { + .cmd_rcgr = 0x8000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_1, + .freq_tbl = ftbl_video_cc_mvs0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs0_clk_src", + .parent_data = video_cc_parent_data_1, + .num_parents = ARRAY_SIZE(video_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = video_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(video_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 720000000, + [VDD_LOW] = 1014000000, + [VDD_NOMINAL] = 1332000000}, + }, +}; + +static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = { + F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_mvs1_clk_src = { + .cmd_rcgr = 0x8018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_2, + .freq_tbl = ftbl_video_cc_mvs1_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs1_clk_src", + 
.parent_data = video_cc_parent_data_2, + .num_parents = ARRAY_SIZE(video_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_classes = video_cc_neo_regulators, + .num_vdd_classes = ARRAY_SIZE(video_cc_neo_regulators), + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 1050000000, + [VDD_LOW] = 1350000000, + [VDD_NOMINAL] = 1650000000}, + }, +}; + +static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_sleep_clk_src = { + .cmd_rcgr = 0x8128, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_3, + .freq_tbl = ftbl_video_cc_sleep_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_sleep_clk_src", + .parent_data = video_cc_parent_data_3, + .num_parents = ARRAY_SIZE(video_cc_parent_data_3), + .ops = &clk_rcg2_ops, + }, + .clkr.vdd_data = { + .vdd_class = &vdd_mm, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 32000}, + }, +}; + +static struct clk_regmap_div video_cc_mvs0_div_clk_src = { + .reg = 0x80c4, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "video_cc_mvs0_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = { + .reg = 0x8070, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "video_cc_mvs0c_div2_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs1_div_clk_src = { + .reg = 0x80ec, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = 
"video_cc_mvs1_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = { + .reg = 0x809c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "video_cc_mvs1c_div2_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch video_cc_mvs0_clk = { + .halt_reg = 0x80b8, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x80b8, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x80b8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs0_clk", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs0c_clk = { + .halt_reg = 0x8064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs0c_clk", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs0c_div2_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs1_clk = { + .halt_reg = 0x80e0, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x80e0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x80e0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs1_clk", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs1_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_mvs1c_clk = { + .halt_reg = 0x8090, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x8090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_mvs1c_clk", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_mvs1c_div2_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_sleep_clk = { + .halt_reg = 0x8140, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8140, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_sleep_clk", + .parent_hws = (const struct clk_hw*[]){ + &video_cc_sleep_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *video_cc_neo_clocks[] = { + [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr, + [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr, + [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr, + [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr, + [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr, + [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr, + [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr, + [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr, + [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr, + [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr, + [VIDEO_CC_PLL0] = &video_cc_pll0.clkr, + [VIDEO_CC_PLL1] = &video_cc_pll1.clkr, + [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr, + [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr, +}; + +static const struct qcom_reset_map video_cc_neo_resets[] = { + [CVP_VIDEO_CC_INTERFACE_BCR] = { 0x80f0 }, + [CVP_VIDEO_CC_MVS0_BCR] = { 0x80a0 }, + [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 }, + [CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 }, + [CVP_VIDEO_CC_MVS1_BCR] = { 0x80c8 }, + [VIDEO_CC_MVS1C_CLK_ARES] = { 0x8090, 2 }, + [CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 }, + [VIDEO_CC_XO_CLK_ARES] = { 0x8124, 2 }, +}; + 
+static const struct regmap_config video_cc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9f4c, + .fast_io = true, +}; + +static struct qcom_cc_desc video_cc_neo_desc = { + .config = &video_cc_neo_regmap_config, + .clks = video_cc_neo_clocks, + .num_clks = ARRAY_SIZE(video_cc_neo_clocks), + .resets = video_cc_neo_resets, + .num_resets = ARRAY_SIZE(video_cc_neo_resets), + .clk_regulators = video_cc_neo_regulators, + .num_clk_regulators = ARRAY_SIZE(video_cc_neo_regulators), +}; + +static const struct of_device_id video_cc_neo_match_table[] = { + { .compatible = "qcom,neo-videocc" }, + { } +}; +MODULE_DEVICE_TABLE(of, video_cc_neo_match_table); + +static int video_cc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &video_cc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_runtime_init(pdev, &video_cc_neo_desc); + if (ret) + return ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret) + return ret; + + clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config); + clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config); + + /* + * Keep clocks always enabled: + * video_cc_ahb_clk + * video_cc_xo_clk + */ + regmap_update_bits(regmap, 0x80f4, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x8124, BIT(0), BIT(0)); + + ret = qcom_cc_really_probe(pdev, &video_cc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register VIDEO CC clocks\n"); + return ret; + } + + pm_runtime_put_sync(&pdev->dev); + dev_info(&pdev->dev, "Registered VIDEO CC clocks\n"); + + return ret; +} + +static void video_cc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &video_cc_neo_desc); +} + +static const struct dev_pm_ops video_cc_neo_pm_ops = { + SET_RUNTIME_PM_OPS(qcom_cc_runtime_suspend, qcom_cc_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + 
pm_runtime_force_resume) +}; + +static struct platform_driver video_cc_neo_driver = { + .probe = video_cc_neo_probe, + .driver = { + .name = "video_cc-neo", + .of_match_table = video_cc_neo_match_table, + .sync_state = video_cc_neo_sync_state, + .pm = &video_cc_neo_pm_ops, + }, +}; + +static int __init video_cc_neo_init(void) +{ + return platform_driver_register(&video_cc_neo_driver); +} +subsys_initcall(video_cc_neo_init); + +static void __exit video_cc_neo_exit(void) +{ + platform_driver_unregister(&video_cc_neo_driver); +} +module_exit(video_cc_neo_exit); + +MODULE_DESCRIPTION("QTI VIDEO_CC NEO Driver"); +MODULE_LICENSE("GPL"); From ff588cfe76576751b47887d80dd377f496a19c67 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 14:33:29 +0530 Subject: [PATCH 057/117] clk: qcom: debugcc-neo: Snapshot of DEBUGCC driver for NEO Add snapshot of support of Debug Clock controller for clock measurement on NEO from msm-5.10 branch commit 372d104af18c ("clk: qcom: debugcc-neo: Add support for DEBUGCC for NEO"). Change-Id: I9c1e41f5e37be08538aa4e1981512905994a5424 Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/debugcc-neo.c | 1451 ++++++++++++++++++++++++++++++++ 3 files changed, 1461 insertions(+) create mode 100644 drivers/clk/qcom/debugcc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 542388821660..f3cdf1322ab2 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1535,6 +1535,15 @@ config SXR_VIDEOCC_NEO NEO devices. Say Y if you want to support video devices and functionality such as video encode/decode. + +config SXR_DEBUGCC_NEO + tristate "NEO Debug Clock Controller" + depends on SXR_GCC_NEO + help + Support for the debug clock controller on Qualcomm Technologies, Inc. + NEO devices. + Say Y if you want to support the debug clocks such as clock measurement + functionality. 
endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 4251854ebd15..40573327aa2d 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -136,6 +136,7 @@ obj-$(CONFIG_SM_DEBUGCC_HOLI) += debugcc-holi.o obj-$(CONFIG_SM_DEBUGCC_PINEAPPLE) += debugcc-pineapple.o obj-$(CONFIG_SM_DEBUGCC_PITTI) += debugcc-pitti.o obj-$(CONFIG_SXR_DEBUGCC_ANORAK) += debugcc-anorak.o +obj-$(CONFIG_SXR_DEBUGCC_NEO) += debugcc-neo.o obj-$(CONFIG_SXR_DEBUGCC_NIOBE) += debugcc-niobe.o obj-$(CONFIG_SM_DEBUGCC_VOLCANO) += debugcc-volcano.o obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o diff --git a/drivers/clk/qcom/debugcc-neo.c b/drivers/clk/qcom/debugcc-neo.c new file mode 100644 index 000000000000..30b7bfc1cb73 --- /dev/null +++ b/drivers/clk/qcom/debugcc-neo.c @@ -0,0 +1,1451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-debug.h" +#include "common.h" + +static struct measure_clk_data debug_mux_priv = { + .ctl_reg = 0x72038, + .status_reg = 0x7203C, + .xo_div4_cbcr = 0x7200C, +}; + +static const char *const apss_cc_debug_mux_parent_names[] = { + "measure_only_apcs_l3_post_acd_clk", + "measure_only_apcs_l3_pre_acd_clk", + "measure_only_apcs_silver_post_acd_clk", + "measure_only_apcs_silver_pre_acd_clk", +}; + +static int apss_cc_debug_mux_sels[] = { + 0x41, /* measure_only_apcs_l3_post_acd_clk */ + 0x45, /* measure_only_apcs_l3_pre_acd_clk */ + 0x21, /* measure_only_apcs_silver_post_acd_clk */ + 0x44, /* measure_only_apcs_silver_pre_acd_clk */ +}; + +static int apss_cc_debug_mux_pre_divs[] = { + 0x4, /* measure_only_apcs_l3_post_acd_clk */ + 0x10, /* measure_only_apcs_l3_pre_acd_clk */ + 0x4, /* measure_only_apcs_silver_post_acd_clk */ + 0x10, /* measure_only_apcs_silver_pre_acd_clk 
*/ +}; + +static struct clk_debug_mux apss_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x18, + .post_div_offset = 0x18, + .cbcr_offset = 0x0, + .src_sel_mask = 0x7F0, + .src_sel_shift = 4, + .post_div_mask = 0x7800, + .post_div_shift = 11, + .post_div_val = 1, + .mux_sels = apss_cc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(apss_cc_debug_mux_sels), + .pre_div_vals = apss_cc_debug_mux_pre_divs, + .hw.init = &(struct clk_init_data){ + .name = "apss_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = apss_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(apss_cc_debug_mux_parent_names), + }, +}; + +static const char *const cam_cc_debug_mux_parent_names[] = { + "cam_cc_bps_ahb_clk", + "cam_cc_bps_clk", + "cam_cc_bps_fast_ahb_clk", + "cam_cc_camnoc_ahb_clk", + "cam_cc_camnoc_axi_clk", + "cam_cc_camnoc_dcd_xo_clk", + "cam_cc_camnoc_xo_clk", + "cam_cc_cci_0_clk", + "cam_cc_cci_1_clk", + "cam_cc_cci_2_clk", + "cam_cc_cci_3_clk", + "cam_cc_core_ahb_clk", + "cam_cc_cpas_ahb_clk", + "cam_cc_cpas_bps_clk", + "cam_cc_cpas_fast_ahb_clk", + "cam_cc_cpas_ife_0_clk", + "cam_cc_cpas_ife_1_clk", + "cam_cc_cpas_ife_lite_clk", + "cam_cc_cpas_ipe_nps_clk", + "cam_cc_csi0phytimer_clk", + "cam_cc_csi1phytimer_clk", + "cam_cc_csi2phytimer_clk", + "cam_cc_csi3phytimer_clk", + "cam_cc_csid_clk", + "cam_cc_csid_csiphy_rx_clk", + "cam_cc_csiphy0_clk", + "cam_cc_csiphy1_clk", + "cam_cc_csiphy2_clk", + "cam_cc_csiphy3_clk", + "cam_cc_drv_ahb_clk", + "cam_cc_drv_xo_clk", + "cam_cc_icp_ahb_clk", + "cam_cc_icp_clk", + "cam_cc_ife_0_clk", + "cam_cc_ife_0_dsp_clk", + "cam_cc_ife_0_fast_ahb_clk", + "cam_cc_ife_1_clk", + "cam_cc_ife_1_dsp_clk", + "cam_cc_ife_1_fast_ahb_clk", + "cam_cc_ife_lite_ahb_clk", + "cam_cc_ife_lite_clk", + "cam_cc_ife_lite_cphy_rx_clk", + "cam_cc_ife_lite_csid_clk", + "cam_cc_ipe_nps_ahb_clk", + "cam_cc_ipe_nps_clk", + "cam_cc_ipe_nps_fast_ahb_clk", + "cam_cc_ipe_pps_clk", + "cam_cc_ipe_pps_fast_ahb_clk", + "cam_cc_jpeg_1_clk", + 
"cam_cc_jpeg_2_clk", + "cam_cc_jpeg_clk", + "cam_cc_mclk0_clk", + "cam_cc_mclk1_clk", + "cam_cc_mclk2_clk", + "cam_cc_mclk3_clk", + "cam_cc_mclk4_clk", + "cam_cc_mclk5_clk", + "cam_cc_mclk6_clk", + "cam_cc_mclk7_clk", + "cam_cc_qdss_debug_clk", + "cam_cc_qdss_debug_xo_clk", + "cam_cc_sleep_clk", + "measure_only_cam_cc_gdsc_clk", +}; + +static int cam_cc_debug_mux_sels[] = { + 0x17, /* cam_cc_bps_ahb_clk */ + 0x18, /* cam_cc_bps_clk */ + 0x16, /* cam_cc_bps_fast_ahb_clk */ + 0x78, /* cam_cc_camnoc_ahb_clk */ + 0x49, /* cam_cc_camnoc_axi_clk */ + 0x4A, /* cam_cc_camnoc_dcd_xo_clk */ + 0x60, /* cam_cc_camnoc_xo_clk */ + 0x44, /* cam_cc_cci_0_clk */ + 0x45, /* cam_cc_cci_1_clk */ + 0x61, /* cam_cc_cci_2_clk */ + 0x77, /* cam_cc_cci_3_clk */ + 0x4D, /* cam_cc_core_ahb_clk */ + 0x46, /* cam_cc_cpas_ahb_clk */ + 0x19, /* cam_cc_cpas_bps_clk */ + 0x47, /* cam_cc_cpas_fast_ahb_clk */ + 0x25, /* cam_cc_cpas_ife_0_clk */ + 0x2A, /* cam_cc_cpas_ife_1_clk */ + 0x34, /* cam_cc_cpas_ife_lite_clk */ + 0x1B, /* cam_cc_cpas_ipe_nps_clk */ + 0x9, /* cam_cc_csi0phytimer_clk */ + 0xC, /* cam_cc_csi1phytimer_clk */ + 0xE, /* cam_cc_csi2phytimer_clk */ + 0x10, /* cam_cc_csi3phytimer_clk */ + 0x48, /* cam_cc_csid_clk */ + 0xB, /* cam_cc_csid_csiphy_rx_clk */ + 0xA, /* cam_cc_csiphy0_clk */ + 0xD, /* cam_cc_csiphy1_clk */ + 0xF, /* cam_cc_csiphy2_clk */ + 0x11, /* cam_cc_csiphy3_clk */ + 0x79, /* cam_cc_drv_ahb_clk */ + 0x74, /* cam_cc_drv_xo_clk */ + 0x43, /* cam_cc_icp_ahb_clk */ + 0x42, /* cam_cc_icp_clk */ + 0x24, /* cam_cc_ife_0_clk */ + 0x26, /* cam_cc_ife_0_dsp_clk */ + 0x28, /* cam_cc_ife_0_fast_ahb_clk */ + 0x29, /* cam_cc_ife_1_clk */ + 0x2B, /* cam_cc_ife_1_dsp_clk */ + 0x2D, /* cam_cc_ife_1_fast_ahb_clk */ + 0x37, /* cam_cc_ife_lite_ahb_clk */ + 0x33, /* cam_cc_ife_lite_clk */ + 0x36, /* cam_cc_ife_lite_cphy_rx_clk */ + 0x35, /* cam_cc_ife_lite_csid_clk */ + 0x1E, /* cam_cc_ipe_nps_ahb_clk */ + 0x1A, /* cam_cc_ipe_nps_clk */ + 0x1F, /* cam_cc_ipe_nps_fast_ahb_clk */ + 0x1C, /* 
cam_cc_ipe_pps_clk */ + 0x20, /* cam_cc_ipe_pps_fast_ahb_clk */ + 0x5F, /* cam_cc_jpeg_1_clk */ + 0x75, /* cam_cc_jpeg_2_clk */ + 0x40, /* cam_cc_jpeg_clk */ + 0x1, /* cam_cc_mclk0_clk */ + 0x2, /* cam_cc_mclk1_clk */ + 0x3, /* cam_cc_mclk2_clk */ + 0x4, /* cam_cc_mclk3_clk */ + 0x5, /* cam_cc_mclk4_clk */ + 0x6, /* cam_cc_mclk5_clk */ + 0x7, /* cam_cc_mclk6_clk */ + 0x8, /* cam_cc_mclk7_clk */ + 0x4B, /* cam_cc_qdss_debug_clk */ + 0x4C, /* cam_cc_qdss_debug_xo_clk */ + 0x4F, /* cam_cc_sleep_clk */ + 0x4E, /* measure_only_cam_cc_gdsc_clk */ +}; + +static struct clk_debug_mux cam_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x16000, + .post_div_offset = 0x14288, + .cbcr_offset = 0x1428C, + .src_sel_mask = 0xFF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 4, + .mux_sels = cam_cc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(cam_cc_debug_mux_sels), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = cam_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(cam_cc_debug_mux_parent_names), + }, +}; + +static const char *const disp_cc_debug_mux_parent_names[] = { + "disp_cc_mdss_accu_clk", + "disp_cc_mdss_ahb1_clk", + "disp_cc_mdss_ahb_clk", + "disp_cc_mdss_byte0_clk", + "disp_cc_mdss_byte0_intf_clk", + "disp_cc_mdss_byte1_clk", + "disp_cc_mdss_byte1_intf_clk", + "disp_cc_mdss_dptx0_aux_clk", + "disp_cc_mdss_dptx0_crypto_clk", + "disp_cc_mdss_dptx0_link_clk", + "disp_cc_mdss_dptx0_link_intf_clk", + "disp_cc_mdss_dptx0_pixel0_clk", + "disp_cc_mdss_dptx0_pixel1_clk", + "disp_cc_mdss_dptx0_usb_router_link_intf_clk", + "disp_cc_mdss_dptx1_aux_clk", + "disp_cc_mdss_dptx1_crypto_clk", + "disp_cc_mdss_dptx1_link_clk", + "disp_cc_mdss_dptx1_link_intf_clk", + "disp_cc_mdss_dptx1_pixel0_clk", + "disp_cc_mdss_dptx1_pixel1_clk", + "disp_cc_mdss_dptx1_usb_router_link_intf_clk", + "disp_cc_mdss_dptx2_aux_clk", + "disp_cc_mdss_dptx2_crypto_clk", + 
"disp_cc_mdss_dptx2_link_clk", + "disp_cc_mdss_dptx2_link_intf_clk", + "disp_cc_mdss_dptx2_pixel0_clk", + "disp_cc_mdss_dptx2_pixel1_clk", + "disp_cc_mdss_dptx3_aux_clk", + "disp_cc_mdss_dptx3_crypto_clk", + "disp_cc_mdss_dptx3_link_clk", + "disp_cc_mdss_dptx3_link_intf_clk", + "disp_cc_mdss_dptx3_pixel0_clk", + "disp_cc_mdss_esc0_clk", + "disp_cc_mdss_esc1_clk", + "disp_cc_mdss_mdp1_clk", + "disp_cc_mdss_mdp_clk", + "disp_cc_mdss_mdp_lut1_clk", + "disp_cc_mdss_mdp_lut_clk", + "disp_cc_mdss_non_gdsc_ahb_clk", + "disp_cc_mdss_pclk0_clk", + "disp_cc_mdss_pclk1_clk", + "disp_cc_mdss_rscc_ahb_clk", + "disp_cc_mdss_rscc_vsync_clk", + "disp_cc_mdss_vsync1_clk", + "disp_cc_mdss_vsync_clk", + "disp_cc_sleep_clk", + "measure_only_disp_cc_xo_clk", +}; + +static int disp_cc_debug_mux_sels[] = { + 0x46, /* disp_cc_mdss_accu_clk */ + 0x37, /* disp_cc_mdss_ahb1_clk */ + 0x33, /* disp_cc_mdss_ahb_clk */ + 0x14, /* disp_cc_mdss_byte0_clk */ + 0x15, /* disp_cc_mdss_byte0_intf_clk */ + 0x16, /* disp_cc_mdss_byte1_clk */ + 0x17, /* disp_cc_mdss_byte1_intf_clk */ + 0x20, /* disp_cc_mdss_dptx0_aux_clk */ + 0x1D, /* disp_cc_mdss_dptx0_crypto_clk */ + 0x1A, /* disp_cc_mdss_dptx0_link_clk */ + 0x1C, /* disp_cc_mdss_dptx0_link_intf_clk */ + 0x1E, /* disp_cc_mdss_dptx0_pixel0_clk */ + 0x1F, /* disp_cc_mdss_dptx0_pixel1_clk */ + 0x1B, /* disp_cc_mdss_dptx0_usb_router_link_intf_clk */ + 0x27, /* disp_cc_mdss_dptx1_aux_clk */ + 0x26, /* disp_cc_mdss_dptx1_crypto_clk */ + 0x23, /* disp_cc_mdss_dptx1_link_clk */ + 0x25, /* disp_cc_mdss_dptx1_link_intf_clk */ + 0x21, /* disp_cc_mdss_dptx1_pixel0_clk */ + 0x22, /* disp_cc_mdss_dptx1_pixel1_clk */ + 0x24, /* disp_cc_mdss_dptx1_usb_router_link_intf_clk */ + 0x2D, /* disp_cc_mdss_dptx2_aux_clk */ + 0x2C, /* disp_cc_mdss_dptx2_crypto_clk */ + 0x2A, /* disp_cc_mdss_dptx2_link_clk */ + 0x2B, /* disp_cc_mdss_dptx2_link_intf_clk */ + 0x28, /* disp_cc_mdss_dptx2_pixel0_clk */ + 0x29, /* disp_cc_mdss_dptx2_pixel1_clk */ + 0x31, /* disp_cc_mdss_dptx3_aux_clk 
*/ + 0x32, /* disp_cc_mdss_dptx3_crypto_clk */ + 0x2F, /* disp_cc_mdss_dptx3_link_clk */ + 0x30, /* disp_cc_mdss_dptx3_link_intf_clk */ + 0x2E, /* disp_cc_mdss_dptx3_pixel0_clk */ + 0x18, /* disp_cc_mdss_esc0_clk */ + 0x19, /* disp_cc_mdss_esc1_clk */ + 0x34, /* disp_cc_mdss_mdp1_clk */ + 0x11, /* disp_cc_mdss_mdp_clk */ + 0x35, /* disp_cc_mdss_mdp_lut1_clk */ + 0x12, /* disp_cc_mdss_mdp_lut_clk */ + 0x38, /* disp_cc_mdss_non_gdsc_ahb_clk */ + 0xF, /* disp_cc_mdss_pclk0_clk */ + 0x10, /* disp_cc_mdss_pclk1_clk */ + 0x3A, /* disp_cc_mdss_rscc_ahb_clk */ + 0x39, /* disp_cc_mdss_rscc_vsync_clk */ + 0x36, /* disp_cc_mdss_vsync1_clk */ + 0x13, /* disp_cc_mdss_vsync_clk */ + 0x47, /* disp_cc_sleep_clk */ + 0x45, /* measure_only_disp_cc_xo_clk */ +}; + +static struct clk_debug_mux disp_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x11000, + .post_div_offset = 0xD000, + .cbcr_offset = 0xD004, + .src_sel_mask = 0x1FF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 4, + .mux_sels = disp_cc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(disp_cc_debug_mux_sels), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = disp_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(disp_cc_debug_mux_parent_names), + }, +}; + +static const char *const gcc_debug_mux_parent_names[] = { + "apss_cc_debug_mux", + "cam_cc_debug_mux", + "disp_cc_debug_mux", + "gcc_aggre_noc_pcie_1_axi_clk", + "gcc_aggre_usb3_prim_axi_clk", + "gcc_boot_rom_ahb_clk", + "gcc_camera_hf_axi_clk", + "gcc_camera_sf_axi_clk", + "gcc_cfg_noc_pcie_anoc_ahb_clk", + "gcc_cfg_noc_usb3_prim_axi_clk", + "gcc_ddrss_gpu_axi_clk", + "gcc_ddrss_pcie_sf_clk", + "gcc_ddrss_spad_clk", + "gcc_disp_hf_axi_clk", + "gcc_gp1_clk", + "gcc_gp2_clk", + "gcc_gp3_clk", + "gcc_gpu_gpll0_clk_src", + "gcc_gpu_gpll0_div_clk_src", + "gcc_gpu_memnoc_gfx_clk", + "gcc_gpu_snoc_dvm_gfx_clk", + "gcc_iris_ss_hf_axi1_clk", + 
"gcc_iris_ss_spd_axi1_clk", + "gcc_pcie_0_aux_clk", + "gcc_pcie_0_cfg_ahb_clk", + "gcc_pcie_0_mstr_axi_clk", + "gcc_pcie_0_phy_rchng_clk", + "gcc_pcie_0_pipe_clk", + "gcc_pcie_0_slv_axi_clk", + "gcc_pcie_0_slv_q2a_axi_clk", + "gcc_pcie_1_aux_clk", + "gcc_pcie_1_cfg_ahb_clk", + "gcc_pcie_1_mstr_axi_clk", + "gcc_pcie_1_phy_rchng_clk", + "gcc_pcie_1_pipe_clk", + "gcc_pcie_1_slv_axi_clk", + "gcc_pcie_1_slv_q2a_axi_clk", + "gcc_pdm2_clk", + "gcc_pdm_ahb_clk", + "gcc_pdm_xo4_clk", + "gcc_qmip_camera_nrt_ahb_clk", + "gcc_qmip_camera_rt_ahb_clk", + "gcc_qmip_gpu_ahb_clk", + "gcc_qmip_pcie_ahb_clk", + "gcc_qmip_video_cv_cpu_ahb_clk", + "gcc_qmip_video_cvp_ahb_clk", + "gcc_qmip_video_lsr_ahb_clk", + "gcc_qmip_video_v_cpu_ahb_clk", + "gcc_qmip_video_vcodec_ahb_clk", + "gcc_qupv3_wrap0_core_2x_clk", + "gcc_qupv3_wrap0_core_clk", + "gcc_qupv3_wrap0_s0_clk", + "gcc_qupv3_wrap0_s1_clk", + "gcc_qupv3_wrap0_s2_clk", + "gcc_qupv3_wrap0_s3_clk", + "gcc_qupv3_wrap0_s4_clk", + "gcc_qupv3_wrap0_s5_clk", + "gcc_qupv3_wrap1_core_2x_clk", + "gcc_qupv3_wrap1_core_clk", + "gcc_qupv3_wrap1_s0_clk", + "gcc_qupv3_wrap1_s1_clk", + "gcc_qupv3_wrap1_s2_clk", + "gcc_qupv3_wrap1_s3_clk", + "gcc_qupv3_wrap1_s4_clk", + "gcc_qupv3_wrap1_s5_clk", + "gcc_qupv3_wrap_0_m_ahb_clk", + "gcc_qupv3_wrap_0_s_ahb_clk", + "gcc_qupv3_wrap_1_m_ahb_clk", + "gcc_qupv3_wrap_1_s_ahb_clk", + "gcc_sdcc1_ahb_clk", + "gcc_sdcc1_apps_clk", + "gcc_sdcc1_ice_core_clk", + "gcc_usb30_prim_master_clk", + "gcc_usb30_prim_mock_utmi_clk", + "gcc_usb30_prim_sleep_clk", + "gcc_usb3_prim_phy_aux_clk", + "gcc_usb3_prim_phy_com_aux_clk", + "gcc_usb3_prim_phy_pipe_clk", + "gcc_video_axi0_clk", + "gcc_video_axi1_clk", + "gpu_cc_debug_mux", + "mc_cc_debug_mux", + "measure_only_cnoc_clk", + "measure_only_gcc_anoc_pcie_north_at_clk", + "measure_only_gcc_aoss_at_clk", + "measure_only_gcc_apss_qdss_apb_clk", + "measure_only_gcc_apss_qdss_tsctr_clk", + "measure_only_gcc_at_clk", + "measure_only_gcc_camera_ahb_clk", + 
"measure_only_gcc_camera_xo_clk", + "measure_only_gcc_cnoc_qdss_stm_clk", + "measure_only_gcc_config_noc_at_clk", + "measure_only_gcc_cpuss_at_clk", + "measure_only_gcc_cpuss_trig_clk", + "measure_only_gcc_ddrss_at_clk", + "measure_only_gcc_disp_ahb_clk", + "measure_only_gcc_gpu_at_clk", + "measure_only_gcc_gpu_cfg_ahb_clk", + "measure_only_gcc_gpu_trig_clk", + "measure_only_gcc_lpass_at_clk", + "measure_only_gcc_lpass_trig_clk", + "measure_only_gcc_mmnoc_at_clk", + "measure_only_gcc_mmss_at_clk", + "measure_only_gcc_mmss_trig_clk", + "measure_only_gcc_north_at_clk", + "measure_only_gcc_phy_at_clk", + "measure_only_gcc_pimem_at_clk", + "measure_only_gcc_qdss_center_at_clk", + "measure_only_gcc_qdss_cfg_ahb_clk", + "measure_only_gcc_qdss_dap_ahb_clk", + "measure_only_gcc_qdss_dap_clk", + "measure_only_gcc_qdss_etr_ddr_clk", + "measure_only_gcc_qdss_etr_usb_clk", + "measure_only_gcc_qdss_stm_clk", + "measure_only_gcc_qdss_traceclkin_clk", + "measure_only_gcc_qdss_trig_clk", + "measure_only_gcc_qdss_tsctr_clk", + "measure_only_gcc_qdss_usb_prim_clk", + "measure_only_gcc_qdss_xo_clk", + "measure_only_gcc_sdcc1_at_clk", + "measure_only_gcc_sys_noc_at_clk", + "measure_only_gcc_tme_at_clk", + "measure_only_gcc_tme_trig_clk", + "measure_only_gcc_turing_at_clk", + "measure_only_gcc_turing_trig_clk", + "measure_only_gcc_video_ahb_clk", + "measure_only_gcc_video_xo_clk", + "measure_only_gcc_wpss_at_clk", + "measure_only_gcc_wpss_m_at_clk", + "measure_only_gcc_wpss_trig_clk", + "measure_only_memnoc_clk", + "measure_only_pcie_0_pipe_clk", + "measure_only_pcie_1_pipe_clk", + "measure_only_snoc_clk", + "measure_only_usb3_phy_wrapper_gcc_usb30_pipe_clk", + "video_cc_debug_mux", +}; + +static int gcc_debug_mux_sels[] = { + 0xE7, /* apss_cc_debug_mux */ + 0x53, /* cam_cc_debug_mux */ + 0x56, /* disp_cc_debug_mux */ + 0x2A, /* gcc_aggre_noc_pcie_1_axi_clk */ + 0x2B, /* gcc_aggre_usb3_prim_axi_clk */ + 0xAD, /* gcc_boot_rom_ahb_clk */ + 0x4F, /* gcc_camera_hf_axi_clk */ + 0x50, /* 
gcc_camera_sf_axi_clk */ + 0x28, /* gcc_cfg_noc_pcie_anoc_ahb_clk */ + 0x1D, /* gcc_cfg_noc_usb3_prim_axi_clk */ + 0xC5, /* gcc_ddrss_gpu_axi_clk */ + 0xC6, /* gcc_ddrss_pcie_sf_clk */ + 0xCF, /* gcc_ddrss_spad_clk */ + 0x55, /* gcc_disp_hf_axi_clk */ + 0xF3, /* gcc_gp1_clk */ + 0xF4, /* gcc_gp2_clk */ + 0xF5, /* gcc_gp3_clk */ + 0x119, /* gcc_gpu_gpll0_clk_src */ + 0x11A, /* gcc_gpu_gpll0_div_clk_src */ + 0x116, /* gcc_gpu_memnoc_gfx_clk */ + 0x118, /* gcc_gpu_snoc_dvm_gfx_clk */ + 0x60, /* gcc_iris_ss_hf_axi1_clk */ + 0x62, /* gcc_iris_ss_spd_axi1_clk */ + 0xFB, /* gcc_pcie_0_aux_clk */ + 0xFA, /* gcc_pcie_0_cfg_ahb_clk */ + 0xF9, /* gcc_pcie_0_mstr_axi_clk */ + 0xFD, /* gcc_pcie_0_phy_rchng_clk */ + 0xFC, /* gcc_pcie_0_pipe_clk */ + 0xF8, /* gcc_pcie_0_slv_axi_clk */ + 0xF7, /* gcc_pcie_0_slv_q2a_axi_clk */ + 0x104, /* gcc_pcie_1_aux_clk */ + 0x103, /* gcc_pcie_1_cfg_ahb_clk */ + 0x102, /* gcc_pcie_1_mstr_axi_clk */ + 0x106, /* gcc_pcie_1_phy_rchng_clk */ + 0x105, /* gcc_pcie_1_pipe_clk */ + 0x101, /* gcc_pcie_1_slv_axi_clk */ + 0x100, /* gcc_pcie_1_slv_q2a_axi_clk */ + 0x9E, /* gcc_pdm2_clk */ + 0x9C, /* gcc_pdm_ahb_clk */ + 0x9D, /* gcc_pdm_xo4_clk */ + 0x4D, /* gcc_qmip_camera_nrt_ahb_clk */ + 0x4E, /* gcc_qmip_camera_rt_ahb_clk */ + 0x113, /* gcc_qmip_gpu_ahb_clk */ + 0xF6, /* gcc_qmip_pcie_ahb_clk */ + 0x5B, /* gcc_qmip_video_cv_cpu_ahb_clk */ + 0x58, /* gcc_qmip_video_cvp_ahb_clk */ + 0x65, /* gcc_qmip_video_lsr_ahb_clk */ + 0x5A, /* gcc_qmip_video_v_cpu_ahb_clk */ + 0x59, /* gcc_qmip_video_vcodec_ahb_clk */ + 0x8B, /* gcc_qupv3_wrap0_core_2x_clk */ + 0x8A, /* gcc_qupv3_wrap0_core_clk */ + 0x8C, /* gcc_qupv3_wrap0_s0_clk */ + 0x8D, /* gcc_qupv3_wrap0_s1_clk */ + 0x8E, /* gcc_qupv3_wrap0_s2_clk */ + 0x8F, /* gcc_qupv3_wrap0_s3_clk */ + 0x90, /* gcc_qupv3_wrap0_s4_clk */ + 0x91, /* gcc_qupv3_wrap0_s5_clk */ + 0x95, /* gcc_qupv3_wrap1_core_2x_clk */ + 0x94, /* gcc_qupv3_wrap1_core_clk */ + 0x96, /* gcc_qupv3_wrap1_s0_clk */ + 0x97, /* gcc_qupv3_wrap1_s1_clk 
*/ + 0x98, /* gcc_qupv3_wrap1_s2_clk */ + 0x99, /* gcc_qupv3_wrap1_s3_clk */ + 0x9A, /* gcc_qupv3_wrap1_s4_clk */ + 0x9B, /* gcc_qupv3_wrap1_s5_clk */ + 0x88, /* gcc_qupv3_wrap_0_m_ahb_clk */ + 0x89, /* gcc_qupv3_wrap_0_s_ahb_clk */ + 0x92, /* gcc_qupv3_wrap_1_m_ahb_clk */ + 0x93, /* gcc_qupv3_wrap_1_s_ahb_clk */ + 0x85, /* gcc_sdcc1_ahb_clk */ + 0x84, /* gcc_sdcc1_apps_clk */ + 0x87, /* gcc_sdcc1_ice_core_clk */ + 0x79, /* gcc_usb30_prim_master_clk */ + 0x7B, /* gcc_usb30_prim_mock_utmi_clk */ + 0x7A, /* gcc_usb30_prim_sleep_clk */ + 0x7C, /* gcc_usb3_prim_phy_aux_clk */ + 0x7D, /* gcc_usb3_prim_phy_com_aux_clk */ + 0x7E, /* gcc_usb3_prim_phy_pipe_clk */ + 0x5C, /* gcc_video_axi0_clk */ + 0x5E, /* gcc_video_axi1_clk */ + 0x115, /* gpu_cc_debug_mux */ + 0xD2, /* mc_cc_debug_mux or ddrss_gcc_debug_clk */ + 0x19, /* measure_only_cnoc_clk */ + 0x34, /* measure_only_gcc_anoc_pcie_north_at_clk */ + 0xB2, /* measure_only_gcc_aoss_at_clk */ + 0xE6, /* measure_only_gcc_apss_qdss_apb_clk */ + 0xE5, /* measure_only_gcc_apss_qdss_tsctr_clk */ + 0xBB, /* measure_only_gcc_at_clk */ + 0x4C, /* measure_only_gcc_camera_ahb_clk */ + 0x52, /* measure_only_gcc_camera_xo_clk */ + 0x1C, /* measure_only_gcc_cnoc_qdss_stm_clk */ + 0x25, /* measure_only_gcc_config_noc_at_clk */ + 0xE4, /* measure_only_gcc_cpuss_at_clk */ + 0xE3, /* measure_only_gcc_cpuss_trig_clk */ + 0xCC, /* measure_only_gcc_ddrss_at_clk */ + 0x54, /* measure_only_gcc_disp_ahb_clk */ + 0x114, /* measure_only_gcc_gpu_at_clk */ + 0x112, /* measure_only_gcc_gpu_cfg_ahb_clk */ + 0x117, /* measure_only_gcc_gpu_trig_clk */ + 0xD6, /* measure_only_gcc_lpass_at_clk */ + 0xD5, /* measure_only_gcc_lpass_trig_clk */ + 0x3A, /* measure_only_gcc_mmnoc_at_clk */ + 0x48, /* measure_only_gcc_mmss_at_clk */ + 0x4A, /* measure_only_gcc_mmss_trig_clk */ + 0x6A, /* measure_only_gcc_north_at_clk */ + 0x6B, /* measure_only_gcc_phy_at_clk */ + 0x12C, /* measure_only_gcc_pimem_at_clk */ + 0x69, /* measure_only_gcc_qdss_center_at_clk */ + 0x68, 
/* measure_only_gcc_qdss_cfg_ahb_clk */ + 0x67, /* measure_only_gcc_qdss_dap_ahb_clk */ + 0x72, /* measure_only_gcc_qdss_dap_clk */ + 0x6D, /* measure_only_gcc_qdss_etr_ddr_clk */ + 0x6C, /* measure_only_gcc_qdss_etr_usb_clk */ + 0x6E, /* measure_only_gcc_qdss_stm_clk */ + 0x6F, /* measure_only_gcc_qdss_traceclkin_clk */ + 0x71, /* measure_only_gcc_qdss_trig_clk */ + 0x70, /* measure_only_gcc_qdss_tsctr_clk */ + 0x77, /* measure_only_gcc_qdss_usb_prim_clk */ + 0x76, /* measure_only_gcc_qdss_xo_clk */ + 0x86, /* measure_only_gcc_sdcc1_at_clk */ + 0xE, /* measure_only_gcc_sys_noc_at_clk */ + 0xA8, /* measure_only_gcc_tme_at_clk */ + 0xA7, /* measure_only_gcc_tme_trig_clk */ + 0xDF, /* measure_only_gcc_turing_at_clk */ + 0xE0, /* measure_only_gcc_turing_trig_clk */ + 0x57, /* measure_only_gcc_video_ahb_clk */ + 0x64, /* measure_only_gcc_video_xo_clk */ + 0x132, /* measure_only_gcc_wpss_at_clk */ + 0x131, /* measure_only_gcc_wpss_m_at_clk */ + 0x133, /* measure_only_gcc_wpss_trig_clk */ + 0xCB, /* measure_only_memnoc_clk */ + 0xFE, /* measure_only_pcie_0_pipe_clk */ + 0x107, /* measure_only_pcie_1_pipe_clk */ + 0xA, /* measure_only_snoc_clk */ + 0x82, /* measure_only_usb3_phy_wrapper_gcc_usb30_pipe_clk */ + 0x66, /* video_cc_debug_mux */ +}; + +static struct clk_debug_mux gcc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x72000, + .post_div_offset = 0x72004, + .cbcr_offset = 0x72008, + .src_sel_mask = 0x3FF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 2, + .mux_sels = gcc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(gcc_debug_mux_sels), + .hw.init = &(struct clk_init_data){ + .name = "gcc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = gcc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names), + }, +}; + +static const char *const gpu_cc_debug_mux_parent_names[] = { + "gpu_cc_ahb_clk", + "gpu_cc_crc_ahb_clk", + "gpu_cc_cx_ff_clk", + "gpu_cc_cx_gmu_clk", + "gpu_cc_cxo_aon_clk", 
+ "gpu_cc_cxo_clk", + "gpu_cc_demet_clk", + "gpu_cc_freq_measure_clk", + "gpu_cc_gx_ff_clk", + "gpu_cc_gx_gfx3d_rdvm_clk", + "gpu_cc_gx_gmu_clk", + "gpu_cc_gx_vsense_clk", + "gpu_cc_hub_aon_clk", + "gpu_cc_hub_cx_int_clk", + "gpu_cc_memnoc_gfx_clk", + "gpu_cc_mnd1x_0_gfx3d_clk", + "gpu_cc_mnd1x_1_gfx3d_clk", + "gpu_cc_sleep_clk", + "measure_only_gpu_cc_cx_gfx3d_clk", + "measure_only_gpu_cc_cx_gfx3d_slv_clk", + "measure_only_gpu_cc_gx_gfx3d_clk", +}; + +static int gpu_cc_debug_mux_sels[] = { + 0x16, /* gpu_cc_ahb_clk */ + 0x17, /* gpu_cc_crc_ahb_clk */ + 0x20, /* gpu_cc_cx_ff_clk */ + 0x1D, /* gpu_cc_cx_gmu_clk */ + 0xB, /* gpu_cc_cxo_aon_clk */ + 0x1E, /* gpu_cc_cxo_clk */ + 0xD, /* gpu_cc_demet_clk */ + 0xC, /* gpu_cc_freq_measure_clk */ + 0x13, /* gpu_cc_gx_ff_clk */ + 0x15, /* gpu_cc_gx_gfx3d_rdvm_clk */ + 0x12, /* gpu_cc_gx_gmu_clk */ + 0xF, /* gpu_cc_gx_vsense_clk */ + 0x2D, /* gpu_cc_hub_aon_clk */ + 0x1F, /* gpu_cc_hub_cx_int_clk */ + 0x21, /* gpu_cc_memnoc_gfx_clk */ + 0x28, /* gpu_cc_mnd1x_0_gfx3d_clk */ + 0x29, /* gpu_cc_mnd1x_1_gfx3d_clk */ + 0x1B, /* gpu_cc_sleep_clk */ + 0x24, /* measure_only_gpu_cc_cx_gfx3d_clk */ + 0x25, /* measure_only_gpu_cc_cx_gfx3d_slv_clk */ + 0xE, /* measure_only_gpu_cc_gx_gfx3d_clk */ +}; + +static struct clk_debug_mux gpu_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x9564, + .post_div_offset = 0x9270, + .cbcr_offset = 0x9274, + .src_sel_mask = 0xFF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 2, + .mux_sels = gpu_cc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(gpu_cc_debug_mux_sels), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = gpu_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(gpu_cc_debug_mux_parent_names), + }, +}; + +static const char *const video_cc_debug_mux_parent_names[] = { + "measure_only_video_cc_ahb_clk", + "measure_only_video_cc_xo_clk", + "video_cc_mvs0_clk", + 
"video_cc_mvs0c_clk", + "video_cc_mvs1_clk", + "video_cc_mvs1c_clk", + "video_cc_sleep_clk", +}; + +static int video_cc_debug_mux_sels[] = { + 0x7, /* measure_only_video_cc_ahb_clk */ + 0xB, /* measure_only_video_cc_xo_clk */ + 0x3, /* video_cc_mvs0_clk */ + 0x1, /* video_cc_mvs0c_clk */ + 0x5, /* video_cc_mvs1_clk */ + 0x9, /* video_cc_mvs1c_clk */ + 0xC, /* video_cc_sleep_clk */ +}; + +static struct clk_debug_mux video_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x9A4C, + .post_div_offset = 0x80F8, + .cbcr_offset = 0x80FC, + .src_sel_mask = 0x3F, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 3, + .mux_sels = video_cc_debug_mux_sels, + .num_mux_sels = ARRAY_SIZE(video_cc_debug_mux_sels), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = video_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(video_cc_debug_mux_parent_names), + }, +}; + +static const char *const mc_cc_debug_mux_parent_names[] = { + "measure_only_mccc_clk", +}; + +static struct clk_debug_mux mc_cc_debug_mux = { + .period_offset = 0x50, + .hw.init = &(struct clk_init_data){ + .name = "mc_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = mc_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(mc_cc_debug_mux_parent_names), + }, +}; + +static struct mux_regmap_names mux_list[] = { + { .mux = &apss_cc_debug_mux, .regmap_name = "qcom,apsscc" }, + { .mux = &cam_cc_debug_mux, .regmap_name = "qcom,camcc" }, + { .mux = &disp_cc_debug_mux, .regmap_name = "qcom,dispcc" }, + { .mux = &gpu_cc_debug_mux, .regmap_name = "qcom,gpucc" }, + { .mux = &mc_cc_debug_mux, .regmap_name = "qcom,mccc" }, + { .mux = &video_cc_debug_mux, .regmap_name = "qcom,videocc" }, + { .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" }, +}; + +static struct clk_dummy measure_only_apcs_l3_post_acd_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = 
"measure_only_apcs_l3_post_acd_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_apcs_l3_pre_acd_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_apcs_l3_pre_acd_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_apcs_silver_post_acd_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_apcs_silver_post_acd_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_apcs_silver_pre_acd_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_apcs_silver_pre_acd_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_cam_cc_gdsc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_cam_cc_gdsc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_cnoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_cnoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_disp_cc_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_disp_cc_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_anoc_pcie_north_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_anoc_pcie_north_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_aoss_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_aoss_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_apss_qdss_apb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_apss_qdss_apb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_apss_qdss_tsctr_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = 
"measure_only_gcc_apss_qdss_tsctr_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_camera_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_camera_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_camera_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_camera_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_cnoc_qdss_stm_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_cnoc_qdss_stm_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_config_noc_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_config_noc_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_cpuss_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_cpuss_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_cpuss_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_cpuss_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_disp_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_disp_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_ddrss_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_ddrss_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_gpu_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_gpu_at_clk", + .ops = 
&clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_gpu_cfg_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_gpu_cfg_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_gpu_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_gpu_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_lpass_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_lpass_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_lpass_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_lpass_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_mmnoc_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_mmnoc_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_mmss_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_mmss_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_mmss_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_mmss_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_north_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_north_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_phy_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_phy_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_pimem_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_pimem_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy 
measure_only_gcc_qdss_center_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_center_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_cfg_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_cfg_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_dap_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_dap_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_dap_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_dap_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_etr_ddr_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_etr_ddr_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_etr_usb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_etr_usb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_stm_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_stm_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_traceclkin_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_traceclkin_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_tsctr_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_tsctr_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy 
measure_only_gcc_qdss_usb_prim_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_usb_prim_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_qdss_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_qdss_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_sdcc1_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_sdcc1_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_sys_noc_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_sys_noc_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_tme_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_tme_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_tme_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_tme_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_turing_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_turing_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_turing_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_turing_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_video_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_video_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_video_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_video_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_wpss_at_clk = { + .rrate = 1000, + 
.hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_wpss_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_wpss_m_at_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_wpss_m_at_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_wpss_trig_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_wpss_trig_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gpu_cc_cx_gfx3d_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_slv_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gpu_cc_cx_gfx3d_slv_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gpu_cc_gx_gfx3d_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gpu_cc_gx_gfx3d_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_mccc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_mccc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_memnoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_memnoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_pcie_0_pipe_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_pcie_0_pipe_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_pcie_1_pipe_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_pcie_1_pipe_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_snoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_snoc_clk", + .ops = 
&clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_usb3_phy_wrapper_gcc_usb30_pipe_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_usb3_phy_wrapper_gcc_usb30_pipe_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_video_cc_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_video_cc_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_video_cc_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_video_cc_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_hw *debugcc_neo_hws[] = { + &measure_only_apcs_l3_post_acd_clk.hw, + &measure_only_apcs_l3_pre_acd_clk.hw, + &measure_only_apcs_silver_post_acd_clk.hw, + &measure_only_apcs_silver_pre_acd_clk.hw, + &measure_only_cam_cc_gdsc_clk.hw, + &measure_only_cnoc_clk.hw, + &measure_only_disp_cc_xo_clk.hw, + &measure_only_gcc_anoc_pcie_north_at_clk.hw, + &measure_only_gcc_aoss_at_clk.hw, + &measure_only_gcc_apss_qdss_apb_clk.hw, + &measure_only_gcc_apss_qdss_tsctr_clk.hw, + &measure_only_gcc_at_clk.hw, + &measure_only_gcc_camera_ahb_clk.hw, + &measure_only_gcc_camera_xo_clk.hw, + &measure_only_gcc_cnoc_qdss_stm_clk.hw, + &measure_only_gcc_config_noc_at_clk.hw, + &measure_only_gcc_cpuss_at_clk.hw, + &measure_only_gcc_cpuss_trig_clk.hw, + &measure_only_gcc_ddrss_at_clk.hw, + &measure_only_gcc_disp_ahb_clk.hw, + &measure_only_gcc_gpu_at_clk.hw, + &measure_only_gcc_gpu_cfg_ahb_clk.hw, + &measure_only_gcc_gpu_trig_clk.hw, + &measure_only_gcc_lpass_at_clk.hw, + &measure_only_gcc_lpass_trig_clk.hw, + &measure_only_gcc_mmnoc_at_clk.hw, + &measure_only_gcc_mmss_at_clk.hw, + &measure_only_gcc_mmss_trig_clk.hw, + &measure_only_gcc_north_at_clk.hw, + &measure_only_gcc_phy_at_clk.hw, + &measure_only_gcc_pimem_at_clk.hw, + &measure_only_gcc_qdss_center_at_clk.hw, + &measure_only_gcc_qdss_cfg_ahb_clk.hw, + 
&measure_only_gcc_qdss_dap_ahb_clk.hw, + &measure_only_gcc_qdss_dap_clk.hw, + &measure_only_gcc_qdss_etr_ddr_clk.hw, + &measure_only_gcc_qdss_etr_usb_clk.hw, + &measure_only_gcc_qdss_stm_clk.hw, + &measure_only_gcc_qdss_traceclkin_clk.hw, + &measure_only_gcc_qdss_trig_clk.hw, + &measure_only_gcc_qdss_tsctr_clk.hw, + &measure_only_gcc_qdss_usb_prim_clk.hw, + &measure_only_gcc_qdss_xo_clk.hw, + &measure_only_gcc_sdcc1_at_clk.hw, + &measure_only_gcc_sys_noc_at_clk.hw, + &measure_only_gcc_tme_at_clk.hw, + &measure_only_gcc_tme_trig_clk.hw, + &measure_only_gcc_turing_at_clk.hw, + &measure_only_gcc_turing_trig_clk.hw, + &measure_only_gcc_video_ahb_clk.hw, + &measure_only_gcc_video_xo_clk.hw, + &measure_only_gcc_wpss_at_clk.hw, + &measure_only_gcc_wpss_m_at_clk.hw, + &measure_only_gcc_wpss_trig_clk.hw, + &measure_only_gpu_cc_cx_gfx3d_clk.hw, + &measure_only_gpu_cc_cx_gfx3d_slv_clk.hw, + &measure_only_gpu_cc_gx_gfx3d_clk.hw, + &measure_only_mccc_clk.hw, + &measure_only_memnoc_clk.hw, + &measure_only_pcie_0_pipe_clk.hw, + &measure_only_pcie_1_pipe_clk.hw, + &measure_only_snoc_clk.hw, + &measure_only_usb3_phy_wrapper_gcc_usb30_pipe_clk.hw, + &measure_only_video_cc_ahb_clk.hw, + &measure_only_video_cc_xo_clk.hw, +}; + +static const struct of_device_id clk_debug_match_table[] = { + { .compatible = "qcom,neo-debugcc" }, + { } +}; + +static int clk_debug_neo_probe(struct platform_device *pdev) +{ + struct clk *clk; + int ret = 0, i; + + BUILD_BUG_ON(ARRAY_SIZE(apss_cc_debug_mux_parent_names) != + ARRAY_SIZE(apss_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(cam_cc_debug_mux_parent_names) != + ARRAY_SIZE(cam_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(disp_cc_debug_mux_parent_names) != + ARRAY_SIZE(disp_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) != ARRAY_SIZE(gcc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(gpu_cc_debug_mux_parent_names) != + ARRAY_SIZE(gpu_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(video_cc_debug_mux_parent_names) != + 
ARRAY_SIZE(video_cc_debug_mux_sels)); + + clk = devm_clk_get(&pdev->dev, "xo_clk_src"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get xo clock\n"); + return PTR_ERR(clk); + } + + debug_mux_priv.cxo = clk; + + for (i = 0; i < ARRAY_SIZE(mux_list); i++) { + if (IS_ERR_OR_NULL(mux_list[i].mux->regmap)) { + ret = map_debug_bases(pdev, + mux_list[i].regmap_name, mux_list[i].mux); + if (ret == -EBADR) + continue; + else if (ret) + return ret; + } + } + + for (i = 0; i < ARRAY_SIZE(debugcc_neo_hws); i++) { + clk = devm_clk_register(&pdev->dev, debugcc_neo_hws[i]); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + clk_hw_get_name(debugcc_neo_hws[i]), + PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + + for (i = 0; i < ARRAY_SIZE(mux_list); i++) { + if (!mux_list[i].mux->regmap) + continue; + + ret = devm_clk_register_debug_mux(&pdev->dev, mux_list[i].mux); + if (ret) { + dev_err(&pdev->dev, "Unable to register mux clk %s, err:(%d)\n", + clk_hw_get_name(&mux_list[i].mux->hw), + ret); + return ret; + } + } + + ret = clk_debug_measure_register(&gcc_debug_mux.hw); + if (ret) { + dev_err(&pdev->dev, "Could not register Measure clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered debug measure clocks\n"); + + return 0; +} + +static struct platform_driver clk_debug_driver = { + .probe = clk_debug_neo_probe, + .driver = { + .name = "neo-debugcc", + .of_match_table = clk_debug_match_table, + }, +}; + +static int __init clk_debug_neo_init(void) +{ + return platform_driver_register(&clk_debug_driver); +} +fs_initcall(clk_debug_neo_init); + +MODULE_DESCRIPTION("QTI DEBUG CC NEO Driver"); +MODULE_LICENSE("GPL"); From e06e41fa8ed70c286c6ff3d64fdcdd5c0cc5ffe1 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 9 Jul 2024 14:59:17 +0530 Subject: [PATCH 058/117] clk: qcom: tcsrcc-neo: Snapshot of tcsrcc driver for NEO Add snapshot of tcsrcc driver, which provides support for miscellaneous 
top-level clocks on NEO from msm-5.10 branch commit 2a83a5d9c328 ("clk: qcom: tcsrcc: Add initial tcsrcc driver"). Change-Id: Ieb38addadc96f98ff94475fd9eb14e0a77788d9e Signed-off-by: Kalpak Kawadkar Signed-off-by: Chintan Kothari --- drivers/clk/qcom/Kconfig | 9 ++ drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/tcsrcc-neo.c | 155 ++++++++++++++++++++++++++++++++++ 3 files changed, 165 insertions(+) create mode 100644 drivers/clk/qcom/tcsrcc-neo.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index f3cdf1322ab2..a8757a97e737 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1544,6 +1544,15 @@ config SXR_DEBUGCC_NEO NEO devices. Say Y if you want to support the debug clocks such as clock measurement functionality. + +config SXR_TCSRCC_NEO + tristate "Top-Level CSR Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the TCSR clock controller on Qualcomm Technologies, Inc + NEO devices. + Say Y if you want to support miscellaneous top-level clocks + such as for the PHY references. endif config VIRTIO_CLK diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 40573327aa2d..1cfe9f281e6b 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -196,6 +196,7 @@ obj-$(CONFIG_SXR_VIDEOCC_ANORAK) += videocc-anorak.o obj-$(CONFIG_SXR_VIDEOCC_NEO) += videocc-neo.o obj-$(CONFIG_SXR_VIDEOCC_NIOBE) += videocc-niobe.o obj-$(CONFIG_SM_TCSRCC_PINEAPPLE) += tcsrcc-pineapple.o +obj-$(CONFIG_SXR_TCSRCC_NEO) += tcsrcc-neo.o obj-$(CONFIG_SXR_TCSRCC_NIOBE) += tcsrcc-niobe.o obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o diff --git a/drivers/clk/qcom/tcsrcc-neo.c b/drivers/clk/qcom/tcsrcc-neo.c new file mode 100644 index 000000000000..e358f52ce1e2 --- /dev/null +++ b/drivers/clk/qcom/tcsrcc-neo.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "reset.h" +#include "vdd-level.h" + +static struct clk_branch tcsr_pcie_0_clkref_en = { + .halt_reg = 0x15100, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x15100, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "tcsr_pcie_0_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch tcsr_pcie_1_clkref_en = { + .halt_reg = 0x15114, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x15114, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "tcsr_pcie_1_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch tcsr_usb2_clkref_en = { + .halt_reg = 0x15118, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x15118, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "tcsr_usb2_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch tcsr_usb3_clkref_en = { + .halt_reg = 0x15108, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x15108, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "tcsr_usb3_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *tcsr_cc_neo_clocks[] = { + [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr, + [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr, + [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr, + [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr, +}; + +static const struct regmap_config tcsr_cc_neo_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 
0x2f000, + .fast_io = true, +}; + +static const struct qcom_cc_desc tcsr_cc_neo_desc = { + .config = &tcsr_cc_neo_regmap_config, + .clks = tcsr_cc_neo_clocks, + .num_clks = ARRAY_SIZE(tcsr_cc_neo_clocks), +}; + +static const struct of_device_id tcsr_cc_neo_match_table[] = { + { .compatible = "qcom,neo-tcsrcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, tcsr_cc_neo_match_table); + +static int tcsr_cc_neo_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &tcsr_cc_neo_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_really_probe(pdev, &tcsr_cc_neo_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register TCSR CC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered TCSR CC clocks\n"); + + return ret; +} + +static void tcsr_cc_neo_sync_state(struct device *dev) +{ + qcom_cc_sync_state(dev, &tcsr_cc_neo_desc); +} + +static struct platform_driver tcsr_cc_neo_driver = { + .probe = tcsr_cc_neo_probe, + .driver = { + .name = "tcsr_cc-neo", + .of_match_table = tcsr_cc_neo_match_table, + .sync_state = tcsr_cc_neo_sync_state, + }, +}; + +static int __init tcsr_cc_neo_init(void) +{ + return platform_driver_register(&tcsr_cc_neo_driver); +} +subsys_initcall(tcsr_cc_neo_init); + +static void __exit tcsr_cc_neo_exit(void) +{ + platform_driver_unregister(&tcsr_cc_neo_driver); +} +module_exit(tcsr_cc_neo_exit); + +MODULE_DESCRIPTION("QTI TCSR_CC NEO Driver"); +MODULE_LICENSE("GPL"); From f2ee42036a3e3a28ee6f1ac4aa68f96ab6f8cd09 Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Mon, 29 Jul 2024 13:53:41 -0700 Subject: [PATCH 059/117] rpmsg: glink_cma: Use mutex for SSR locking Incorrect use of spin lock leads to unexpected behavior when trying to lock between SSR callback and main workqueue. Both SSR callback and main workqueue call functions which potentially can sleep within the spin lock. Switch over to mutex as that is the proper locking mechanism for these scenarios. 
Change-Id: I7637a42b3928b9e9f03789238aac275332c764bb Signed-off-by: Tony Truong --- drivers/rpmsg/virtio_glink_cma.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/rpmsg/virtio_glink_cma.c b/drivers/rpmsg/virtio_glink_cma.c index 929770722421..c58c51d7b8b4 100644 --- a/drivers/rpmsg/virtio_glink_cma.c +++ b/drivers/rpmsg/virtio_glink_cma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -79,7 +79,7 @@ struct virtio_glink_bridge_dsp_info { struct notifier_block nb; void *notifier_handle; - spinlock_t ssr_lock; + struct mutex ssr_lock; struct list_head node; }; @@ -141,7 +141,7 @@ static int virtio_glink_bridge_send_msg(struct virtio_glink_bridge *vgbridge, msg->label = cpu_to_virtio32(vdev, label); sg_init_one(&sg, msg, sizeof(*msg)); - rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_ATOMIC); + rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_KERNEL); if (rc) { dev_err(&vdev->dev, "fail to add input buffer\n"); return rc; @@ -167,7 +167,7 @@ static int virtio_glink_bridge_send_msg_ack(struct virtio_glink_bridge *vgbridge ack->status = cpu_to_virtio32(vdev, status); sg_init_one(&sg, ack, sizeof(*ack)); - rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_ATOMIC); + rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_KERNEL); if (rc) { dev_err(&vdev->dev, "fail to add input buffer\n"); return rc; @@ -197,7 +197,7 @@ static int virtio_glink_bridge_ssr_cb(struct notifier_block *nb, dsp_info = container_of(nb, struct virtio_glink_bridge_dsp_info, nb); - spin_lock(&dsp_info->ssr_lock); + mutex_lock(&dsp_info->ssr_lock); dev = &dsp_info->vgbridge->vdev->dev; dev_info(dev, "received cb state %ld for %s\n", state, dsp_info->label); @@ -212,7 +212,7 @@ static int virtio_glink_bridge_ssr_cb(struct notifier_block *nb, 
default: break; } - spin_unlock(&dsp_info->ssr_lock); + mutex_unlock(&dsp_info->ssr_lock); return NOTIFY_DONE; } @@ -273,7 +273,7 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work) goto out; } - spin_lock(&dsp_info->ssr_lock); + mutex_lock(&dsp_info->ssr_lock); switch (msg_type) { case MSG_SETUP: @@ -333,7 +333,7 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work) rc = VIRTIO_GLINK_BRIDGE_SUCCESS; unlock: virtio_glink_bridge_send_msg_ack(vgbridge, msg_ack_type, label, rc); - spin_unlock(&dsp_info->ssr_lock); + mutex_unlock(&dsp_info->ssr_lock); return; out: virtio_glink_bridge_send_msg_ack(vgbridge, msg_ack_type, label, rc); @@ -383,7 +383,7 @@ static int virtio_glink_bridge_of_parse(struct virtio_glink_bridge *vgbridge) goto out; } - spin_lock_init(&dsp_info->ssr_lock); + mutex_init(&dsp_info->ssr_lock); dsp_info->np = child_np; list_add_tail(&dsp_info->node, &vgbridge->dsp_infos); } From f49cd77f73a211dce3421b22cc9f3e5d2a3c079a Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Thu, 8 Aug 2024 16:24:32 -0700 Subject: [PATCH 060/117] rpmsg: glink_cma: Add msg type to reclaim inbuf There are cases where the device receives an inbuf msg type related to an ack and holds on to the buffer without it being reclaimed by the driver. Add a new msg type, MSG_INBUF_RECLAIM, so the driver can reclaim inbuf that the device no longer needs. 
Change-Id: Id92371f06134569031ee3ea4454104f83f790561 Signed-off-by: Tony Truong --- drivers/rpmsg/virtio_glink_cma.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/rpmsg/virtio_glink_cma.c b/drivers/rpmsg/virtio_glink_cma.c index c58c51d7b8b4..82132ac7b93f 100644 --- a/drivers/rpmsg/virtio_glink_cma.c +++ b/drivers/rpmsg/virtio_glink_cma.c @@ -51,6 +51,7 @@ enum { MSG_SSR_AFTER_POWERUP, /* outbound */ MSG_SSR_SETUP, /* inbound */ MSG_SSR_SETUP_ACK, /* outbound */ + MSG_INBUF_RECLAIM, /* inbound */ MSG_MAX, MSG_ERR = 0xff, }; @@ -121,6 +122,7 @@ static int virtio_glink_bridge_msg_type_supported(u32 msg_type) switch (msg_type) { case MSG_SETUP: case MSG_SSR_SETUP: + case MSG_INBUF_RECLAIM: return true; default: return false; @@ -264,6 +266,9 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work) goto out; } + if (msg_type == MSG_INBUF_RECLAIM) + return; + msg_ack_type = virtio_glink_bridge_to_msg_ack_type(msg_type); dsp_info = virtio_glink_bridge_get_dsp_info(vgbridge, label); From f7cc38531aa6983f1bf3ea8d79cde6c777096f61 Mon Sep 17 00:00:00 2001 From: Rakesh Kundaram Date: Tue, 6 Aug 2024 15:10:23 +0530 Subject: [PATCH 061/117] soc: qcom: sysmon_subsystem_stats: Fix a potential null pointer dereference Fix a potential null pointer dereference issue in sysmon subsystem stats. 
Change-Id: I8ad500599b4d9e212a814abbd1b5f0dccdc6b3e6 Signed-off-by: Rakesh Kundaram --- drivers/soc/qcom/sysmon_subsystem_stats.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/soc/qcom/sysmon_subsystem_stats.c b/drivers/soc/qcom/sysmon_subsystem_stats.c index a6c903b2bd93..3c89799dd5bf 100644 --- a/drivers/soc/qcom/sysmon_subsystem_stats.c +++ b/drivers/soc/qcom/sysmon_subsystem_stats.c @@ -191,6 +191,8 @@ static int add_delta_time( ptr = g_sysmon_stats.sysmon_power_stats_cdsp; } else if (dsp_id == SLPI) { ptr = g_sysmon_stats.sysmon_power_stats_slpi; + } else { + return -EINVAL; } if (ver >= 2) { From 1a6163e69cf20465ab4e22314abbd73db19f802f Mon Sep 17 00:00:00 2001 From: Priyanka G Pai Date: Thu, 8 Aug 2024 19:30:59 +0530 Subject: [PATCH 062/117] msm: npu: Add NPU driver support for kernel 6.1 NPU driver snapshot from msm-5.15 branch commit f5053f87777d ("msm: npu: Fix OOB issue in IPC between driver and firmware"). Change-Id: Iea68b912dd7efd9a979969a91fb38d3611e3ff8c Signed-off-by: Priyanka G Pai --- drivers/media/platform/msm/npu/Kconfig | 11 + drivers/media/platform/msm/npu/Makefile | 11 + drivers/media/platform/msm/npu/npu_common.h | 277 ++ drivers/media/platform/msm/npu/npu_dbg.c | 33 + drivers/media/platform/msm/npu/npu_debugfs.c | 184 ++ drivers/media/platform/msm/npu/npu_dev.c | 2428 +++++++++++++++++ drivers/media/platform/msm/npu/npu_firmware.h | 176 ++ drivers/media/platform/msm/npu/npu_host_ipc.c | 438 +++ drivers/media/platform/msm/npu/npu_host_ipc.h | 464 ++++ drivers/media/platform/msm/npu/npu_hw.h | 53 + .../media/platform/msm/npu/npu_hw_access.c | 485 ++++ .../media/platform/msm/npu/npu_hw_access.h | 87 + drivers/media/platform/msm/npu/npu_mgr.c | 2112 ++++++++++++++ drivers/media/platform/msm/npu/npu_mgr.h | 147 + include/soc/qcom/subsystem_restart.h | 279 ++ 15 files changed, 7185 insertions(+) create mode 100644 drivers/media/platform/msm/npu/npu_common.h create mode 100644 drivers/media/platform/msm/npu/npu_dbg.c create mode 
100644 drivers/media/platform/msm/npu/npu_debugfs.c create mode 100644 drivers/media/platform/msm/npu/npu_dev.c create mode 100644 drivers/media/platform/msm/npu/npu_firmware.h create mode 100644 drivers/media/platform/msm/npu/npu_host_ipc.c create mode 100644 drivers/media/platform/msm/npu/npu_host_ipc.h create mode 100644 drivers/media/platform/msm/npu/npu_hw.h create mode 100644 drivers/media/platform/msm/npu/npu_hw_access.c create mode 100644 drivers/media/platform/msm/npu/npu_hw_access.h create mode 100644 drivers/media/platform/msm/npu/npu_mgr.c create mode 100644 drivers/media/platform/msm/npu/npu_mgr.h create mode 100644 include/soc/qcom/subsystem_restart.h diff --git a/drivers/media/platform/msm/npu/Kconfig b/drivers/media/platform/msm/npu/Kconfig index 985ff9e1986d..5b4f10968a1c 100644 --- a/drivers/media/platform/msm/npu/Kconfig +++ b/drivers/media/platform/msm/npu/Kconfig @@ -7,3 +7,14 @@ config VIRTIO_NPU which provides acceleration for neural network processing. This driver is based on virtio. Say Y if you want to support virtual NPU. + +config MSM_NPU + tristate "QTI MSM Neural Processing Unit support" + depends on ARCH_QCOM + help + Enable support for Neural Processing Unit + for specific QTI chipsets. + This module serves as the common driver + for npu which provides acceleration for neural + network processing. 
+ diff --git a/drivers/media/platform/msm/npu/Makefile b/drivers/media/platform/msm/npu/Makefile index 48f0b6ecc329..6899941e7ed0 100644 --- a/drivers/media/platform/msm/npu/Makefile +++ b/drivers/media/platform/msm/npu/Makefile @@ -1,3 +1,14 @@ # SPDX-License-Identifier: GPL-2.0-only +ifneq ($(CONFIG_VIRTIO_NPU),) obj-$(CONFIG_VIRTIO_NPU) := virtio_npu.o +else +msm_npu-objs := npu_dbg.o \ + npu_dev.o \ + npu_debugfs.o \ + npu_host_ipc.o \ + npu_hw_access.o \ + npu_mgr.o + +obj-$(CONFIG_MSM_NPU) := msm_npu.o +endif diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h new file mode 100644 index 000000000000..5e64ea23ccdc --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _NPU_COMMON_H +#define _NPU_COMMON_H + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "npu_mgr.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define NPU_MAX_MBOX_NUM 2 +#define NPU_MBOX_LOW_PRI 0 +#define NPU_MBOX_HIGH_PRI 1 + +#define DEFAULT_REG_DUMP_NUM 64 +#define ROW_BYTES 16 +#define GROUP_BYTES 4 + +#define NUM_MAX_CLK_NUM 24 +#define NPU_MAX_REGULATOR_NUM 2 +#define NPU_MAX_DT_NAME_LEN 21 +#define NPU_MAX_PWRLEVELS 8 +#define NPU_MAX_STATS_BUF_SIZE 16384 +#define NPU_MAX_PATCH_NUM 160 + +#define PERF_MODE_DEFAULT 0 + +enum npu_power_level { + NPU_PWRLEVEL_MINSVS = 0, + NPU_PWRLEVEL_LOWSVS, + NPU_PWRLEVEL_SVS, + 
NPU_PWRLEVEL_SVS_L1, + NPU_PWRLEVEL_NOM, + NPU_PWRLEVEL_NOM_L1, + NPU_PWRLEVEL_TURBO, + NPU_PWRLEVEL_TURBO_L1, + NPU_PWRLEVEL_OFF = 0xFFFFFFFF, +}; + +/* ------------------------------------------------------------------------- + * Data Structures + * ------------------------------------------------------------------------- + */ +struct npu_smmu_ctx { + int domain; + struct dma_iommu_mapping *mmu_mapping; + struct reg_bus_client *reg_bus_clt; + int32_t attach_cnt; +}; + +struct npu_ion_buf { + int fd; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *table; + dma_addr_t iova; + uint32_t size; + void *phys_addr; + void *buf; + struct list_head list; +}; + +struct npu_clk { + struct clk *clk; + char clk_name[NPU_MAX_DT_NAME_LEN]; +}; + +struct npu_regulator { + struct regulator *regulator; + char regulator_name[NPU_MAX_DT_NAME_LEN]; +}; + +struct npu_debugfs_ctx { + struct dentry *root; + uint32_t reg_off; + uint32_t reg_cnt; +}; + +struct npu_debugfs_reg_ctx { + char *buf; + size_t buf_len; + struct npu_device *npu_dev; +}; + +struct npu_mbox { + struct mbox_client client; + struct mbox_chan *chan; + struct npu_device *npu_dev; + uint32_t id; +}; + +/** + * struct npu_pwrlevel - Struct holding different pwrlevel info obtained + * from dtsi file + * @pwr_level: NPU power level + * @freq[]: NPU frequency vote in Hz + */ +struct npu_pwrlevel { + uint32_t pwr_level; + long clk_freq[NUM_MAX_CLK_NUM]; +}; + +/* + * struct npu_reg - Struct holding npu register information + * @ off - register offset + * @ val - register value + * @ valid - if register value is valid + */ +struct npu_reg { + uint32_t off; + uint32_t val; + bool valid; +}; + +/** + * struct npu_pwrctrl - Power control settings for a NPU device + * @pwr_vote_num - voting information for power enable + * @pwrlevels - List of supported power levels + * @active_pwrlevel - The currently active power level + * @default_pwrlevel - device wake up power level + * @max_pwrlevel - 
maximum allowable powerlevel per the user + * @min_pwrlevel - minimum allowable powerlevel per the user + * @num_pwrlevels - number of available power levels + * @cdsprm_pwrlevel - maximum power level from cdsprm + * @fmax_pwrlevel - maximum power level from qfprom fmax setting + * @uc_pwrlevel - power level from user driver setting + * @perf_mode_override - perf mode from sysfs to override perf mode + * settings from user driver + * @dcvs_mode - dcvs mode from sysfs to turn on dcvs mode + * settings from user driver + * @devbw - bw device + */ +struct npu_pwrctrl { + int32_t pwr_vote_num; + + struct npu_pwrlevel pwrlevels[NPU_MAX_PWRLEVELS]; + uint32_t active_pwrlevel; + uint32_t default_pwrlevel; + uint32_t max_pwrlevel; + uint32_t min_pwrlevel; + uint32_t num_pwrlevels; + + struct device *devbw; + uint32_t bwmon_enabled; + uint32_t uc_pwrlevel; + uint32_t cdsprm_pwrlevel; + uint32_t fmax_pwrlevel; + uint32_t perf_mode_override; + uint32_t dcvs_mode; + uint32_t cur_dcvs_activity; +}; + +/** + * struct npu_thermalctrl - Thermal control settings for a NPU device + * @max_state - maximum thermal mitigation state + * @current_state - current thermal mitigation state + * @pwr_level -power level that thermal control requested + */ +struct npu_thermalctrl { + unsigned long max_state; + unsigned long current_state; + uint32_t pwr_level; +}; + +#define NPU_MAX_IRQ 3 + +struct npu_irq { + char *name; + int irq; + int irq_type; +}; + +struct npu_io_data { + size_t size; + void __iomem *base; +}; + +struct npu_fw_io_data { + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; +}; + +struct npu_device { + struct mutex dev_lock; + + struct platform_device *pdev; + + dev_t dev_num; + struct cdev cdev; + struct class *class; + struct device *device; + + struct npu_io_data core_io; + struct npu_io_data tcm_io; + struct npu_io_data bwmon_io; + struct npu_io_data qfprom_io; + struct npu_fw_io_data fw_io; + + uint32_t core_clk_num; + struct npu_clk 
core_clks[NUM_MAX_CLK_NUM]; + + uint32_t regulator_num; + struct npu_regulator regulators[NPU_MAX_DT_NAME_LEN]; + + struct npu_irq irq[NPU_MAX_IRQ]; + + struct device *cb_device; + + struct npu_host_ctx host_ctx; + struct npu_smmu_ctx smmu_ctx; + struct npu_debugfs_ctx debugfs_ctx; + + struct npu_mbox mbox_aop; + + struct thermal_cooling_device *tcdev; + struct npu_pwrctrl pwrctrl; + struct npu_thermalctrl thermalctrl; + + struct llcc_slice_desc *sys_cache; + uint32_t execute_v2_flag; + bool cxlimit_registered; + struct icc_path *icc_npu_cdspmem; + struct icc_path *icc_cpu_imemcfg; + uint32_t hw_version; +}; + +struct npu_client { + struct npu_device *npu_dev; + struct mutex list_lock; + struct list_head mapped_buffer_list; +}; + +/* ------------------------------------------------------------------------- + * Function Prototypes + * ------------------------------------------------------------------------- + */ +int npu_debugfs_init(struct npu_device *npu_dev); +void npu_debugfs_deinit(struct npu_device *npu_dev); + +int npu_enable_core_power(struct npu_device *npu_dev); +void npu_disable_core_power(struct npu_device *npu_dev); +int npu_enable_post_pil_clocks(struct npu_device *npu_dev); +void npu_disable_post_pil_clocks(struct npu_device *npu_dev); + +irqreturn_t npu_intr_hdler(int irq, void *ptr); + +int npu_set_uc_power_level(struct npu_device *npu_dev, + uint32_t pwr_level); + +int fw_init(struct npu_device *npu_dev); +void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive); +int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable); + +#endif /* _NPU_COMMON_H */ diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c new file mode 100644 index 000000000000..6a600a497534 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_dbg.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include "npu_common.h" +#include "npu_firmware.h" +#include "npu_hw.h" +#include "npu_hw_access.h" +#include "npu_mgr.h" + +/* ------------------------------------------------------------------------- + * Function Definitions - Debug + * ------------------------------------------------------------------------- + */ +void npu_dump_debug_timeout_stats(struct npu_device *npu_dev) +{ + uint32_t reg_val; + + reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START); + pr_info("fw jobs execute started count = %d\n", reg_val); + reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END); + pr_info("fw jobs execute finished count = %d\n", reg_val); + reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA); + pr_info("fw jobs aco parser debug = %d\n", reg_val); +} diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c new file mode 100644 index 000000000000..c7ac86e4fc6c --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include + +#include "npu_hw.h" +#include "npu_hw_access.h" +#include "npu_common.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define NPU_LOG_BUF_SIZE 4096 + +/* ------------------------------------------------------------------------- + * Function Prototypes + * ------------------------------------------------------------------------- + */ +static int npu_debug_open(struct inode *inode, struct file *file); +static int npu_debug_release(struct inode *inode, struct file *file); +static ssize_t npu_debug_ctrl_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos); + +/* ------------------------------------------------------------------------- + * Variables + * ------------------------------------------------------------------------- + */ +static struct npu_device *g_npu_dev; + +static const struct file_operations npu_ctrl_fops = { + .open = npu_debug_open, + .release = npu_debug_release, + .read = NULL, + .write = npu_debug_ctrl_write, +}; + +/* ------------------------------------------------------------------------- + * Function Implementations + * ------------------------------------------------------------------------- + */ +static int npu_debug_open(struct inode *inode, struct file *file) +{ + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + return 0; +} + +static int npu_debug_release(struct inode *inode, struct file *file) +{ + return 0; +} + + +/* ------------------------------------------------------------------------- + * Function Implementations - DebugFS Control + * 
------------------------------------------------------------------------- + */ +static ssize_t npu_debug_ctrl_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + char buf[24]; + struct npu_device *npu_dev = file->private_data; + struct npu_debugfs_ctx *debugfs; + int32_t rc = 0; + uint32_t val; + + pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev); + npu_dev = g_npu_dev; + debugfs = &npu_dev->debugfs_ctx; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (count >= 2) + buf[count-1] = 0;/* remove line feed */ + + if (strcmp(buf, "on") == 0) { + pr_info("triggering fw_init\n"); + if (fw_init(npu_dev) != 0) + pr_info("error in fw_init\n"); + } else if (strcmp(buf, "off") == 0) { + pr_info("triggering fw_deinit\n"); + fw_deinit(npu_dev, false, true); + } else if (strcmp(buf, "ssr") == 0) { + pr_info("trigger error irq\n"); + if (npu_enable_core_power(npu_dev)) + return -EPERM; + + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(1), 2); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2); + npu_disable_core_power(npu_dev); + } else if (strcmp(buf, "ssr_wdt") == 0) { + pr_info("trigger wdt irq\n"); + npu_disable_post_pil_clocks(npu_dev); + } else if (strcmp(buf, "loopback") == 0) { + pr_debug("loopback test\n"); + rc = npu_host_loopback_test(npu_dev); + pr_debug("loopback test end: %d\n", rc); + } else { + rc = kstrtou32(buf, 10, &val); + if (rc) { + pr_err("Invalid input for power level settings\n"); + } else { + val = min(val, npu_dev->pwrctrl.max_pwrlevel); + npu_dev->pwrctrl.active_pwrlevel = val; + pr_info("setting power state to %d\n", val); + } + } + + return count; +} +/* ------------------------------------------------------------------------- + * Function Implementations - DebugFS + * ------------------------------------------------------------------------- + */ +int npu_debugfs_init(struct npu_device *npu_dev) +{ + struct 
npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + g_npu_dev = npu_dev; + + debugfs->root = debugfs_create_dir("npu", NULL); + if (IS_ERR_OR_NULL(debugfs->root)) { + pr_err("debugfs_create_dir for npu failed, error %ld\n", + PTR_ERR(debugfs->root)); + return -ENODEV; + } + + if (!debugfs_create_file("ctrl", 0644, debugfs->root, + npu_dev, &npu_ctrl_fops)) { + pr_err("debugfs_create_file ctrl fail\n"); + goto err; + } + + debugfs_create_bool("sys_cache_disable", 0644, + debugfs->root, &(host_ctx->sys_cache_disable)); + + debugfs_create_u32("fw_dbg_mode", 0644, + debugfs->root, &(host_ctx->fw_dbg_mode)); + + debugfs_create_u32("fw_state", 0444, + debugfs->root, &(host_ctx->fw_state)); + + debugfs_create_u32("pwr_level", 0444, + debugfs->root, &(pwr->active_pwrlevel)); + + debugfs_create_u32("exec_flags", 0644, + debugfs->root, &(host_ctx->exec_flags_override)); + + + return 0; + +err: + npu_debugfs_deinit(npu_dev); + return -ENODEV; +} + +void npu_debugfs_deinit(struct npu_device *npu_dev) +{ + struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx; + + if (!IS_ERR_OR_NULL(debugfs->root)) { + debugfs_remove_recursive(debugfs->root); + debugfs->root = NULL; + } +} diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c new file mode 100644 index 000000000000..da980013f921 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -0,0 +1,2428 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "npu_common.h" +#include "npu_hw.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define CLASS_NAME "npu" +#define DRIVER_NAME "msm_npu" + +#define MBOX_OP_TIMEOUTMS 1000 + +#define ICC_NPU_CDSPMEM "icc-npu-cdspmem" +#define ICC_CPU_IMEMCFG "icc-cpu-imemcfg" +/* ------------------------------------------------------------------------- + * File Scope Prototypes + * ------------------------------------------------------------------------- + */ +static int npu_enable_regulators(struct npu_device *npu_dev); +static void npu_disable_regulators(struct npu_device *npu_dev); +static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil); +static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil); +static int npu_enable_core_clocks(struct npu_device *npu_dev); +static void npu_disable_core_clocks(struct npu_device *npu_dev); +static uint32_t npu_calc_power_level(struct npu_device *npu_dev); +static ssize_t caps_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t pwr_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t pwr_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t perf_mode_override_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t perf_mode_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t 
dcvs_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t dcvs_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t fw_unload_delay_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t fw_unload_delay_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t fw_state_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t fw_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static void npu_suspend_devbw(struct npu_device *npu_dev); +static void npu_resume_devbw(struct npu_device *npu_dev); +static bool npu_is_post_clock(const char *clk_name); +static bool npu_is_exclude_rate_clock(const char *clk_name); +static int npu_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state); +static int npu_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state); +static int npu_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state); +static int npu_open(struct inode *inode, struct file *file); +static int npu_close(struct inode *inode, struct file *file); +static int npu_get_info(struct npu_client *client, unsigned long arg); +static int npu_map_buf(struct npu_client *client, unsigned long arg); +static int npu_unmap_buf(struct npu_client *client, + unsigned long arg); +static int npu_load_network(struct npu_client *client, + unsigned long arg); +static int npu_load_network_v2(struct npu_client *client, + unsigned long arg); +static int npu_unload_network(struct npu_client *client, + unsigned long arg); +static int npu_exec_network(struct npu_client *client, + unsigned long arg); +static int npu_exec_network_v2(struct npu_client *client, + unsigned long arg); +static int npu_set_fw_state(struct npu_client *client, uint32_t enable); +static int 
npu_set_property(struct npu_client *client, + unsigned long arg); +static int npu_get_property(struct npu_client *client, + unsigned long arg); +static long npu_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int npu_parse_dt_clock(struct npu_device *npu_dev); +static int npu_parse_dt_regulator(struct npu_device *npu_dev); +static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, + struct device_node *node); +static int npu_pwrctrl_init(struct npu_device *npu_dev); +static int npu_probe(struct platform_device *pdev); +static int npu_remove(struct platform_device *pdev); +static int npu_suspend(struct platform_device *dev, pm_message_t state); +static int npu_resume(struct platform_device *dev); +static int __init npu_init(void); +static void __exit npu_exit(void); +static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit); +static uint32_t npu_notify_cdsprm_cxlimit_corner(struct npu_device *npu_dev, + uint32_t pwr_lvl); +static void npu_icc_init(struct npu_device *npu_dev); +static void npu_icc_deinit(struct npu_device *npu_dev); +static void npu_disable_icc_bw(struct npu_device *npu_dev); +static void npu_enable_icc_bw(struct npu_device *npu_dev); +/* ------------------------------------------------------------------------- + * File Scope Variables + * ------------------------------------------------------------------------- + */ +static const char * const npu_post_clocks[] = { + "npu_cpc_clk", + "npu_cpc_timer_clk" +}; + +static const char * const npu_exclude_rate_clocks[] = { + "qdss_clk", + "at_clk", + "trig_clk", + "sleep_clk", + "conf_noc_ahb_clk", + "comp_noc_axi_clk", + "npu_core_cti_clk", + "npu_core_apb_clk", + "npu_core_atb_clk", + "npu_cpc_timer_clk", + "qtimer_core_clk", + "bwmon_clk", + "bto_core_clk" +}; + + +static struct npu_reg npu_saved_bw_registers[] = { + { BWMON2_SAMPLING_WINDOW, 0, false }, + { BWMON2_BYTE_COUNT_THRESHOLD_HIGH, 0, false }, + { BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM, 0, false }, 
+ { BWMON2_BYTE_COUNT_THRESHOLD_LOW, 0, false }, + { BWMON2_ZONE_ACTIONS, 0, false }, + { BWMON2_ZONE_COUNT_THRESHOLD, 0, false }, +}; + +static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = { + {"ipc_irq", 0, IRQF_TRIGGER_HIGH}, + {"error_irq", 0, IRQF_TRIGGER_RISING | IRQF_ONESHOT}, + {"wdg_bite_irq", 0, IRQF_TRIGGER_RISING | IRQF_ONESHOT}, +}; + +static struct npu_device *g_npu_dev; + +/* ------------------------------------------------------------------------- + * Entry Points for Probe + * ------------------------------------------------------------------------- + */ +/* Sys FS */ +static DEVICE_ATTR_RO(caps); +static DEVICE_ATTR_RW(pwr); +static DEVICE_ATTR_RW(perf_mode_override); +static DEVICE_ATTR_RW(dcvs_mode); +static DEVICE_ATTR_RW(fw_unload_delay_ms); +static DEVICE_ATTR_RW(fw_state); + +static struct attribute *npu_fs_attrs[] = { + &dev_attr_caps.attr, + &dev_attr_pwr.attr, + &dev_attr_perf_mode_override.attr, + &dev_attr_dcvs_mode.attr, + &dev_attr_fw_state.attr, + &dev_attr_fw_unload_delay_ms.attr, + NULL +}; + +static struct attribute_group npu_fs_attr_group = { + .attrs = npu_fs_attrs +}; + +static const struct of_device_id npu_dt_match[] = { + { .compatible = "qcom,msm-npu",}, + {} +}; + +static struct platform_driver npu_driver = { + .probe = npu_probe, + .remove = npu_remove, +#if defined(CONFIG_PM) + .suspend = npu_suspend, + .resume = npu_resume, +#endif + .driver = { + .name = "msm_npu", + .of_match_table = npu_dt_match, + .pm = NULL, + }, +}; + +static const struct file_operations npu_fops = { + .owner = THIS_MODULE, + .open = npu_open, + .release = npu_close, + .unlocked_ioctl = npu_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = npu_ioctl, +#endif +}; + +static const struct thermal_cooling_device_ops npu_cooling_ops = { + .get_max_state = npu_get_max_state, + .get_cur_state = npu_get_cur_state, + .set_cur_state = npu_set_cur_state, +}; + +/* ------------------------------------------------------------------------- + * SysFS - 
Capabilities + * ------------------------------------------------------------------------- + */ +static ssize_t caps_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t ret = 0; + struct npu_device *npu_dev = dev_get_drvdata(dev); + + if (!npu_enable_core_power(npu_dev)) { + if (scnprintf(buf, PAGE_SIZE, "hw_version :0x%X", + REGR(npu_dev, NPU_HW_VERSION)) < 0) + ret = -EINVAL; + npu_disable_core_power(npu_dev); + } else + ret = -EPERM; + + return ret; +} + +/* ------------------------------------------------------------------------- + * SysFS - Power State + * ------------------------------------------------------------------------- + */ +static ssize_t pwr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + (pwr->pwr_vote_num > 0) ? "on" : "off"); +} + +static ssize_t pwr_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + bool pwr_on = false; + + if (strtobool(buf, &pwr_on) < 0) + return -EINVAL; + + if (pwr_on) { + if (npu_enable_core_power(npu_dev)) + return -EPERM; + } else { + npu_disable_core_power(npu_dev); + } + + return count; +} + +/* ------------------------------------------------------------------------- + * SysFS - perf_mode_override + * ------------------------------------------------------------------------- + */ +static ssize_t perf_mode_override_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->perf_mode_override); +} + +static ssize_t perf_mode_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct npu_client client; + struct 
npu_device *npu_dev = dev_get_drvdata(dev); + uint32_t val; + int rc; + + rc = kstrtou32(buf, 10, &val); + if (rc) { + pr_err("Invalid input for perf mode setting\n"); + return -EINVAL; + } + + val = min(val, npu_dev->pwrctrl.num_pwrlevels); + npu_dev->pwrctrl.perf_mode_override = val; + pr_info("setting uc_pwrlevel_override to %d\n", val); + + client.npu_dev = npu_dev; + npu_host_set_perf_mode(&client, 0, val); + + return count; +} + +static ssize_t dcvs_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->dcvs_mode); +} + +static ssize_t dcvs_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + struct msm_npu_property prop; + uint32_t val; + int ret = 0; + + ret = kstrtou32(buf, 10, &val); + if (ret) { + pr_err("Invalid input for dcvs mode setting\n"); + return -EINVAL; + } + + val = min(val, (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1)); + pr_debug("sysfs: setting dcvs_mode to %d\n", val); + + prop.prop_id = MSM_NPU_PROP_ID_DCVS_MODE; + prop.num_of_params = 1; + prop.network_hdl = 0; + prop.prop_param[0] = val; + + ret = npu_host_set_fw_property(npu_dev, &prop); + if (ret) { + pr_err("npu_host_set_fw_property failed %d\n", ret); + return ret; + } + + npu_dev->pwrctrl.dcvs_mode = val; + + return count; +} + +/* ------------------------------------------------------------------------- + * SysFS - Delayed FW unload + * ------------------------------------------------------------------------- + */ +static ssize_t fw_unload_delay_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + npu_dev->host_ctx.fw_unload_delay_ms); +} + +static ssize_t 
fw_unload_delay_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + uint32_t val; + int rc; + + rc = kstrtou32(buf, 10, &val); + if (rc) { + pr_err("Invalid input for fw unload delay setting\n"); + return -EINVAL; + } + + npu_dev->host_ctx.fw_unload_delay_ms = val; + pr_debug("setting fw_unload_delay_ms to %d\n", val); + + return count; +} + +/* ------------------------------------------------------------------------- + * SysFS - firmware state + * ------------------------------------------------------------------------- + */ +static ssize_t fw_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + (npu_dev->host_ctx.fw_state == FW_ENABLED) ? + "on" : "off"); +} + +static ssize_t fw_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct npu_device *npu_dev = dev_get_drvdata(dev); + struct npu_client client; + bool enable = false; + int rc; + + if (strtobool(buf, &enable) < 0) + return -EINVAL; + + client.npu_dev = npu_dev; + rc = npu_set_fw_state(&client, enable ? 1 : 0); + + if (rc) { + pr_err("%s fw failed\n", enable ? 
"enable" : "disable"); + return rc; + } + + return count; +} + +/* ------------------------------------------------------------------------- + * Power Related + * ------------------------------------------------------------------------- + */ +static enum npu_power_level cdsprm_corner_to_npu_power_level( + enum cdsprm_npu_corner corner) +{ + enum npu_power_level pwr_lvl = NPU_PWRLEVEL_TURBO_L1; + + switch (corner) { + case CDSPRM_NPU_CLK_OFF: + pwr_lvl = NPU_PWRLEVEL_OFF; + break; + case CDSPRM_NPU_MIN_SVS: + pwr_lvl = NPU_PWRLEVEL_MINSVS; + break; + case CDSPRM_NPU_LOW_SVS: + pwr_lvl = NPU_PWRLEVEL_LOWSVS; + break; + case CDSPRM_NPU_SVS: + pwr_lvl = NPU_PWRLEVEL_SVS; + break; + case CDSPRM_NPU_SVS_L1: + pwr_lvl = NPU_PWRLEVEL_SVS_L1; + break; + case CDSPRM_NPU_NOM: + pwr_lvl = NPU_PWRLEVEL_NOM; + break; + case CDSPRM_NPU_NOM_L1: + pwr_lvl = NPU_PWRLEVEL_NOM_L1; + break; + case CDSPRM_NPU_TURBO: + pwr_lvl = NPU_PWRLEVEL_TURBO; + break; + case CDSPRM_NPU_TURBO_L1: + default: + pwr_lvl = NPU_PWRLEVEL_TURBO_L1; + break; + } + + return pwr_lvl; +} + +static enum cdsprm_npu_corner npu_power_level_to_cdsprm_corner( + enum npu_power_level pwr_lvl) +{ + enum cdsprm_npu_corner corner = CDSPRM_NPU_MIN_SVS; + + switch (pwr_lvl) { + case NPU_PWRLEVEL_OFF: + corner = CDSPRM_NPU_CLK_OFF; + break; + case NPU_PWRLEVEL_MINSVS: + corner = CDSPRM_NPU_MIN_SVS; + break; + case NPU_PWRLEVEL_LOWSVS: + corner = CDSPRM_NPU_LOW_SVS; + break; + case NPU_PWRLEVEL_SVS: + corner = CDSPRM_NPU_SVS; + break; + case NPU_PWRLEVEL_SVS_L1: + corner = CDSPRM_NPU_SVS_L1; + break; + case NPU_PWRLEVEL_NOM: + corner = CDSPRM_NPU_NOM; + break; + case NPU_PWRLEVEL_NOM_L1: + corner = CDSPRM_NPU_NOM_L1; + break; + case NPU_PWRLEVEL_TURBO: + corner = CDSPRM_NPU_TURBO; + break; + case NPU_PWRLEVEL_TURBO_L1: + default: + corner = CDSPRM_NPU_TURBO_L1; + break; + } + + return corner; +} + +static int npu_set_cdsprm_corner_limit(enum cdsprm_npu_corner corner) +{ + struct npu_pwrctrl *pwr; + enum npu_power_level 
pwr_lvl; + + if (!g_npu_dev) + return 0; + + pwr = &g_npu_dev->pwrctrl; + pwr_lvl = cdsprm_corner_to_npu_power_level(corner); + pwr->cdsprm_pwrlevel = pwr_lvl; + pr_debug("power level from cdsp %d\n", pwr_lvl); + + return npu_set_power_level(g_npu_dev, false); +} + +static const struct cdsprm_npu_limit_cbs cdsprm_npu_limit_cbs = { + .set_corner_limit = npu_set_cdsprm_corner_limit, +}; + +int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable) +{ + if (!npu_dev->cxlimit_registered) + return 0; + + pr_debug("notify cxlimit %s activity\n", enable ? "enable" : "disable"); + + return cdsprm_cxlimit_npu_activity_notify(enable ? 1 : 0); + return 0; +} + +static uint32_t npu_notify_cdsprm_cxlimit_corner( + struct npu_device *npu_dev, uint32_t pwr_lvl) +{ + uint32_t corner, pwr_lvl_to_set; + + if (!npu_dev->cxlimit_registered) + return pwr_lvl; + + corner = npu_power_level_to_cdsprm_corner(pwr_lvl); + corner = cdsprm_cxlimit_npu_corner_notify(corner); + pwr_lvl_to_set = cdsprm_corner_to_npu_power_level(corner); + pr_debug("Notify cdsprm %d:%d\n", pwr_lvl, + pwr_lvl_to_set); + + return pwr_lvl_to_set; +} + +static int npu_cdsprm_cxlimit_init(struct npu_device *npu_dev) +{ + bool enabled; + int ret = 0; + + enabled = of_property_read_bool(npu_dev->pdev->dev.of_node, + "qcom,npu-cxlimit-enable"); + pr_debug("qcom,npu-xclimit-enable is %s\n", enabled ? 
"true" : "false"); + + npu_dev->cxlimit_registered = false; + if (enabled) { + ret = cdsprm_cxlimit_npu_limit_register(&cdsprm_npu_limit_cbs); + ret = 0; + if (ret) { + pr_err("register cxlimit npu limit failed\n"); + } else { + pr_debug("register cxlimit npu limit succeeds\n"); + npu_dev->cxlimit_registered = true; + } + } + + return ret; +} + +static int npu_cdsprm_cxlimit_deinit(struct npu_device *npu_dev) +{ + int ret = 0; + + if (npu_dev->cxlimit_registered) { + ret = cdsprm_cxlimit_npu_limit_deregister(); + ret = 0; + if (ret) + pr_err("deregister cxlimit npu limit failed\n"); + npu_dev->cxlimit_registered = false; + } + + return ret; +} + +int npu_enable_core_power(struct npu_device *npu_dev) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + int ret = 0; + + mutex_lock(&npu_dev->dev_lock); + if (!pwr->pwr_vote_num) { + npu_enable_icc_bw(npu_dev); + ret = npu_enable_regulators(npu_dev); + if (ret) { + npu_disable_icc_bw(npu_dev); + goto fail; + } + + + ret = npu_enable_core_clocks(npu_dev); + if (ret) { + npu_disable_regulators(npu_dev); + pwr->pwr_vote_num = 0; + npu_disable_icc_bw(npu_dev); + goto fail; + } + npu_resume_devbw(npu_dev); + } + pwr->pwr_vote_num++; +fail: + mutex_unlock(&npu_dev->dev_lock); + + return ret; +} + +void npu_disable_core_power(struct npu_device *npu_dev) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + mutex_lock(&npu_dev->dev_lock); + if (!pwr->pwr_vote_num) { + mutex_unlock(&npu_dev->dev_lock); + return; + } + + pwr->pwr_vote_num--; + if (!pwr->pwr_vote_num) { + npu_suspend_devbw(npu_dev); + npu_disable_core_clocks(npu_dev); + npu_disable_regulators(npu_dev); + npu_disable_icc_bw(npu_dev); + pwr->active_pwrlevel = pwr->default_pwrlevel; + pwr->uc_pwrlevel = pwr->max_pwrlevel; + pwr->cdsprm_pwrlevel = pwr->max_pwrlevel; + pwr->cur_dcvs_activity = pwr->num_pwrlevels; + pr_debug("setting back to power level=%d\n", + pwr->active_pwrlevel); + } + mutex_unlock(&npu_dev->dev_lock); +} + +static int npu_enable_core_clocks(struct 
npu_device *npu_dev) +{ + return npu_enable_clocks(npu_dev, false); +} + +static void npu_disable_core_clocks(struct npu_device *npu_dev) +{ + return npu_disable_clocks(npu_dev, false); +} + +int npu_enable_post_pil_clocks(struct npu_device *npu_dev) +{ + return npu_enable_clocks(npu_dev, true); +} + +void npu_disable_post_pil_clocks(struct npu_device *npu_dev) +{ + npu_disable_clocks(npu_dev, true); +} + +static uint32_t npu_power_level_from_index(struct npu_device *npu_dev, + uint32_t index) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + if (index >= pwr->num_pwrlevels) + index = pwr->num_pwrlevels - 1; + + return pwr->pwrlevels[index].pwr_level; +} + +static uint32_t npu_power_level_to_index(struct npu_device *npu_dev, + uint32_t pwr_lvl) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + int i; + + for (i = 0; i < pwr->num_pwrlevels; i++) { + if (pwr->pwrlevels[i].pwr_level > pwr_lvl) + break; + } + + + return i == 0 ? 0 : i - 1; +} + +static uint32_t npu_calc_power_level(struct npu_device *npu_dev) +{ + uint32_t ret_level; + uint32_t therm_pwr_level = npu_dev->thermalctrl.pwr_level; + uint32_t active_pwr_level = npu_dev->pwrctrl.active_pwrlevel; + uint32_t uc_pwr_level = npu_dev->pwrctrl.uc_pwrlevel; + + /* + * pick the lowese power level between thermal power and usecase power + * settings + */ + ret_level = min(therm_pwr_level, uc_pwr_level); + pr_debug("%s therm=%d active=%d uc=%d set level=%d\n", + __func__, therm_pwr_level, active_pwr_level, uc_pwr_level, + ret_level); + + return ret_level; +} + +static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct npu_pwrlevel *pwrlevel; + int i, ret = 0; + uint32_t pwr_level_to_set, pwr_level_to_cdsprm, pwr_level_idx; + + /* get power level to set */ + pwr_level_to_set = npu_calc_power_level(npu_dev); + pwr_level_to_cdsprm = pwr_level_to_set; + + if (!pwr->pwr_vote_num) { + pr_debug("power is not enabled during set request\n"); 
+ pwr->active_pwrlevel = min(pwr_level_to_set, + npu_dev->pwrctrl.cdsprm_pwrlevel); + return 0; + } + + /* notify cxlimit to get allowed power level */ + if ((pwr_level_to_set > pwr->active_pwrlevel) && notify_cxlimit) + pwr_level_to_set = npu_notify_cdsprm_cxlimit_corner( + npu_dev, pwr_level_to_cdsprm); + + pwr_level_to_set = min(pwr_level_to_set, + npu_dev->pwrctrl.cdsprm_pwrlevel); + + /* if the same as current, dont do anything */ + if (pwr_level_to_set == pwr->active_pwrlevel) { + pr_debug("power level %d doesn't change\n", pwr_level_to_set); + return 0; + } + + pr_debug("setting power level to [%d]\n", pwr_level_to_set); + pwr_level_idx = npu_power_level_to_index(npu_dev, pwr_level_to_set); + pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr_level_idx]; + + for (i = 0; i < npu_dev->core_clk_num; i++) { + if (npu_is_exclude_rate_clock( + npu_dev->core_clks[i].clk_name)) + continue; + + if (npu_dev->host_ctx.fw_state == FW_DISABLED) { + if (npu_is_post_clock( + npu_dev->core_clks[i].clk_name)) + continue; + } + + pr_debug("requested rate of clock [%s] to [%ld]\n", + npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]); + + ret = clk_set_rate(npu_dev->core_clks[i].clk, + pwrlevel->clk_freq[i]); + if (ret) { + pr_debug("clk_set_rate %s to %ld failed with %d\n", + npu_dev->core_clks[i].clk_name, + pwrlevel->clk_freq[i], ret); + break; + } + } + + if ((pwr_level_to_cdsprm < pwr->active_pwrlevel) && notify_cxlimit) { + npu_notify_cdsprm_cxlimit_corner(npu_dev, + pwr_level_to_cdsprm); + pr_debug("Notify cdsprm(post) %d\n", pwr_level_to_cdsprm); + } + + pwr->active_pwrlevel = pwr_level_to_set; + return ret; +} + +int npu_set_uc_power_level(struct npu_device *npu_dev, + uint32_t perf_mode) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + uint32_t uc_pwrlevel_to_set; + + uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev, + perf_mode - 1); + + if (uc_pwrlevel_to_set > pwr->max_pwrlevel) + uc_pwrlevel_to_set = pwr->max_pwrlevel; + + pwr->uc_pwrlevel = 
uc_pwrlevel_to_set; + return npu_set_power_level(npu_dev, true); +} + +/* ------------------------------------------------------------------------- + * Bandwidth Related + * ------------------------------------------------------------------------- + */ +static void npu_save_bw_registers(struct npu_device *npu_dev) +{ + int i; + + if (!npu_dev->bwmon_io.base) + return; + + for (i = 0; i < ARRAY_SIZE(npu_saved_bw_registers); i++) { + npu_saved_bw_registers[i].val = npu_bwmon_reg_read(npu_dev, + npu_saved_bw_registers[i].off); + npu_saved_bw_registers[i].valid = true; + } +} + +static void npu_restore_bw_registers(struct npu_device *npu_dev) +{ + int i; + + if (!npu_dev->bwmon_io.base) + return; + + for (i = 0; i < ARRAY_SIZE(npu_saved_bw_registers); i++) { + if (npu_saved_bw_registers[i].valid) { + npu_bwmon_reg_write(npu_dev, + npu_saved_bw_registers[i].off, + npu_saved_bw_registers[i].val); + npu_saved_bw_registers[i].valid = false; + } + } +} + +static void npu_suspend_devbw(struct npu_device *npu_dev) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + int ret; + + if (pwr->bwmon_enabled && pwr->devbw) { + pwr->bwmon_enabled = 0; + //TODO + //ret = devfreq_suspend_icc(pwr->devbw); + ret = 0; + if (ret) + pr_err("devfreq_suspend_devbw failed rc:%d\n", + ret); + npu_save_bw_registers(npu_dev); + } +} + +static void npu_resume_devbw(struct npu_device *npu_dev) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + int ret; + + if (!pwr->bwmon_enabled && pwr->devbw) { + pwr->bwmon_enabled = 1; + npu_restore_bw_registers(npu_dev); + //TODO + //ret = devfreq_resume_icc(pwr->devbw); + ret = 0; + if (ret) + pr_err("devfreq_resume_devbw failed rc:%d\n", ret); + } +} + +/* ------------------------------------------------------------------------- + * Clocks Related + * ------------------------------------------------------------------------- + */ +static bool npu_is_post_clock(const char *clk_name) +{ + int ret = false; + int i; + + for (i = 0; i < ARRAY_SIZE(npu_post_clocks); 
i++) { + if (!strcmp(clk_name, npu_post_clocks[i])) { + ret = true; + break; + } + } + return ret; +} + +static bool npu_is_exclude_rate_clock(const char *clk_name) +{ + int ret = false; + int i; + + for (i = 0; i < ARRAY_SIZE(npu_exclude_rate_clocks); i++) { + if (!strcmp(clk_name, npu_exclude_rate_clocks[i])) { + ret = true; + break; + } + } + return ret; +} + +static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil) +{ + int i, rc = 0; + struct npu_clk *core_clks = npu_dev->core_clks; + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct npu_pwrlevel *pwrlevel; + uint32_t pwrlevel_to_set, pwrlevel_idx; + + pwrlevel_to_set = pwr->active_pwrlevel; + if (!post_pil) { + pwrlevel_to_set = npu_notify_cdsprm_cxlimit_corner( + npu_dev, pwrlevel_to_set); + pr_debug("Notify cdsprm %d\n", pwrlevel_to_set); + pwr->active_pwrlevel = pwrlevel_to_set; + } + + pwrlevel_idx = npu_power_level_to_index(npu_dev, pwrlevel_to_set); + pwrlevel = &pwr->pwrlevels[pwrlevel_idx]; + for (i = 0; i < npu_dev->core_clk_num; i++) { + if (post_pil) { + if (!npu_is_post_clock(core_clks[i].clk_name)) + continue; + } else { + if (npu_is_post_clock(core_clks[i].clk_name)) + continue; + } + + pr_debug("enabling clock %s\n", core_clks[i].clk_name); + + rc = clk_prepare_enable(core_clks[i].clk); + if (rc) { + pr_err("%s enable failed\n", + core_clks[i].clk_name); + break; + } + + if (npu_is_exclude_rate_clock(core_clks[i].clk_name)) + continue; + + pr_debug("setting rate of clock %s to %ld\n", + core_clks[i].clk_name, pwrlevel->clk_freq[i]); + + rc = clk_set_rate(core_clks[i].clk, + pwrlevel->clk_freq[i]); + /* not fatal error, keep using previous clk rate */ + if (rc) { + pr_err("clk_set_rate %s to %ld failed\n", + core_clks[i].clk_name, + pwrlevel->clk_freq[i]); + rc = 0; + } + } + + if (rc) { + for (i--; i >= 0; i--) { + if (post_pil) { + if (!npu_is_post_clock(core_clks[i].clk_name)) + continue; + } else { + if (npu_is_post_clock(core_clks[i].clk_name)) + continue; + } + 
pr_debug("disabling clock %s\n", core_clks[i].clk_name); + clk_disable_unprepare(core_clks[i].clk); + } + } + + return rc; +} + +static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil) +{ + int i = 0; + struct npu_clk *core_clks = npu_dev->core_clks; + + if (!post_pil) { + npu_notify_cdsprm_cxlimit_corner(npu_dev, NPU_PWRLEVEL_OFF); + pr_debug("Notify cdsprm clock off\n"); + } + + for (i = npu_dev->core_clk_num - 1; i >= 0 ; i--) { + if (post_pil) { + if (!npu_is_post_clock(core_clks[i].clk_name)) + continue; + } else { + if (npu_is_post_clock(core_clks[i].clk_name)) + continue; + } + + pr_debug("disabling clock %s\n", core_clks[i].clk_name); + clk_disable_unprepare(core_clks[i].clk); + } +} + +/* ------------------------------------------------------------------------- + * Thermal Functions + * ------------------------------------------------------------------------- + */ +static int npu_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct npu_device *npu_dev = cdev->devdata; + struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl; + + pr_debug("enter %s thermal max state=%lu\n", __func__, + thermalctrl->max_state); + + *state = thermalctrl->max_state; + + return 0; +} + +static int npu_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct npu_device *npu_dev = cdev->devdata; + struct npu_thermalctrl *thermal = &npu_dev->thermalctrl; + + pr_debug("enter %s thermal current state=%lu\n", __func__, + thermal->current_state); + + *state = thermal->current_state; + + return 0; +} + +static int +npu_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) +{ + struct npu_device *npu_dev = cdev->devdata; + struct npu_thermalctrl *thermal = &npu_dev->thermalctrl; + + pr_debug("enter %s request state=%lu\n", __func__, state); + if (state > thermal->max_state) + return -EINVAL; + + thermal->current_state = state; + thermal->pwr_level = 
npu_power_level_from_index(npu_dev, + thermal->max_state - state); + + return npu_set_power_level(npu_dev, true); +} + +/* ------------------------------------------------------------------------- + * Regulator Related + * ------------------------------------------------------------------------- + */ +static int npu_enable_regulators(struct npu_device *npu_dev) +{ + int i = 0; + int rc = 0; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_regulator *regulators = npu_dev->regulators; + + if (!host_ctx->power_vote_num) { + for (i = 0; i < npu_dev->regulator_num; i++) { + rc = regulator_enable(regulators[i].regulator); + if (rc < 0) { + pr_err("%s enable failed\n", + regulators[i].regulator_name); + break; + } + pr_debug("regulator %s enabled\n", + regulators[i].regulator_name); + } + } + host_ctx->power_vote_num++; + return rc; +} + +static void npu_disable_regulators(struct npu_device *npu_dev) +{ + int i = 0; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_regulator *regulators = npu_dev->regulators; + + if (host_ctx->power_vote_num > 0) { + for (i = 0; i < npu_dev->regulator_num; i++) { + regulator_disable(regulators[i].regulator); + pr_debug("regulator %s disabled\n", + regulators[i].regulator_name); + } + host_ctx->power_vote_num--; + } +} + +/* ------------------------------------------------------------------------- + * Interrupt Related + * ------------------------------------------------------------------------- + */ +int npu_enable_irq(struct npu_device *npu_dev) +{ + int i; + + /* clear pending irq state */ + REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_OWNER(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_WDOG_IRQ_OWNER(0), NPU_WDOG_IRQ_MASK); + + for (i = 0; i < NPU_MAX_IRQ; i++) { + if (npu_dev->irq[i].irq != 0) { + 
enable_irq(npu_dev->irq[i].irq); + pr_debug("enable irq %d\n", npu_dev->irq[i].irq); + } + } + + return 0; +} + +void npu_disable_irq(struct npu_device *npu_dev) +{ + int i; + + for (i = 0; i < NPU_MAX_IRQ; i++) { + if (npu_dev->irq[i].irq != 0) { + disable_irq(npu_dev->irq[i].irq); + pr_debug("disable irq %d\n", npu_dev->irq[i].irq); + } + } + + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0), 0); + /* clear pending irq state */ + REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), NPU_ERROR_IRQ_MASK); +} + +/* ------------------------------------------------------------------------- + * System Cache + * ------------------------------------------------------------------------- + */ +int npu_enable_sys_cache(struct npu_device *npu_dev) +{ + int rc = 0; + uint32_t reg_val = 0; + + if (!npu_dev->host_ctx.sys_cache_disable) { + npu_dev->sys_cache = llcc_slice_getd(LLCC_NPU); + if (IS_ERR_OR_NULL(npu_dev->sys_cache)) { + pr_warn("unable to init sys cache\n"); + npu_dev->sys_cache = NULL; + npu_dev->host_ctx.sys_cache_disable = true; + return 0; + } + + /* set npu side regs - program SCID */ + reg_val = NPU_CACHE_ATTR_IDn___POR | SYS_CACHE_SCID; + + REGW(npu_dev, NPU_CACHE_ATTR_IDn(0), reg_val); + REGW(npu_dev, NPU_CACHE_ATTR_IDn(1), reg_val); + REGW(npu_dev, NPU_CACHE_ATTR_IDn(2), reg_val); + REGW(npu_dev, NPU_CACHE_ATTR_IDn(3), reg_val); + REGW(npu_dev, NPU_CACHE_ATTR_IDn(4), reg_val); + + pr_debug("prior to activate sys cache\n"); + rc = llcc_slice_activate(npu_dev->sys_cache); + if (rc) { + pr_warn("failed to activate sys cache\n"); + llcc_slice_putd(npu_dev->sys_cache); + npu_dev->sys_cache = NULL; + return 0; + } + + pr_debug("sys cache activated\n"); + } + + return rc; +} + +void npu_disable_sys_cache(struct npu_device *npu_dev) +{ + int rc = 0; + + if (!npu_dev->host_ctx.sys_cache_disable) { + if (npu_dev->sys_cache) { + rc = llcc_slice_deactivate(npu_dev->sys_cache); + if (rc) { + pr_err("failed to deactivate sys 
cache\n"); + return; + } + pr_debug("sys cache deactivated\n"); + llcc_slice_putd(npu_dev->sys_cache); + npu_dev->sys_cache = NULL; + } + } +} + +/* ------------------------------------------------------------------------- + * Open/Close + * ------------------------------------------------------------------------- + */ +static int npu_open(struct inode *inode, struct file *file) +{ + struct npu_device *npu_dev = container_of(inode->i_cdev, + struct npu_device, cdev); + struct npu_client *client; + + client = kmalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->npu_dev = npu_dev; + mutex_init(&client->list_lock); + INIT_LIST_HEAD(&(client->mapped_buffer_list)); + file->private_data = client; + + return 0; +} + +static int npu_close(struct inode *inode, struct file *file) +{ + struct npu_client *client = file->private_data; + + npu_host_cleanup_networks(client); + mutex_destroy(&client->list_lock); + kfree(client); + return 0; +} + +/* ------------------------------------------------------------------------- + * IOCTL Implementations + * ------------------------------------------------------------------------- + */ +static int npu_get_info(struct npu_client *client, unsigned long arg) +{ + struct npu_device *npu_dev = client->npu_dev; + struct msm_npu_get_info_ioctl req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + ret = npu_host_get_info(npu_dev, &req); + + if (ret) { + pr_err("npu_host_get_info failed\n"); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + return 0; +} + +static int npu_map_buf(struct npu_client *client, unsigned long arg) +{ + struct msm_npu_map_buf_ioctl req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + 
pr_err("fail to copy from user\n"); + return -EFAULT; + } + + ret = npu_host_map_buf(client, &req); + + if (ret) { + pr_err("npu_host_map_buf failed\n"); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + return 0; +} + +static int npu_unmap_buf(struct npu_client *client, unsigned long arg) +{ + struct msm_npu_unmap_buf_ioctl req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + ret = npu_host_unmap_buf(client, &req); + + if (ret) { + pr_err("npu_host_unmap_buf failed\n"); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + return 0; +} + +static int npu_load_network(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_load_network_ioctl req; + struct msm_npu_unload_network_ioctl unload_req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + pr_debug("network load with perf request %d\n", req.perf_mode); + + ret = npu_host_load_network(client, &req); + if (ret) { + pr_err("npu_host_load_network failed %d\n", ret); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("fail to copy to user\n"); + ret = -EFAULT; + unload_req.network_hdl = req.network_hdl; + npu_host_unload_network(client, &unload_req); + } + return ret; +} + +static int npu_load_network_v2(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_load_network_ioctl_v2 req; + struct msm_npu_unload_network_ioctl unload_req; + void __user *argp = (void __user *)arg; + struct msm_npu_patch_info_v2 *patch_info = NULL; + int ret; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + 
pr_err("fail to copy from user\n"); + return -EFAULT; + } + + if (req.patch_info_num > NPU_MAX_PATCH_NUM) { + pr_err("Invalid patch info num %d[max:%d]\n", + req.patch_info_num, NPU_MAX_PATCH_NUM); + return -EINVAL; + } + + if (req.patch_info_num) { + patch_info = kmalloc_array(req.patch_info_num, + sizeof(*patch_info), GFP_KERNEL); + if (!patch_info) + return -ENOMEM; + + ret = copy_from_user(patch_info, + (void __user *)req.patch_info, + req.patch_info_num * sizeof(*patch_info)); + if (ret) { + pr_err("fail to copy patch info\n"); + kfree(patch_info); + return -EFAULT; + } + } + + pr_debug("network load with perf request %d\n", req.perf_mode); + + ret = npu_host_load_network_v2(client, &req, patch_info); + + kfree(patch_info); + if (ret) { + pr_err("npu_host_load_network_v2 failed %d\n", ret); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("fail to copy to user\n"); + ret = -EFAULT; + unload_req.network_hdl = req.network_hdl; + npu_host_unload_network(client, &unload_req); + } + + return ret; +} + +static int npu_unload_network(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_unload_network_ioctl req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + ret = npu_host_unload_network(client, &req); + + if (ret) { + pr_err("npu_host_unload_network failed %d\n", ret); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + return 0; +} + +static int npu_exec_network(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_exec_network_ioctl req; + void __user *argp = (void __user *)arg; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + if ((req.input_layer_num > 
MSM_NPU_MAX_INPUT_LAYER_NUM) || + (req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) { + pr_err("Invalid input/out layer num %d[max:%d] %d[max:%d]\n", + req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM, + req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM); + return -EINVAL; + } + + if (!req.patching_required) { + pr_err("Only support patched network\n"); + return -EINVAL; + } + + ret = npu_host_exec_network(client, &req); + + if (ret) { + pr_err("npu_host_exec_network failed %d\n", ret); + return ret; + } + + ret = copy_to_user(argp, &req, sizeof(req)); + + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + return 0; +} + +static int npu_exec_network_v2(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_exec_network_ioctl_v2 req; + void __user *argp = (void __user *)arg; + struct msm_npu_patch_buf_info *patch_buf_info = NULL; + int ret; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + if ((req.patch_buf_info_num > NPU_MAX_PATCH_NUM) || + (req.patch_buf_info_num == 0)) { + pr_err("Invalid patch buf info num %d[max:%d]\n", + req.patch_buf_info_num, NPU_MAX_PATCH_NUM); + return -EINVAL; + } + + if (req.stats_buf_size > NPU_MAX_STATS_BUF_SIZE) { + pr_err("Invalid stats buffer size %d max %d\n", + req.stats_buf_size, NPU_MAX_STATS_BUF_SIZE); + return -EINVAL; + } + + if (req.patch_buf_info_num) { + patch_buf_info = kmalloc_array(req.patch_buf_info_num, + sizeof(*patch_buf_info), GFP_KERNEL); + if (!patch_buf_info) + return -ENOMEM; + + ret = copy_from_user(patch_buf_info, + (void __user *)req.patch_buf_info, + req.patch_buf_info_num * sizeof(*patch_buf_info)); + if (ret) { + pr_err("fail to copy patch buf info\n"); + kfree(patch_buf_info); + return -EFAULT; + } + } + + ret = npu_host_exec_network_v2(client, &req, patch_buf_info); + + kfree(patch_buf_info); + if (ret) { + pr_err("npu_host_exec_network_v2 failed %d\n", ret); + return ret; + } + + ret = 
copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("fail to copy to user\n"); + ret = -EFAULT; + } + + return ret; +} + +static int npu_set_fw_state(struct npu_client *client, uint32_t enable) +{ + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + int rc = 0; + + if (host_ctx->network_num > 0) { + pr_err("Need to unload network first\n"); + mutex_unlock(&npu_dev->dev_lock); + return -EINVAL; + } + + if (enable) { + pr_debug("enable fw\n"); + rc = fw_init(npu_dev); + if (rc) { + pr_err("enable fw failed\n"); + } else { + host_ctx->npu_init_cnt++; + pr_debug("npu_init_cnt %d\n", + host_ctx->npu_init_cnt); + /* set npu to lowest power level */ + if (npu_set_uc_power_level(npu_dev, 1)) + pr_warn("Failed to set uc power level\n"); + } + } else if (host_ctx->npu_init_cnt > 0) { + pr_debug("disable fw\n"); + fw_deinit(npu_dev, false, true); + host_ctx->npu_init_cnt--; + pr_debug("npu_init_cnt %d\n", host_ctx->npu_init_cnt); + } else { + pr_err("can't disable fw %d\n", host_ctx->npu_init_cnt); + } + + return rc; +} + +static int npu_set_property(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_property prop; + void __user *argp = (void __user *)arg; + int ret = -EINVAL; + + ret = copy_from_user(&prop, argp, sizeof(prop)); + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + switch (prop.prop_id) { + case MSM_NPU_PROP_ID_FW_STATE: + ret = npu_set_fw_state(client, + (uint32_t)prop.prop_param[0]); + break; + case MSM_NPU_PROP_ID_PERF_MODE: + ret = npu_host_set_perf_mode(client, + (uint32_t)prop.network_hdl, + (uint32_t)prop.prop_param[0]); + break; + default: + ret = npu_host_set_fw_property(client->npu_dev, &prop); + if (ret) + pr_err("npu_host_set_fw_property failed\n"); + break; + } + + return ret; +} + +static int npu_get_property(struct npu_client *client, + unsigned long arg) +{ + struct msm_npu_property prop; + void __user *argp = (void __user *)arg; + int ret = 
-EINVAL; + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + ret = copy_from_user(&prop, argp, sizeof(prop)); + if (ret) { + pr_err("fail to copy from user\n"); + return -EFAULT; + } + + switch (prop.prop_id) { + case MSM_NPU_PROP_ID_FW_STATE: + prop.prop_param[0] = host_ctx->fw_state; + break; + case MSM_NPU_PROP_ID_PERF_MODE: + prop.prop_param[0] = npu_host_get_perf_mode(client, + (uint32_t)prop.network_hdl); + break; + case MSM_NPU_PROP_ID_PERF_MODE_MAX: + prop.prop_param[0] = npu_dev->pwrctrl.num_pwrlevels; + break; + case MSM_NPU_PROP_ID_DRV_VERSION: + prop.prop_param[0] = 0; + break; + case MSM_NPU_PROP_ID_HARDWARE_VERSION: + prop.prop_param[0] = npu_dev->hw_version; + break; + default: + ret = npu_host_get_fw_property(client->npu_dev, &prop); + if (ret) { + pr_err("npu_host_set_fw_property failed\n"); + return ret; + } + break; + } + + ret = copy_to_user(argp, &prop, sizeof(prop)); + if (ret) { + pr_err("fail to copy to user\n"); + return -EFAULT; + } + + return ret; +} + +static long npu_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + struct npu_client *client = file->private_data; + + switch (cmd) { + case MSM_NPU_GET_INFO: + ret = npu_get_info(client, arg); + break; + case MSM_NPU_MAP_BUF: + ret = npu_map_buf(client, arg); + break; + case MSM_NPU_UNMAP_BUF: + ret = npu_unmap_buf(client, arg); + break; + case MSM_NPU_LOAD_NETWORK: + ret = npu_load_network(client, arg); + break; + case MSM_NPU_LOAD_NETWORK_V2: + ret = npu_load_network_v2(client, arg); + break; + case MSM_NPU_UNLOAD_NETWORK: + ret = npu_unload_network(client, arg); + break; + case MSM_NPU_EXEC_NETWORK: + ret = npu_exec_network(client, arg); + break; + case MSM_NPU_EXEC_NETWORK_V2: + ret = npu_exec_network_v2(client, arg); + break; + case MSM_NPU_SET_PROP: + ret = npu_set_property(client, arg); + break; + case MSM_NPU_GET_PROP: + ret = npu_get_property(client, arg); + break; + default: + 
pr_err("unexpected IOCTL %x\n", cmd); + } + + return ret; +} + +/* ------------------------------------------------------------------------- + * Device Tree Parsing + * ------------------------------------------------------------------------- + */ +static int npu_parse_dt_clock(struct npu_device *npu_dev) +{ + int rc = 0; + uint32_t i; + const char *clock_name; + int num_clk; + struct npu_clk *core_clks = npu_dev->core_clks; + struct platform_device *pdev = npu_dev->pdev; + + num_clk = of_property_count_strings(pdev->dev.of_node, + "clock-names"); + if (num_clk <= 0) { + pr_err("clocks are not defined\n"); + rc = -EINVAL; + goto clk_err; + } else if (num_clk > NUM_MAX_CLK_NUM) { + pr_err("number of clocks %d exceeds limit\n", num_clk); + rc = -EINVAL; + goto clk_err; + } + + npu_dev->core_clk_num = num_clk; + for (i = 0; i < num_clk; i++) { + of_property_read_string_index(pdev->dev.of_node, "clock-names", + i, &clock_name); + strscpy(core_clks[i].clk_name, clock_name, + sizeof(core_clks[i].clk_name)); + core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name); + if (IS_ERR(core_clks[i].clk)) { + pr_err("unable to get clk: %s\n", clock_name); + rc = -EINVAL; + break; + } + } + +clk_err: + return rc; +} + +static int npu_parse_dt_regulator(struct npu_device *npu_dev) +{ + int rc = 0; + uint32_t i; + const char *name; + int num; + struct npu_regulator *regulators = npu_dev->regulators; + struct platform_device *pdev = npu_dev->pdev; + + num = of_property_count_strings(pdev->dev.of_node, + "qcom,proxy-reg-names"); + if (num <= 0) { + rc = -EINVAL; + pr_err("regulator not defined\n"); + goto regulator_err; + } + if (num > NPU_MAX_REGULATOR_NUM) { + rc = -EINVAL; + pr_err("regulator number %d is over the limit %d\n", num, + NPU_MAX_REGULATOR_NUM); + num = NPU_MAX_REGULATOR_NUM; + } + + npu_dev->regulator_num = num; + for (i = 0; i < num; i++) { + of_property_read_string_index(pdev->dev.of_node, + "qcom,proxy-reg-names", i, &name); + strscpy(regulators[i].regulator_name, 
name, + sizeof(regulators[i].regulator_name)); + regulators[i].regulator = devm_regulator_get(&pdev->dev, name); + if (IS_ERR(regulators[i].regulator)) { + pr_err("unable to get regulator: %s\n", name); + rc = -EINVAL; + break; + } + } + +regulator_err: + return rc; +} + +static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, + struct device_node *node) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct device_node *child; + uint32_t init_level_index = 0, init_power_level; + uint32_t fmax, fmax_pwrlvl; + + pwr->num_pwrlevels = 0; + pwr->min_pwrlevel = NPU_PWRLEVEL_TURBO_L1; + pwr->max_pwrlevel = NPU_PWRLEVEL_MINSVS; + + for_each_available_child_of_node(node, child) { + uint32_t i = 0; + uint32_t index; + uint32_t pwr_level; + uint32_t clk_array_values[NUM_MAX_CLK_NUM]; + uint32_t clk_rate; + struct npu_pwrlevel *level; + + if (of_property_read_u32(child, "reg", &index)) { + pr_err("Can't find reg property\n"); + return -EINVAL; + } + + if (of_property_read_u32(child, "vreg", &pwr_level)) { + pr_err("Can't find vreg property\n"); + return -EINVAL; + } + + if (index >= NPU_MAX_PWRLEVELS) { + pr_err("pwrlevel index %d is out of range\n", + index); + continue; + } + + if (index >= pwr->num_pwrlevels) + pwr->num_pwrlevels = index + 1; + + if (of_property_read_u32_array(child, "clk-freq", + clk_array_values, npu_dev->core_clk_num)) { + pr_err("pwrlevel index %d read clk-freq failed %d\n", + index, npu_dev->core_clk_num); + return -EINVAL; + } + + level = &pwr->pwrlevels[index]; + level->pwr_level = pwr_level; + if (pwr->min_pwrlevel > pwr_level) + pwr->min_pwrlevel = pwr_level; + if (pwr->max_pwrlevel < pwr_level) + pwr->max_pwrlevel = pwr_level; + + for (i = 0; i < npu_dev->core_clk_num; i++) { + if (npu_is_exclude_rate_clock( + npu_dev->core_clks[i].clk_name)) + continue; + + clk_rate = clk_round_rate(npu_dev->core_clks[i].clk, + clk_array_values[i]); + pr_debug("clk %s rate [%u]:[%u]\n", + npu_dev->core_clks[i].clk_name, + clk_array_values[i], 
clk_rate); + level->clk_freq[i] = clk_rate; + } + } + + /* Read FMAX info if available */ + if (npu_dev->qfprom_io.base) { + fmax = (npu_qfprom_reg_read(npu_dev, + QFPROM_FMAX_REG_OFFSET) & QFPROM_FMAX_BITS_MASK) >> + QFPROM_FMAX_BITS_SHIFT; + pr_debug("fmax %x\n", fmax); + + switch (fmax) { + case 1: + case 2: + fmax_pwrlvl = NPU_PWRLEVEL_NOM; + break; + case 3: + fmax_pwrlvl = NPU_PWRLEVEL_SVS_L1; + break; + default: + fmax_pwrlvl = pwr->max_pwrlevel; + break; + } + + if (fmax_pwrlvl < pwr->max_pwrlevel) + pwr->max_pwrlevel = fmax_pwrlvl; + } + + of_property_read_u32(node, "initial-pwrlevel", &init_level_index); + pr_debug("initial-pwrlevel %d\n", init_level_index); + + if (init_level_index >= pwr->num_pwrlevels) + init_level_index = pwr->num_pwrlevels - 1; + + init_power_level = npu_power_level_from_index(npu_dev, + init_level_index); + if (init_power_level > pwr->max_pwrlevel) { + init_power_level = pwr->max_pwrlevel; + pr_debug("Adjust init power level to %d\n", init_power_level); + } + + pr_debug("init power level %d max %d min %d\n", init_power_level, + pwr->max_pwrlevel, pwr->min_pwrlevel); + pwr->active_pwrlevel = pwr->default_pwrlevel = init_power_level; + pwr->uc_pwrlevel = pwr->max_pwrlevel; + pwr->perf_mode_override = 0; + pwr->cdsprm_pwrlevel = pwr->max_pwrlevel; + pwr->cur_dcvs_activity = pwr->num_pwrlevels; + + return 0; +} + +static int npu_pwrctrl_init(struct npu_device *npu_dev) +{ + struct platform_device *pdev = npu_dev->pdev; + struct device_node *node; + int ret = 0; + struct platform_device *p2dev; + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + + /* Power levels */ + node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels"); + + if (!node) { + pr_err("unable to find 'qcom,npu-pwrlevels'\n"); + return -EINVAL; + } + + ret = npu_of_parse_pwrlevels(npu_dev, node); + if (ret) + return ret; + + /* Parse Bandwidth */ + node = of_parse_phandle(pdev->dev.of_node, + "qcom,npubw-dev", 0); + + if (node) { + /* Set to 1 initially - we 
assume bwmon is on */ + pwr->bwmon_enabled = 1; + p2dev = of_find_device_by_node(node); + if (p2dev) { + pwr->devbw = &p2dev->dev; + } else { + pr_err("parser power level failed\n"); + ret = -EINVAL; + return ret; + } + } else { + pr_warn("bwdev is not defined in dts\n"); + pwr->devbw = NULL; + } + + return ret; +} + +static int npu_thermalctrl_init(struct npu_device *npu_dev) +{ + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl; + + thermalctrl->max_state = pwr->num_pwrlevels - 1; + thermalctrl->current_state = 0; + return 0; +} + +static int npu_irq_init(struct npu_device *npu_dev) +{ + unsigned long irq_type; + int ret = 0, i; + + memcpy(npu_dev->irq, npu_irq_info, sizeof(npu_irq_info)); + for (i = 0; i < NPU_MAX_IRQ; i++) { + irq_type = npu_irq_info[i].irq_type; + npu_dev->irq[i].irq = platform_get_irq_byname( + npu_dev->pdev, npu_dev->irq[i].name); + if (npu_dev->irq[i].irq < 0) { + pr_err("get_irq for %s failed\n\n", + npu_dev->irq[i].name); + ret = -EINVAL; + break; + } + + pr_debug("irq %s: %d\n", npu_dev->irq[i].name, + npu_dev->irq[i].irq); + irq_set_status_flags(npu_dev->irq[i].irq, + IRQ_NOAUTOEN); + ret = devm_request_irq(&npu_dev->pdev->dev, + npu_dev->irq[i].irq, npu_intr_hdler, + irq_type, npu_dev->irq[i].name, + npu_dev); + if (ret) { + pr_err("devm_request_irq(%s:%d) failed\n", + npu_dev->irq[i].name, + npu_dev->irq[i].irq); + break; + } + } + + return ret; +} + +static int npu_mbox_init(struct npu_device *npu_dev) +{ + struct platform_device *pdev = npu_dev->pdev; + struct npu_mbox *mbox_aop = &npu_dev->mbox_aop; + + if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { + mbox_aop->client.dev = &pdev->dev; + mbox_aop->client.tx_block = true; + mbox_aop->client.tx_tout = MBOX_OP_TIMEOUTMS; + mbox_aop->client.knows_txdone = false; + + mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0); + if (IS_ERR(mbox_aop->chan)) { + pr_warn("aop mailbox is not available\n"); + 
mbox_aop->chan = NULL; + } + } + return 0; +} + +static void npu_mbox_deinit(struct npu_device *npu_dev) +{ + if (npu_dev->mbox_aop.chan) { + mbox_free_channel(npu_dev->mbox_aop.chan); + npu_dev->mbox_aop.chan = NULL; + } +} + +static int npu_hw_info_init(struct npu_device *npu_dev) +{ + int rc = 0; + + rc = npu_enable_core_power(npu_dev); + if (rc) { + pr_err("Failed to enable power\n"); + return rc; + } + + npu_dev->hw_version = REGR(npu_dev, NPU_HW_VERSION); + pr_debug("NPU_HW_VERSION 0x%x\n", npu_dev->hw_version); + npu_disable_core_power(npu_dev); + + return rc; +} + +static void npu_icc_init(struct npu_device *npu_dev) +{ + struct platform_device *pdev = npu_dev->pdev; + + npu_dev->icc_npu_cdspmem = of_icc_get(&pdev->dev, ICC_NPU_CDSPMEM); + if (IS_ERR_OR_NULL(npu_dev->icc_npu_cdspmem)) { + dev_err(&pdev->dev, "(%ld): failed getting %s path\n", + PTR_ERR(npu_dev->icc_npu_cdspmem), ICC_NPU_CDSPMEM); + npu_dev->icc_npu_cdspmem = NULL; + } else { + pr_info("get interconnects between npu and cdsp_mem successfully\n"); + } + npu_dev->icc_cpu_imemcfg = of_icc_get(&pdev->dev, ICC_CPU_IMEMCFG); + if (IS_ERR_OR_NULL(npu_dev->icc_cpu_imemcfg)) { + dev_err(&pdev->dev, "(%ld): failed getting %s path\n", + PTR_ERR(npu_dev->icc_cpu_imemcfg), ICC_CPU_IMEMCFG); + npu_dev->icc_cpu_imemcfg = NULL; + } else { + pr_info("get interconnects between cpu and imemcfg successfully\n"); + } +} + +static void npu_icc_deinit(struct npu_device *npu_dev) +{ + if (npu_dev->icc_npu_cdspmem) { + icc_put(npu_dev->icc_npu_cdspmem); + npu_dev->icc_npu_cdspmem = NULL; + } + if (npu_dev->icc_cpu_imemcfg) { + icc_put(npu_dev->icc_cpu_imemcfg); + npu_dev->icc_cpu_imemcfg = NULL; + } +} + +static void npu_enable_icc_bw(struct npu_device *npu_dev) +{ + int ret = 0; + + if (npu_dev->icc_npu_cdspmem) { + ret = icc_set_bw(npu_dev->icc_npu_cdspmem, 100, 100); + if (ret) + pr_err("set interconnects npu-cdspmem bw failed, ret: (%d)\n", ret); + } else { + pr_err("icc_path icc_npu_cdspmem is nullptr\n"); + } 
+ + if (npu_dev->icc_cpu_imemcfg) { + ret = icc_set_bw(npu_dev->icc_cpu_imemcfg, 100, 100); + if (ret) + pr_err("set interconnects cpu-imemcfg bw failed, ret: (%d)\n", ret); + } else { + pr_err("icc_path icc_cpu_imemcfg is nullptr\n"); + } +} + +static void npu_disable_icc_bw(struct npu_device *npu_dev) +{ + icc_set_bw(npu_dev->icc_npu_cdspmem, 0, 0); + icc_set_bw(npu_dev->icc_cpu_imemcfg, 0, 0); +} +static int npu_alloc_memory_region(struct npu_device *npu_dev) +{ + struct device *dev = &npu_dev->pdev->dev; + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!node) { + pr_err("no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) + return ret; + + npu_dev->fw_io.mem_phys = npu_dev->fw_io.mem_reloc = r.start; + npu_dev->fw_io.mem_size = resource_size(&r); + npu_dev->fw_io.mem_region = devm_ioremap_wc(dev, npu_dev->fw_io.mem_phys, + npu_dev->fw_io.mem_size); + if (!npu_dev->fw_io.mem_region) { + pr_err("unable to map memory region: %pa+%zx\n", + &r.start, npu_dev->fw_io.mem_size); + return -EBUSY; + } + + return 0; +} + +/* ------------------------------------------------------------------------- + * Probe/Remove + * ------------------------------------------------------------------------- + */ +static int npu_probe(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res = NULL; + struct npu_device *npu_dev = NULL; + struct thermal_cooling_device *tcdev = NULL; + + if (!qcom_scm_is_available()) { + pr_err("qcom scm is not available, npu probe defer\n"); + return -EPROBE_DEFER; + } + npu_dev = devm_kzalloc(&pdev->dev, + sizeof(struct npu_device), GFP_KERNEL); + if (!npu_dev) + return -EFAULT; + npu_dev->pdev = pdev; + mutex_init(&npu_dev->dev_lock); + + platform_set_drvdata(pdev, npu_dev); + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "core"); + if (!res) { + pr_err("unable to get core 
resource\n"); + rc = -ENODEV; + goto error_get_dev_num; + } + npu_dev->core_io.size = resource_size(res); + npu_dev->core_io.base = devm_ioremap(&pdev->dev, res->start, + npu_dev->core_io.size); + if (unlikely(!npu_dev->core_io.base)) { + pr_err("unable to map core\n"); + rc = -ENOMEM; + goto error_get_dev_num; + } + pr_debug("core phy address=0x%llx virt=%pK\n", + res->start, npu_dev->core_io.base); + + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "tcm"); + if (!res) { + pr_err("unable to get tcm resource\n"); + rc = -ENODEV; + goto error_get_dev_num; + } + npu_dev->tcm_io.size = resource_size(res); + npu_dev->tcm_io.base = devm_ioremap(&pdev->dev, res->start, + npu_dev->tcm_io.size); + if (unlikely(!npu_dev->tcm_io.base)) { + pr_err("unable to map tcm\n"); + rc = -ENOMEM; + goto error_get_dev_num; + } + pr_debug("core phy address=0x%llx virt=%pK\n", + res->start, npu_dev->tcm_io.base); + + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "bwmon"); + if (!res) { + pr_err("unable to get bwmon resource\n"); + rc = -ENODEV; + goto error_get_dev_num; + } + npu_dev->bwmon_io.size = resource_size(res); + npu_dev->bwmon_io.base = devm_ioremap(&pdev->dev, res->start, + npu_dev->bwmon_io.size); + if (unlikely(!npu_dev->bwmon_io.base)) { + pr_err("unable to map bwmon\n"); + rc = -ENOMEM; + goto error_get_dev_num; + } + pr_debug("bwmon phy address=0x%llx virt=%pK\n", + res->start, npu_dev->bwmon_io.base); + + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "qfprom_physical"); + if (!res) { + pr_info("unable to get qfprom_physical resource\n"); + } else { + npu_dev->qfprom_io.size = resource_size(res); + npu_dev->qfprom_io.base = devm_ioremap(&pdev->dev, res->start, + npu_dev->qfprom_io.size); + if (unlikely(!npu_dev->qfprom_io.base)) { + pr_err("unable to map qfprom_physical\n"); + rc = -ENOMEM; + goto error_get_dev_num; + } + pr_debug("qfprom_physical phy address=0x%llx virt=%pK\n", + res->start, npu_dev->qfprom_io.base); + } + if 
(npu_alloc_memory_region(npu_dev)) { + rc = -ENOMEM; + goto error_get_dev_num; + } + rc = npu_parse_dt_regulator(npu_dev); + if (rc) + goto error_get_dev_num; + + rc = npu_parse_dt_clock(npu_dev); + if (rc) + goto error_get_dev_num; + + //init interconnets path if exists in devicetree file + npu_icc_init(npu_dev); + + rc = npu_hw_info_init(npu_dev); + if (rc) + goto error_get_dev_num; + + rc = npu_pwrctrl_init(npu_dev); + if (rc) + goto error_get_dev_num; + + rc = npu_thermalctrl_init(npu_dev); + if (rc) + goto error_get_dev_num; + + rc = npu_irq_init(npu_dev); + if (rc) + goto error_get_dev_num; + + rc = npu_mbox_init(npu_dev); + if (rc) + goto error_get_dev_num; + + /* character device might be optional */ + rc = alloc_chrdev_region(&npu_dev->dev_num, 0, 1, DRIVER_NAME); + if (rc < 0) { + pr_err("alloc_chrdev_region failed: %d\n", rc); + goto error_get_dev_num; + } + + npu_dev->class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(npu_dev->class)) { + rc = PTR_ERR(npu_dev->class); + pr_err("class_create failed: %d\n", rc); + goto error_class_create; + } + + npu_dev->device = device_create(npu_dev->class, NULL, + npu_dev->dev_num, NULL, DRIVER_NAME); + if (IS_ERR(npu_dev->device)) { + rc = PTR_ERR(npu_dev->device); + pr_err("device_create failed: %d\n", rc); + goto error_class_device_create; + } + + cdev_init(&npu_dev->cdev, &npu_fops); + rc = cdev_add(&npu_dev->cdev, + MKDEV(MAJOR(npu_dev->dev_num), 0), 1); + if (rc < 0) { + pr_err("cdev_add failed %d\n", rc); + goto error_cdev_add; + } + dev_set_drvdata(npu_dev->device, npu_dev); + pr_debug("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev), + dev_get_drvdata(npu_dev->device)); + rc = sysfs_create_group(&npu_dev->device->kobj, &npu_fs_attr_group); + if (rc) { + pr_err("unable to register npu sysfs nodes\n"); + goto error_res_init; + } + + if (IS_ENABLED(CONFIG_THERMAL)) { + tcdev = thermal_of_cooling_device_register(pdev->dev.of_node, + "npu", npu_dev, + &npu_cooling_ops); + if (IS_ERR(tcdev)) { + 
dev_err(&pdev->dev, + "npu: failed to register npu as cooling device\n"); + rc = PTR_ERR(tcdev); + goto error_driver_init; + } + npu_dev->tcdev = tcdev; + } + + rc = npu_cdsprm_cxlimit_init(npu_dev); + if (rc) + goto error_driver_init; +#ifdef CONFIG_DEBUG_FS + npu_debugfs_init(npu_dev); +#endif + rc = npu_host_init(npu_dev); + if (rc) { + pr_err("unable to init host\n"); + goto error_driver_init; + } + + g_npu_dev = npu_dev; + + return rc; +error_driver_init: + npu_cdsprm_cxlimit_deinit(npu_dev); + if (npu_dev->tcdev) + thermal_cooling_device_unregister(npu_dev->tcdev); + sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group); +error_res_init: + cdev_del(&npu_dev->cdev); +error_cdev_add: + device_destroy(npu_dev->class, npu_dev->dev_num); +error_class_device_create: + class_destroy(npu_dev->class); +error_class_create: + unregister_chrdev_region(npu_dev->dev_num, 1); + npu_mbox_deinit(npu_dev); +error_get_dev_num: + npu_icc_deinit(npu_dev); + return rc; +} + +static int npu_remove(struct platform_device *pdev) +{ + struct npu_device *npu_dev; + + npu_dev = platform_get_drvdata(pdev); + npu_host_deinit(npu_dev); + npu_debugfs_deinit(npu_dev); + npu_cdsprm_cxlimit_deinit(npu_dev); + if (npu_dev->tcdev) + thermal_cooling_device_unregister(npu_dev->tcdev); + sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group); + cdev_del(&npu_dev->cdev); + device_destroy(npu_dev->class, npu_dev->dev_num); + class_destroy(npu_dev->class); + unregister_chrdev_region(npu_dev->dev_num, 1); + platform_set_drvdata(pdev, NULL); + npu_mbox_deinit(npu_dev); + npu_icc_deinit(npu_dev); + g_npu_dev = NULL; + + return 0; +} + +/* ------------------------------------------------------------------------- + * Suspend/Resume + * ------------------------------------------------------------------------- + */ +#if defined(CONFIG_PM) +static int npu_suspend(struct platform_device *dev, pm_message_t state) +{ + return 0; +} + +static int npu_resume(struct platform_device *dev) +{ + 
return 0; +} +#endif + +/* ------------------------------------------------------------------------- + * Module Entry Points + * ------------------------------------------------------------------------- + */ +static int __init npu_init(void) +{ + int rc; + + rc = platform_driver_register(&npu_driver); + if (rc) + pr_err("register failed %d\n", rc); + return rc; +} + +static void __exit npu_exit(void) +{ + platform_driver_unregister(&npu_driver); +} + +module_init(npu_init); +module_exit(npu_exit); + +MODULE_DEVICE_TABLE(of, npu_dt_match); +MODULE_DESCRIPTION("MSM NPU driver"); +MODULE_LICENSE("GPL"); +MODULE_INFO(intree, "Y"); diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h new file mode 100644 index 000000000000..33ef141282f7 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_firmware.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _NPU_FIRMWARE_H +#define _NPU_FIRMWARE_H + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +/* NPU Firmware Control/Status Register, written by FW and read HOST */ +#define REG_NPU_FW_CTRL_STATUS NPU_GPR0 +/* written by HOST and read by FW for control */ +#define REG_NPU_HOST_CTRL_STATUS NPU_GPR1 +/* Data value for control */ +#define REG_NPU_HOST_CTRL_VALUE NPU_GPR2 +/* Simulates an interrupt for FW->HOST, used for pre-silicon */ +#define REG_FW_TO_HOST_EVENT NPU_GPR3 +/* Read/Written by both host and dsp for sync between driver and dsp */ +#define REG_HOST_DSP_CTRL_STATUS NPU_GPR4 +/* Data value for debug */ +#define REG_NPU_FW_DEBUG_DATA NPU_GPR13 + +/* Started job count */ +#define REG_FW_JOB_CNT_START NPU_GPR14 +/* Finished job count */ +#define REG_FW_JOB_CNT_END NPU_GPR15 + +/* NPU FW Control/Status Register */ +/* bit fields definitions in CTRL STATUS REG */ +#define FW_CTRL_STATUS_IPC_READY_BIT 0 +#define FW_CTRL_STATUS_LOG_READY_BIT 1 +#define FW_CTRL_STATUS_EXECUTE_THREAD_READY_BIT 2 +#define FW_CTRL_STATUS_MAIN_THREAD_READY_BIT 3 +#define FW_CTRL_STATUS_LOADED_ACO_BIT 4 +#define FW_CTRL_STATUS_EXECUTING_ACO_BIT 5 +#define FW_CTRL_STATUS_SHUTDOWN_DONE_BIT 12 +#define FW_CTRL_STATUS_STACK_CORRUPT_BIT 13 + +/* 32 bit values of the bit fields above */ +#define FW_CTRL_STATUS_IPC_READY_VAL (1 << FW_CTRL_STATUS_IPC_READY_BIT) +#define FW_CTRL_STATUS_LOG_READY_VAL (1 << FW_CTRL_STATUS_LOG_READY_BIT) +#define FW_CTRL_STATUS_EXECUTE_THREAD_READY_VAL \ + (1 << FW_CTRL_STATUS_EXECUTE_THREAD_READY_BIT) +#define FW_CTRL_STATUS_MAIN_THREAD_READY_VAL \ + (1 << FW_CTRL_STATUS_MAIN_THREAD_READY_BIT) +#define FW_CTRL_STATUS_LOADED_ACO_VAL \ + (1 << 
FW_CTRL_STATUS_LOADED_ACO_BIT) +#define FW_CTRL_STATUS_EXECUTING_ACO_VAL \ + (1 << FW_CTRL_STATUS_EXECUTING_ACO_BIT) +#define FW_CTRL_STATUS_SHUTDOWN_DONE_VAL \ + (1 << FW_CTRL_STATUS_SHUTDOWN_DONE_BIT) +#define FW_CTRL_STATUS_STACK_CORRUPT_VAL \ + (1 << FW_CTRL_STATUS_STACK_CORRUPT_BIT) + +/* NPU HOST Control/Status Register */ +/* bit fields definitions in CTRL STATUS REG */ +/* Host has programmed IPC address into the REG_NPU_HOST_CTRL_VALUE register */ +#define HOST_CTRL_STATUS_IPC_ADDRESS_READY_BIT 0 +/* Host has enabled logging during boot */ +#define HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_BIT 1 +/* Host has enabled the clk gating of CAL during boot */ +#define HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT 2 +/* Host requests to pause fw during boot up */ +#define HOST_CTRL_STATUS_FW_PAUSE 3 +/* Host requests to disable watchdog */ +#define HOST_CTRL_STATUS_DISABLE_WDOG_BIT 4 + +/* 32 bit values of the bit fields above */ +#define HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL \ + (1 << HOST_CTRL_STATUS_IPC_ADDRESS_READY_BIT) +#define HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_VAL \ + (1 << HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_BIT) +#define HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL \ + (1 << HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT) +#define HOST_CTRL_STATUS_FW_PAUSE_VAL \ + (1 << HOST_CTRL_STATUS_FW_PAUSE) +#define HOST_CTRL_STATUS_DISABLE_WDOG_VAL \ + (1 << HOST_CTRL_STATUS_DISABLE_WDOG_BIT) + + +/* NPU HOST DSP Control/Status Register */ +/* notification of power up */ +/* following bits are set by host and read by dsp */ +#define HOST_DSP_CTRL_STATUS_PWR_UP_BIT 0 +/* notification of power dwn */ +#define HOST_DSP_CTRL_STATUS_PWR_DWN_BIT 1 +/* following bits are set by dsp and read by host */ +/* notification of power up acknowlegement*/ +#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT 4 +/* notification of power down acknowlegement*/ +#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT 5 + + +/* 32 bit values of the bit fields above */ +#define HOST_DSP_CTRL_STATUS_PWR_UP_VAL \ + (1 
<< HOST_DSP_CTRL_STATUS_PWR_UP_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_DWN_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_DWN_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT) +#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_VAL \ + (1 << HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT) + +/* Queue table header definition */ +struct hfi_queue_tbl_header { + uint32_t qtbl_version; /* queue table version number */ + uint32_t qtbl_size; /* total tables+queues size in bytes */ + uint32_t qtbl_qhdr0_offset; /* offset of the 1st queue header entry */ + uint32_t qtbl_qhdr_size; /* queue header size */ + uint32_t qtbl_num_q; /* total number of queues */ + uint32_t qtbl_num_active_q; /* number of active queues */ +}; + +/* Queue header definition */ +struct hfi_queue_header { + uint32_t qhdr_status; /* 0 == inactive, 1 == active */ + /* 4 byte-aligned start offset from start of q table */ + uint32_t qhdr_start_offset; + /* queue type */ + uint32_t qhdr_type; + /* in bytes, value of 0 means packets are variable size.*/ + uint32_t qhdr_q_size; + /* size of the Queue packet entries, in bytes, 0 means variable size */ + uint32_t qhdr_pkt_size; + + uint32_t qhdr_pkt_drop_cnt; + /* receiver watermark in # of queue packets */ + uint32_t qhdr_rx_wm; + /* transmitter watermark in # of queue packets */ + uint32_t qhdr_tx_wm; + /* + * set to request an interrupt from transmitter + * if qhdr_tx_wm is reached + */ + uint32_t qhdr_rx_req; + /* + * set to request an interrupt from receiver + * if qhdr_rx_wm is reached + */ + uint32_t qhdr_tx_req; + uint32_t qhdr_rx_irq_status; /* Not used */ + uint32_t qhdr_tx_irq_status; /* Not used */ + uint32_t qhdr_read_idx; /* read index in bytes */ + uint32_t qhdr_write_idx; /* write index in bytes */ +}; + +/* in bytes */ +#define HFI_QUEUE_TABLE_HEADER_SIZE (sizeof(struct hfi_queue_tbl_header)) +#define HFI_QUEUE_HEADER_SIZE (sizeof(struct hfi_queue_header)) +#define HFI_QUEUE_TABLE_SIZE (HFI_QUEUE_TABLE_HEADER_SIZE + \ + 
(NPU_HFI_NUMBER_OF_QS * HFI_QUEUE_HEADER_SIZE)) + +/* Queue Indexes */ +#define IPC_QUEUE_CMD_HIGH_PRIORITY 0 /* High priority Queue APPS->M0 */ +#define IPC_QUEUE_APPS_EXEC 1 /* APPS Execute Queue APPS->M0 */ +#define IPC_QUEUE_DSP_EXEC 2 /* DSP Execute Queue DSP->M0 */ +#define IPC_QUEUE_APPS_RSP 3 /* APPS Message Queue M0->APPS */ +#define IPC_QUEUE_DSP_RSP 4 /* DSP Message Queue DSP->APPS */ +#define IPC_QUEUE_LOG 5 /* Log Message Queue M0->APPS */ + +#define NPU_HFI_NUMBER_OF_QS 6 +#define NPU_HFI_NUMBER_OF_ACTIVE_QS 6 + +#define NPU_HFI_QUEUES_PER_CHANNEL 2 + +#endif /* _NPU_FIRMWARE_H */ diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c new file mode 100644 index 000000000000..63cb62a40f53 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_host_ipc.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include "npu_hw_access.h" +#include "npu_mgr.h" +#include "npu_firmware.h" +#include "npu_hw.h" +#include "npu_host_ipc.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +/* HFI IPC interface */ +#define TX_HDR_TYPE 0x01000000 +#define RX_HDR_TYPE 0x00010000 +#define HFI_QTBL_STATUS_ENABLED 0x00000001 + +#define QUEUE_TBL_VERSION 0x87654321 + +/* ------------------------------------------------------------------------- + * Data Structures + * ------------------------------------------------------------------------- + */ +struct npu_queue_tuple { + uint32_t size; + uint32_t hdr; + uint32_t start_offset; +}; + +static struct npu_queue_tuple npu_q_setup[6] = { + { 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE, 0}, + { 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0}, + { 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0}, + { 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0}, + { 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0}, + { 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE, 0}, +}; + +/* ------------------------------------------------------------------------- + * File Scope Function Prototypes + * ------------------------------------------------------------------------- + */ +static int npu_host_ipc_init_hfi(struct npu_device *npu_dev); +static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev, + uint32_t q_idx, void *cmd_ptr); +static int npu_host_ipc_read_msg_hfi(struct npu_device *npu_dev, + uint32_t q_idx, uint32_t *msg_ptr); +static int ipc_queue_read(struct npu_device *npu_dev, uint32_t target_que, + uint8_t *packet, uint8_t *is_tx_req_set); +static 
int ipc_queue_write(struct npu_device *npu_dev, uint32_t target_que, + uint8_t *packet, uint8_t *is_rx_req_set); + +/* ------------------------------------------------------------------------- + * Function Definitions + * ------------------------------------------------------------------------- + */ +static int npu_host_ipc_init_hfi(struct npu_device *npu_dev) +{ + int status = 0; + struct hfi_queue_tbl_header *q_tbl_hdr = NULL; + struct hfi_queue_header *q_hdr_arr = NULL; + struct hfi_queue_header *q_hdr = NULL; + void *q_tbl_addr = NULL; + uint32_t reg_val = 0; + uint32_t q_idx = 0; + uint32_t q_tbl_size = sizeof(struct hfi_queue_tbl_header) + + (NPU_HFI_NUMBER_OF_QS * sizeof(struct hfi_queue_header)); + uint32_t q_size = 0; + uint32_t cur_start_offset = 0; + + reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS); + + /* + * If the firmware is already running and we're just attaching, + * we do not need to do this + */ + if ((reg_val & FW_CTRL_STATUS_LOG_READY_VAL) != 0) + return status; + + /* check for valid interface queue table start address */ + q_tbl_addr = kzalloc(q_tbl_size, GFP_KERNEL); + if (q_tbl_addr == NULL) + return -ENOMEM; + + /* retrieve interface queue table start address */ + q_tbl_hdr = q_tbl_addr; + q_hdr_arr = (struct hfi_queue_header *)((uint8_t *)q_tbl_addr + + sizeof(struct hfi_queue_tbl_header)); + + /* initialize the interface queue table header */ + q_tbl_hdr->qtbl_version = QUEUE_TBL_VERSION; + q_tbl_hdr->qtbl_size = q_tbl_size; + q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_tbl_header); + q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header); + q_tbl_hdr->qtbl_num_q = NPU_HFI_NUMBER_OF_QS; + q_tbl_hdr->qtbl_num_active_q = NPU_HFI_NUMBER_OF_ACTIVE_QS; + + cur_start_offset = q_tbl_size; + + for (q_idx = IPC_QUEUE_CMD_HIGH_PRIORITY; + q_idx <= IPC_QUEUE_LOG; q_idx++) { + q_hdr = &q_hdr_arr[q_idx]; + /* queue is active */ + q_hdr->qhdr_status = 0x01; + q_hdr->qhdr_start_offset = cur_start_offset; + 
npu_q_setup[q_idx].start_offset = cur_start_offset; + q_size = npu_q_setup[q_idx].size; + q_hdr->qhdr_type = npu_q_setup[q_idx].hdr; + /* in bytes */ + q_hdr->qhdr_q_size = q_size; + /* variable size packets */ + q_hdr->qhdr_pkt_size = 0; + q_hdr->qhdr_pkt_drop_cnt = 0; + q_hdr->qhdr_rx_wm = 0x1; + q_hdr->qhdr_tx_wm = 0x1; + /* since queue is initially empty */ + q_hdr->qhdr_rx_req = 0x1; + q_hdr->qhdr_tx_req = 0x0; + /* not used */ + q_hdr->qhdr_rx_irq_status = 0; + /* not used */ + q_hdr->qhdr_tx_irq_status = 0; + q_hdr->qhdr_read_idx = 0; + q_hdr->qhdr_write_idx = 0; + cur_start_offset += q_size; + } + + MEMW(npu_dev, IPC_ADDR, (uint8_t *)q_tbl_hdr, q_tbl_size); + kfree(q_tbl_addr); + /* Write in the NPU's address for where IPC starts */ + REGW(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_VALUE, + (uint32_t)IPC_MEM_OFFSET_FROM_SSTCM); + /* Set value bit */ + reg_val = REGR(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS); + REGW(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS, reg_val | + HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL); + return status; +} + +static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev, + uint32_t q_idx, void *cmd_ptr) +{ + int status = 0; + uint8_t is_rx_req_set = 0; + uint32_t retry_cnt = 5; + + status = ipc_queue_write(npu_dev, q_idx, (uint8_t *)cmd_ptr, + &is_rx_req_set); + + if (status == -ENOSPC) { + do { + msleep(20); + status = ipc_queue_write(npu_dev, q_idx, + (uint8_t *)cmd_ptr, &is_rx_req_set); + } while ((status == -ENOSPC) && (--retry_cnt > 0)); + } + + if (status == 0) { + if (is_rx_req_set == 1) + status = INTERRUPT_RAISE_NPU(npu_dev); + } + + if (status == 0) + pr_debug("Cmd Msg put on Command Queue - SUCCESSS\n"); + else + pr_err("Cmd Msg put on Command Queue - FAILURE\n"); + + return status; +} + +static int npu_host_ipc_read_msg_hfi(struct npu_device *npu_dev, + uint32_t q_idx, uint32_t *msg_ptr) +{ + int status = 0; + uint8_t is_tx_req_set; + + status = ipc_queue_read(npu_dev, q_idx, (uint8_t *)msg_ptr, + &is_tx_req_set); + + if 
(status == 0) { + /* raise interrupt if qhdr_tx_req is set */ + if (is_tx_req_set == 1) + status = INTERRUPT_RAISE_NPU(npu_dev); + } + + return status; +} + +static int ipc_queue_read(struct npu_device *npu_dev, + uint32_t target_que, uint8_t *packet, + uint8_t *is_tx_req_set) +{ + int status = 0; + struct hfi_queue_header queue; + uint32_t packet_size, new_read_idx; + size_t read_ptr; + size_t offset = 0; + + offset = (size_t)IPC_ADDR + sizeof(struct hfi_queue_tbl_header) + + target_que * sizeof(struct hfi_queue_header); + + if ((packet == NULL) || (is_tx_req_set == NULL)) + return -EINVAL; + + /* Read the queue */ + MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue, + HFI_QUEUE_HEADER_SIZE); + + if (queue.qhdr_type != npu_q_setup[target_que].hdr || + queue.qhdr_q_size != npu_q_setup[target_que].size || + queue.qhdr_read_idx >= queue.qhdr_q_size || + queue.qhdr_write_idx >= queue.qhdr_q_size || + queue.qhdr_start_offset != + npu_q_setup[target_que].start_offset) { + pr_err("Invalid Queue header\n"); + status = -EIO; + goto exit; + } + + /* check if queue is empty */ + if (queue.qhdr_read_idx == queue.qhdr_write_idx) { + /* + * set qhdr_rx_req, to inform the sender that the Interrupt + * needs to be raised with the next packet queued + */ + queue.qhdr_rx_req = 1; + *is_tx_req_set = 0; + status = -EPERM; + goto exit; + } + + read_ptr = ((size_t)(size_t)IPC_ADDR + + queue.qhdr_start_offset + queue.qhdr_read_idx); + + /* Read packet size */ + MEMR(npu_dev, (void *)((size_t)read_ptr), packet, 4); + packet_size = *((uint32_t *)packet); + + pr_debug("target_que: %d, packet_size: %d\n", + target_que, + packet_size); + + if ((packet_size == 0) || + (packet_size > NPU_IPC_BUF_LENGTH)) { + pr_err("Invalid packet size %d\n", packet_size); + status = -EINVAL; + goto exit; + } + new_read_idx = queue.qhdr_read_idx + packet_size; + + if (new_read_idx < (queue.qhdr_q_size)) { + MEMR(npu_dev, (void *)((size_t)read_ptr), packet, packet_size); + } else { + new_read_idx -= 
(queue.qhdr_q_size); + + MEMR(npu_dev, (void *)((size_t)read_ptr), packet, + packet_size - new_read_idx); + + MEMR(npu_dev, (void *)((size_t)IPC_ADDR + + queue.qhdr_start_offset), + (void *)((size_t)packet + (packet_size-new_read_idx)), + new_read_idx); + } + + queue.qhdr_read_idx = new_read_idx; + + if (queue.qhdr_read_idx == queue.qhdr_write_idx) + /* + * receiver wants an interrupt from transmitter + * (when next item queued) because queue is empty + */ + queue.qhdr_rx_req = 1; + else + /* clear qhdr_rx_req since the queue is not empty */ + queue.qhdr_rx_req = 0; + + if (queue.qhdr_tx_req == 1) + /* transmitter requested an interrupt */ + *is_tx_req_set = 1; + else + *is_tx_req_set = 0; +exit: + /* Update RX interrupt request -- queue.qhdr_rx_req */ + MEMW(npu_dev, (void *)((size_t)offset + + (uint32_t)((size_t)&(queue.qhdr_rx_req) - + (size_t)&queue)), (uint8_t *)&queue.qhdr_rx_req, + sizeof(queue.qhdr_rx_req)); + /* Update Read pointer -- queue.qhdr_read_idx */ + MEMW(npu_dev, (void *)((size_t)offset + (uint32_t)( + (size_t)&(queue.qhdr_read_idx) - (size_t)&queue)), + (uint8_t *)&queue.qhdr_read_idx, sizeof(queue.qhdr_read_idx)); + + return status; +} + +static int ipc_queue_write(struct npu_device *npu_dev, + uint32_t target_que, uint8_t *packet, + uint8_t *is_rx_req_set) +{ + int status = 0; + struct hfi_queue_header queue; + uint32_t packet_size, new_write_idx; + uint32_t empty_space; + void *write_ptr; + uint32_t read_idx; + + size_t offset = (size_t)IPC_ADDR + + sizeof(struct hfi_queue_tbl_header) + + target_que * sizeof(struct hfi_queue_header); + + if ((packet == NULL) || (is_rx_req_set == NULL)) + return -EINVAL; + + MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue, + HFI_QUEUE_HEADER_SIZE); + + if (queue.qhdr_type != npu_q_setup[target_que].hdr || + queue.qhdr_q_size != npu_q_setup[target_que].size || + queue.qhdr_read_idx >= queue.qhdr_q_size || + queue.qhdr_write_idx >= queue.qhdr_q_size || + queue.qhdr_start_offset != + 
npu_q_setup[target_que].start_offset) { + pr_err("Invalid Queue header\n"); + status = -EIO; + goto exit; + } + + packet_size = (*(uint32_t *)packet); + if (packet_size == 0) { + /* assign failed status and return */ + status = -EPERM; + goto exit; + } + + /* sample Read Idx */ + read_idx = queue.qhdr_read_idx; + + /* Calculate Empty Space(UWord32) in the Queue */ + empty_space = (queue.qhdr_write_idx >= read_idx) ? + ((queue.qhdr_q_size) - (queue.qhdr_write_idx - read_idx)) : + (read_idx - queue.qhdr_write_idx); + + if (empty_space <= packet_size) { + /* + * If Queue is FULL/ no space for message + * set qhdr_tx_req. + */ + queue.qhdr_tx_req = 1; + + /* + * Queue is FULL, force raise an interrupt to Receiver + */ + *is_rx_req_set = 1; + + status = -ENOSPC; + goto exit; + } + + /* + * clear qhdr_tx_req so that receiver does not raise an interrupt + * on reading packets from Queue, since there is space to write + * the next packet + */ + queue.qhdr_tx_req = 0; + + new_write_idx = (queue.qhdr_write_idx + packet_size); + + write_ptr = (void *)(size_t)((size_t)IPC_ADDR + + queue.qhdr_start_offset + queue.qhdr_write_idx); + + if (new_write_idx < queue.qhdr_q_size) { + MEMW(npu_dev, (void *)((size_t)write_ptr), (uint8_t *)packet, + packet_size); + } else { + /* wraparound case */ + new_write_idx -= (queue.qhdr_q_size); + + MEMW(npu_dev, (void *)((size_t)write_ptr), (uint8_t *)packet, + packet_size - new_write_idx); + + MEMW(npu_dev, (void *)((size_t)((size_t)IPC_ADDR + + queue.qhdr_start_offset)), (uint8_t *)(packet + + (packet_size - new_write_idx)), new_write_idx); + } + + /* Update qhdr_write_idx */ + queue.qhdr_write_idx = new_write_idx; + + *is_rx_req_set = (queue.qhdr_rx_req == 1) ? 
1 : 0; + + /* Update Write pointer -- queue.qhdr_write_idx */ +exit: + /* Update TX request -- queue.qhdr_tx_req */ + MEMW(npu_dev, (void *)((size_t)(offset + (uint32_t)( + (size_t)&(queue.qhdr_tx_req) - (size_t)&queue))), + &queue.qhdr_tx_req, sizeof(queue.qhdr_tx_req)); + MEMW(npu_dev, (void *)((size_t)(offset + (uint32_t)( + (size_t)&(queue.qhdr_write_idx) - (size_t)&queue))), + &queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx)); + + return status; +} + +/* ------------------------------------------------------------------------- + * IPC Interface functions + * ------------------------------------------------------------------------- + */ +int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t q_idx, + void *cmd_ptr) +{ + return npu_host_ipc_send_cmd_hfi(npu_dev, q_idx, cmd_ptr); +} + +int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t q_idx, + uint32_t *msg_ptr) +{ + return npu_host_ipc_read_msg_hfi(npu_dev, q_idx, msg_ptr); +} + +int npu_host_ipc_pre_init(struct npu_device *npu_dev) +{ + return npu_host_ipc_init_hfi(npu_dev); +} + +int npu_host_ipc_post_init(struct npu_device *npu_dev) +{ + return 0; +} diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.h b/drivers/media/platform/msm/npu/npu_host_ipc.h new file mode 100644 index 000000000000..a78cd39edf29 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_host_ipc.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef NPU_HOST_IPC_H +#define NPU_HOST_IPC_H + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +/* Messages sent **to** NPU */ +/* IPC Message Commands -- uint32_t */ +/* IPC command start base */ +#define NPU_IPC_CMD_BASE 0x00000000 +/* ipc_cmd_load_pkt */ +#define NPU_IPC_CMD_LOAD 0x00000001 +/* ipc_cmd_unload_pkt */ +#define NPU_IPC_CMD_UNLOAD 0x00000002 +/* ipc_cmd_execute_pkt */ +#define NPU_IPC_CMD_EXECUTE 0x00000003 +/* ipc_cmd_set_logging_state */ +#define NPU_IPC_CMD_CONFIG_LOG 0x00000004 +#define NPU_IPC_CMD_CONFIG_PERFORMANCE 0x00000005 +#define NPU_IPC_CMD_CONFIG_DEBUG 0x00000006 +#define NPU_IPC_CMD_SHUTDOWN 0x00000007 +/* ipc_cmd_loopback_packet */ +#define NPU_IPC_CMD_LOOPBACK 0x00000008 +/* ipc_cmd_load_packet_v2_t */ +#define NPU_IPC_CMD_LOAD_V2 0x00000009 +/* ipc_cmd_execute_packet_v2 */ +#define NPU_IPC_CMD_EXECUTE_V2 0x0000000A +/* ipc_cmd_set_property_packet */ +#define NPU_IPC_CMD_SET_PROPERTY 0x0000000B +/* ipc_cmd_get_property_packet */ +#define NPU_IPC_CMD_GET_PROPERTY 0x0000000C + +/* Messages sent **from** NPU */ +/* IPC Message Response -- uint32_t */ +/* IPC response start base */ +#define NPU_IPC_MSG_BASE 0x00010000 +/* ipc_msg_load_pkt */ +#define NPU_IPC_MSG_LOAD_DONE 0x00010001 +/* ipc_msg_header_pkt */ +#define NPU_IPC_MSG_UNLOAD_DONE 0x00010002 +/* ipc_msg_header_pkt */ +#define NPU_IPC_MSG_EXECUTE_DONE 0x00010003 +/* ipc_msg_event_notify_pkt */ +#define NPU_IPC_MSG_EVENT_NOTIFY 0x00010004 +/* ipc_msg_loopback_pkt */ +#define NPU_IPC_MSG_LOOPBACK_DONE 0x00010005 +/* ipc_msg_execute_pkt_v2 */ +#define NPU_IPC_MSG_EXECUTE_V2_DONE 0x00010006 +/* ipc_msg_set_property_packet */ +#define NPU_IPC_MSG_SET_PROPERTY_DONE 0x00010007 +/* ipc_msg_get_property_packet */ +#define NPU_IPC_MSG_GET_PROPERTY_DONE 0x00010008 +/* ipc_msg_general_notify_pkt */ +#define NPU_IPC_MSG_GENERAL_NOTIFY 0x00010010 + +/* IPC 
Notify Message Type -- uint32_t */ +#define NPU_NOTIFY_DCVS_MODE 0x00002000 + +/* Logging message size */ +/* Number 32-bit elements for the maximum log message size */ +#define NPU_LOG_MSG_MAX_SIZE 4 + +/* Performance */ +/* Performance counters for current network layer */ +/* Amount of data read from all the DMA read channels */ +#define NPU_PERFORMANCE_DMA_DATA_READ 0x01 +/* Amount of data written from all the DMA write channels */ +#define NPU_PERFORMANCE_DMA_DATA_WRITTEN 0x02 +/* Number of blocks read by DMA channels */ +#define NPU_PERFORMANCE_DMA_NUM_BLOCKS_READ 0x03 +/* Number of blocks written by DMA channels */ +#define NPU_PERFORMANCE_DMA_NUM_BLOCKS_WRITTEN 0x04 +/* Number of instructions executed by CAL */ +#define NPU_PERFORMANCE_INSTRUCTIONS_CAL 0x05 +/* Number of instructions executed by CUB */ +#define NPU_PERFORMANCE_INSTRUCTIONS_CUB 0x06 +/* Timestamp of start of network load */ +#define NPU_PERFORMANCE_TIMESTAMP_LOAD_START 0x07 +/* Timestamp of end of network load */ +#define NPU_PERFORMANCE_TIMESTAMP_LOAD_END 0x08 +/* Timestamp of start of network execute */ +#define NPU_PERFORMANCE_TIMESTAMP_EXECUTE_START 0x09 +/* Timestamp of end of network execute */ +#define NPU_PERFORMANCE_TIMESTAMP_EXECUTE_END 0x10 +/* Timestamp of CAL start */ +#define NPU_PERFORMANCE_TIMESTAMP_CAL_START 0x11 +/* Timestamp of CAL end */ +#define NPU_PERFORMANCE_TIMESTAMP_CAL_END 0x12 +/* Timestamp of CUB start */ +#define NPU_PERFORMANCE_TIMESTAMP_CUB_START 0x13 +/* Timestamp of CUB end */ +#define NPU_PERFORMANCE_TIMESTAMP_CUB_END 0x14 + +/* Performance enable */ +/* Select which counters you want back per layer */ + +/* Shutdown */ +/* Immediate shutdown, discard any state, etc */ +#define NPU_SHUTDOWN_IMMEDIATE 0x01 +/* Shutdown after current execution (if any) is completed */ +#define NPU_SHUTDOWN_WAIT_CURRENT_EXECUTION 0x02 + +/* Debug stats */ +#define NUM_LAYER_STATS_PER_EXE_MSG_MAX 110 + +/* DCVS */ +#define NPU_DCVS_ACTIVITY_MAX_PERF 0x100 + +/* 
------------------------------------------------------------------------- + * Data Structures + * ------------------------------------------------------------------------- + */ +/* Command Header - Header for all Messages **TO** NPU */ +/* + * command header packet definition for + * messages sent from host->NPU + */ +struct ipc_cmd_header_pkt { + uint32_t size; + uint32_t cmd_type; + uint32_t trans_id; + uint32_t flags; /* TODO: what flags and why */ +}; + +/* Message Header - Header for all messages **FROM** NPU */ +/* + * message header packet definition for + * messages sent from NPU->host + */ +struct ipc_msg_header_pkt { + uint32_t size; + uint32_t msg_type; + uint32_t status; + uint32_t trans_id; + uint32_t flags; +}; + +/* Execute */ +/* + * FIRMWARE + * keep lastNetworkIDRan = uint32 + * keep wasLastNetworkChunky = BOOLEAN + */ +/* + * ACO Buffer definition + */ +struct npu_aco_buffer { + /* + * used to track if previous network is the same and already loaded, + * we can save a dma + */ + uint32_t network_id; + /* + * size of header + first chunk ACO buffer - + * this saves a dma by dmaing both header and first chunk + */ + uint32_t buf_size; + /* + * SMMU 32-bit mapped address that the DMA engine can read - + * uses lower 32 bits + */ + uint64_t address; +}; + +/* + * ACO Buffer V2 definition + */ +struct npu_aco_buffer_v2 { + /* + * used to track if previous network is the same and already loaded, + * we can save a dma + */ + uint32_t network_id; + /* + * size of header + first chunk ACO buffer - + * this saves a dma by dmaing both header and first chunk + */ + uint32_t buf_size; + /* + * SMMU 32-bit mapped address that the DMA engine can read - + * uses lower 32 bits + */ + uint32_t address; + /* + * number of layers in the network + */ + uint32_t num_layers; +}; + +/* + * ACO Patch Parameters + */ +struct npu_patch_tuple { + uint32_t value; + uint32_t chunk_id; + uint16_t instruction_size_in_bytes; + uint16_t variable_size_in_bits; + uint16_t 
shift_value_in_bits; + uint32_t loc_offset; +}; + +/* + * ACO Patch Tuple V2 + */ +struct npu_patch_tuple_v2 { + uint32_t value; + uint32_t chunk_id; + uint32_t instruction_size_in_bytes; + uint32_t variable_size_in_bits; + uint32_t shift_value_in_bits; + uint32_t loc_offset; +}; + +struct npu_patch_params { + uint32_t num_params; + struct npu_patch_tuple param[2]; +}; + +/* + * LOAD command packet definition + */ +struct ipc_cmd_load_pkt { + struct ipc_cmd_header_pkt header; + struct npu_aco_buffer buf_pkt; +}; + +/* + * LOAD command packet V2 definition + */ +struct ipc_cmd_load_pkt_v2 { + struct ipc_cmd_header_pkt header; + struct npu_aco_buffer_v2 buf_pkt; + uint32_t num_patch_params; + struct npu_patch_tuple_v2 patch_params[]; +}; + +/* + * UNLOAD command packet definition + */ +struct ipc_cmd_unload_pkt { + struct ipc_cmd_header_pkt header; + uint32_t network_hdl; +}; + +/* + * Execute packet definition + */ +struct ipc_cmd_execute_pkt { + struct ipc_cmd_header_pkt header; + struct npu_patch_params patch_params; + uint32_t network_hdl; +}; + +struct npu_patch_params_v2 { + uint32_t value; + uint32_t id; +}; + +/* + * Execute packet V2 definition + */ +struct ipc_cmd_execute_pkt_v2 { + struct ipc_cmd_header_pkt header; + uint32_t network_hdl; + uint32_t num_patch_params; + struct npu_patch_params_v2 patch_params[]; +}; + +/* + * Loopback packet definition + */ +struct ipc_cmd_loopback_pkt { + struct ipc_cmd_header_pkt header; + uint32_t loopbackParams; +}; + +/* + * Generic property definition + */ +struct ipc_cmd_prop_pkt { + struct ipc_cmd_header_pkt header; + uint32_t prop_id; + uint32_t num_params; + uint32_t network_hdl; + uint32_t prop_param[]; +}; + +/* + * Generic property response packet definition + */ +struct ipc_msg_prop_pkt { + struct ipc_msg_header_pkt header; + uint32_t prop_id; + uint32_t num_params; + uint32_t network_hdl; + uint32_t prop_param[]; +}; + +/* + * Generic notify message packet definition + */ +struct ipc_msg_general_notify_pkt { 
+ struct ipc_msg_header_pkt header; + uint32_t notify_id; + uint32_t num_params; + uint32_t network_hdl; + uint32_t notify_param[]; +}; + + +/* + * LOAD response packet definition + */ +struct ipc_msg_load_pkt { + struct ipc_msg_header_pkt header; + uint32_t network_hdl; +}; + +/* + * UNLOAD response packet definition + */ +struct ipc_msg_unload_pkt { + struct ipc_msg_header_pkt header; + uint32_t network_hdl; +}; + +/* + * Layer Stats information returned back during EXECUTE_DONE response + */ +struct ipc_layer_stats { + /* + * hardware tick count per layer + */ + uint32_t tick_count; +}; + +struct ipc_execute_layer_stats { + /* + * total number of layers associated with the execution + */ + uint32_t total_num_layers; + /* + * pointer to each layer stats + */ + struct ipc_layer_stats + layer_stats_list[NUM_LAYER_STATS_PER_EXE_MSG_MAX]; +}; + +struct ipc_execute_stats { + /* + * total e2e IPC tick count during EXECUTE cmd + */ + uint32_t e2e_ipc_tick_count; + /* + * tick count on ACO loading + */ + uint32_t aco_load_tick_count; + /* + * tick count on ACO execution + */ + uint32_t aco_execution_tick_count; + /* + * individual layer stats + */ + struct ipc_execute_layer_stats exe_stats; +}; + +/* + * EXECUTE response packet definition + */ +struct ipc_msg_execute_pkt { + struct ipc_msg_header_pkt header; + struct ipc_execute_stats stats; + uint32_t network_hdl; +}; + +/* + * EXECUTE V2 response packet definition + */ +struct ipc_msg_execute_pkt_v2 { + struct ipc_msg_header_pkt header; + uint32_t network_hdl; + uint32_t stats_data[]; +}; + +/* + * LOOPBACK response packet definition + */ +struct ipc_msg_loopback_pkt { + struct ipc_msg_header_pkt header; + uint32_t loopbackParams; +}; + +/* Logging Related */ + +/* + * ipc_log_state_t - Logging state + */ +struct ipc_log_state { + uint32_t module_msk; + uint32_t level_msk; +}; + +struct ipc_cmd_log_state_pkt { + struct ipc_cmd_header_pkt header; + struct ipc_log_state log_state; +}; + +struct ipc_msg_log_state_pkt { + 
struct ipc_msg_header_pkt header; + struct ipc_log_state log_state; +}; + +/* + * Logging message + * This is a message from the NPU that contains the + * logging message. The values of part1-4 are not exposed; + * the receiver has to refer to the logging implementation to + * interpret what these mean and how to parse + */ +struct ipc_msg_log_pkt { + struct ipc_msg_header_pkt header; + uint32_t log_msg[NPU_LOG_MSG_MAX_SIZE]; +}; + +/* Performance Related */ + +/* + * Set counter mask of which counters we want + * This is a message from HOST->NPU Firmware + */ +struct ipc_cmd_set_performance_query { + struct ipc_cmd_header_pkt header; + uint32_t cnt_msk; +}; + +/* + * Set counter mask of which counters we want + * This is a message from HOST->NPU Firmware + */ +struct ipc_msg_performance_counters { + struct ipc_cmd_header_pkt header; + uint32_t layer_id; + uint32_t num_tulpes; + /* Array of tuples [HEADER,value] */ + uint32_t cnt_tulpes[]; +}; + +/* + * ipc_cmd_shutdown - Shutdown command + */ +struct ipc_cmd_shutdown_pkt { + struct ipc_cmd_header_pkt header; + uint32_t shutdown_flags; +}; + +#endif /* NPU_HOST_IPC_H */ diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h new file mode 100644 index 000000000000..3ca79896b492 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_hw.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef NPU_HW_H +#define NPU_HW_H + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define NPU_HW_VERSION (0x00000000) +#define NPU_MASTERn_IPC_IRQ_OUT(n) (0x00001004+0x1000*(n)) +#define NPU_CACHE_ATTR_IDn___POR 0x00011100 +#define NPU_CACHE_ATTR_IDn(n) (0x00000800+0x4*(n)) +#define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00001008+0x1000*(n)) +#define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4 +#define NPU_MASTERn_IPC_IRQ_OUT_CTRL(n) (0x00001004+0x1000*(n)) +#define NPU_MASTER0_IPC_IRQ_OUT_CTRL__IRQ_TYPE_PULSE 4 +#define NPU_GPR0 (0x00000100) +#define NPU_MASTERn_ERROR_IRQ_STATUS(n) (0x00001010+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_INCLUDE(n) (0x00001014+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_ENABLE(n) (0x00001018+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_CLEAR(n) (0x0000101C+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_SET(n) (0x00001020+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_OWNER(n) (0x00007000+4*(n)) +#define NPU_ERROR_IRQ_MASK 0x000000E3 +#define NPU_MASTERn_WDOG_IRQ_STATUS(n) (0x00001030+0x1000*(n)) +#define NPU_WDOG_BITE_IRQ_STATUS (1 << 1) +#define NPU_MASTERn_WDOG_IRQ_INCLUDE(n) (0x00001034+0x1000*(n)) +#define NPU_WDOG_BITE_IRQ_INCLUDE (1 << 1) +#define NPU_MASTERn_WDOG_IRQ_OWNER(n) (0x00007010+4*(n)) +#define NPU_WDOG_IRQ_MASK 0x00000002 + + +#define NPU_GPR1 (0x00000104) +#define NPU_GPR2 (0x00000108) +#define NPU_GPR3 (0x0000010C) +#define NPU_GPR4 (0x00000110) +#define NPU_GPR13 (0x00000134) +#define NPU_GPR14 (0x00000138) +#define NPU_GPR15 (0x0000013C) + +#define BWMON2_SAMPLING_WINDOW (0x000003A8) +#define BWMON2_BYTE_COUNT_THRESHOLD_HIGH (0x000003AC) +#define BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM (0x000003B0) +#define BWMON2_BYTE_COUNT_THRESHOLD_LOW (0x000003B4) +#define BWMON2_ZONE_ACTIONS (0x000003B8) +#define BWMON2_ZONE_COUNT_THRESHOLD (0x000003BC) + +#endif /* NPU_HW_H */ diff --git 
a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c new file mode 100644 index 000000000000..c851cfd01ed4 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_hw_access.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include + +#include "npu_hw_access.h" +#include "npu_common.h" +#include "npu_hw.h" + +/* ------------------------------------------------------------------------- + * Functions - Register + * ------------------------------------------------------------------------- + */ +static uint32_t npu_reg_read(void __iomem *base, size_t size, uint32_t off) +{ + if (!base) { + pr_err("NULL base address\n"); + return 0; + } + + if ((off % 4) != 0) { + pr_err("offset %x is not aligned\n", off); + return 0; + } + + if (off >= size) { + pr_err("offset exceeds io region %x:%x\n", off, size); + return 0; + } + + return readl_relaxed(base + off); +} + +static void npu_reg_write(void __iomem *base, size_t size, uint32_t off, + uint32_t val) +{ + if (!base) { + pr_err("NULL base address\n"); + return; + } + + if ((off % 4) != 0) { + pr_err("offset %x is not aligned\n", off); + return; + } + + if (off >= size) { + pr_err("offset exceeds io region %x:%x\n", off, size); + return; + } + + writel_relaxed(val, base + off); + __iowmb(); +} + +uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off) +{ + return npu_reg_read(npu_dev->core_io.base, npu_dev->core_io.size, off); +} + +void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val) +{ + 
npu_reg_write(npu_dev->core_io.base, npu_dev->core_io.size, + off, val); +} + +uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off) +{ + return npu_reg_read(npu_dev->bwmon_io.base, npu_dev->bwmon_io.size, + off); +} + +void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off, + uint32_t val) +{ + npu_reg_write(npu_dev->bwmon_io.base, npu_dev->bwmon_io.size, + off, val); +} + +uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off) +{ + return npu_reg_read(npu_dev->qfprom_io.base, + npu_dev->qfprom_io.size, off); +} + +/* ------------------------------------------------------------------------- + * Functions - Memory + * ------------------------------------------------------------------------- + */ +void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src, + uint32_t size) +{ + size_t dst_off = (size_t)dst; + uint32_t *src_ptr32 = (uint32_t *)src; + uint8_t *src_ptr8 = NULL; + uint32_t i = 0; + uint32_t num = 0; + + if (dst_off >= npu_dev->tcm_io.size || + (npu_dev->tcm_io.size - dst_off) < size) { + pr_err("memory write exceeds io region %x:%x:%x\n", + dst_off, size, npu_dev->tcm_io.size); + return; + } + + num = size/4; + for (i = 0; i < num; i++) { + writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off); + dst_off += 4; + } + + if (size%4 != 0) { + src_ptr8 = (uint8_t *)((size_t)src + (num*4)); + num = size%4; + for (i = 0; i < num; i++) { + writeb_relaxed(src_ptr8[i], npu_dev->tcm_io.base + + dst_off); + dst_off += 1; + } + } + + __iowmb(); +} + +int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst, + uint32_t size) +{ + size_t src_off = (size_t)src; + uint32_t *out32 = (uint32_t *)dst; + uint8_t *out8 = NULL; + uint32_t i = 0; + uint32_t num = 0; + + if (src_off >= npu_dev->tcm_io.size || + (npu_dev->tcm_io.size - src_off) < size) { + pr_err("memory read exceeds io region %x:%x:%x\n", + src_off, size, npu_dev->tcm_io.size); + return 0; + } + + num = size/4; + for (i = 0; i < num; i++) { 
+ out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off); + src_off += 4; + } + + if (size%4 != 0) { + out8 = (uint8_t *)((size_t)dst + (num*4)); + num = size%4; + for (i = 0; i < num; i++) { + out8[i] = readb_relaxed(npu_dev->tcm_io.base + src_off); + src_off += 1; + } + } + return 0; +} + +void *npu_ipc_addr(void) +{ + return (void *)(IPC_MEM_OFFSET_FROM_SSTCM); +} + +/* ------------------------------------------------------------------------- + * Functions - Interrupt + * ------------------------------------------------------------------------- + */ +void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num) +{ + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + uint32_t wdg_irq_sts = 0, error_irq_sts = 0; + + /* Clear irq state */ + REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); + + wdg_irq_sts = REGR(npu_dev, NPU_MASTERn_WDOG_IRQ_STATUS(0)); + if (wdg_irq_sts != 0) { + pr_err("wdg irq %x\n", wdg_irq_sts); + host_ctx->wdg_irq_sts |= wdg_irq_sts; + host_ctx->fw_error = true; + } + + error_irq_sts = REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_STATUS(0)); + error_irq_sts &= REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0)); + if (error_irq_sts != 0) { + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), error_irq_sts); + pr_err("error irq %x\n", error_irq_sts); + host_ctx->err_irq_sts |= error_irq_sts; + host_ctx->fw_error = true; + } +} + +int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev) +{ + /* Bit 4 is setting IRQ_SOURCE_SELECT to local + * and we're triggering a pulse to NPU_MASTER0_IPC_IN_IRQ0 + */ + npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_IN_CTRL(0), 0x1 + << NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S | 0x1); + + return 0; +} + +int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev) +{ + npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_OUT_CTRL(1), 0x8); + + return 0; +} + +/* ------------------------------------------------------------------------- + * Functions - ION Memory + * 
------------------------------------------------------------------------- + */ +static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client + *client, int buf_hdl, uint32_t size) +{ + struct npu_ion_buf *ret_val = NULL, *tmp; + struct list_head *pos = NULL; + + mutex_lock(&client->list_lock); + list_for_each(pos, &(client->mapped_buffer_list)) { + tmp = list_entry(pos, struct npu_ion_buf, list); + if (tmp->fd == buf_hdl) { + ret_val = tmp; + break; + } + } + + if (ret_val) { + /* mapped already, treat as invalid request */ + pr_err("ion buf has been mapped\n"); + ret_val = NULL; + } else { + ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL); + if (ret_val) { + ret_val->fd = buf_hdl; + ret_val->size = size; + ret_val->iova = 0; + list_add(&(ret_val->list), + &(client->mapped_buffer_list)); + } + } + mutex_unlock(&client->list_lock); + + return ret_val; +} + +static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_client + *client, int buf_hdl) +{ + struct list_head *pos = NULL; + struct npu_ion_buf *ret_val = NULL, *tmp; + + mutex_lock(&client->list_lock); + list_for_each(pos, &(client->mapped_buffer_list)) { + tmp = list_entry(pos, struct npu_ion_buf, list); + if (tmp->fd == buf_hdl) { + ret_val = tmp; + break; + } + } + mutex_unlock(&client->list_lock); + + return ret_val; +} + +static void npu_free_npu_ion_buffer(struct npu_client + *client, int buf_hdl) +{ + struct list_head *pos = NULL; + struct npu_ion_buf *npu_ion_buf = NULL; + + mutex_lock(&client->list_lock); + list_for_each(pos, &(client->mapped_buffer_list)) { + npu_ion_buf = list_entry(pos, struct npu_ion_buf, list); + if (npu_ion_buf->fd == buf_hdl) { + list_del(&npu_ion_buf->list); + kfree(npu_ion_buf); + break; + } + } + mutex_unlock(&client->list_lock); +} + +int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size, + uint64_t *addr) +{ + MODULE_IMPORT_NS(DMA_BUF); + int ret = 0; + struct npu_device *npu_dev = client->npu_dev; + struct npu_ion_buf *ion_buf = NULL; + struct 
npu_smmu_ctx *smmu_ctx = &npu_dev->smmu_ctx; + + if (buf_hdl == 0) + return -EINVAL; + + ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size); + if (!ion_buf) { + pr_err("%s fail to alloc npu_ion_buffer\n", __func__); + ret = -ENOMEM; + return ret; + } + + smmu_ctx->attach_cnt++; + + ion_buf->dma_buf = dma_buf_get(ion_buf->fd); + if (IS_ERR_OR_NULL(ion_buf->dma_buf)) { + pr_err("dma_buf_get failed %d\n", ion_buf->fd); + ret = -ENOMEM; + ion_buf->dma_buf = NULL; + goto map_end; + } + + ion_buf->attachment = dma_buf_attach(ion_buf->dma_buf, + &(npu_dev->pdev->dev)); + if (IS_ERR(ion_buf->attachment)) { + ret = -ENOMEM; + ion_buf->attachment = NULL; + goto map_end; + } + + ion_buf->attachment->dma_map_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; + + ion_buf->table = dma_buf_map_attachment(ion_buf->attachment, + DMA_BIDIRECTIONAL); + if (IS_ERR(ion_buf->table)) { + pr_err("npu dma_buf_map_attachment failed\n"); + ret = -ENOMEM; + ion_buf->table = NULL; + goto map_end; + } + + ion_buf->iova = ion_buf->table->sgl->dma_address; + ion_buf->size = ion_buf->dma_buf->size; + *addr = ion_buf->iova; + pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova, + ion_buf->size); +map_end: + if (ret) + npu_mem_unmap(client, buf_hdl, 0); + + return ret; +} + +void npu_mem_invalidate(struct npu_client *client, int buf_hdl) +{ + struct npu_device *npu_dev = client->npu_dev; + struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(client, + buf_hdl); + + if (!ion_buf) + pr_err("%s can't find ion buf\n", __func__); + else + dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl, + ion_buf->table->nents, DMA_BIDIRECTIONAL); +} + +bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr) +{ + struct npu_ion_buf *ion_buf = NULL; + struct list_head *pos = NULL; + bool valid = false; + + mutex_lock(&client->list_lock); + list_for_each(pos, &(client->mapped_buffer_list)) { + ion_buf = list_entry(pos, struct npu_ion_buf, list); + if (ion_buf->iova == addr) { + valid = 
true; + break; + } + } + mutex_unlock(&client->list_lock); + + return valid; +} + +void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr) +{ + MODULE_IMPORT_NS(DMA_BUF); + struct npu_device *npu_dev = client->npu_dev; + struct npu_ion_buf *ion_buf = NULL; + + /* clear entry and retrieve the corresponding buffer */ + ion_buf = npu_get_npu_ion_buffer(client, buf_hdl); + if (!ion_buf) { + pr_err("%s could not find buffer\n", __func__); + return; + } + + if (ion_buf->iova != addr) + pr_warn("unmap address %llu doesn't match %llu\n", addr, + ion_buf->iova); + + if (ion_buf->table) + dma_buf_unmap_attachment(ion_buf->attachment, ion_buf->table, + DMA_BIDIRECTIONAL); + if (ion_buf->dma_buf && ion_buf->attachment) + dma_buf_detach(ion_buf->dma_buf, ion_buf->attachment); + if (ion_buf->dma_buf) + dma_buf_put(ion_buf->dma_buf); + npu_dev->smmu_ctx.attach_cnt--; + + pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova, + ion_buf->size); + npu_free_npu_ion_buffer(client, buf_hdl); +} + +/* ------------------------------------------------------------------------- + * Functions - Features + * ------------------------------------------------------------------------- + */ +uint8_t npu_hw_clk_gating_enabled(void) +{ + return 1; +} + +uint8_t npu_hw_log_enabled(void) +{ + return 1; +} + +/* ------------------------------------------------------------------------- + * Functions - Subsystem/PIL + * ------------------------------------------------------------------------- + */ +#define NPU_PAS_ID (23) + +int npu_subsystem_get(struct npu_device *npu_dev, const char *fw_name) +{ + struct device *dev = npu_dev->device; + const struct firmware *firmware_p; + ssize_t fw_size; + /* load firmware */ + int ret = request_firmware(&firmware_p, fw_name, dev); + + if (ret < 0) { + pr_err("request_firmware %s failed: %d\n", fw_name, ret); + return ret; + } + fw_size = qcom_mdt_get_size(firmware_p); + if (fw_size < 0 || fw_size > npu_dev->fw_io.mem_size) { + pr_err("npu 
fw size invalid, %lld\n", fw_size); + return -EINVAL; + } + /* load the ELF segments to memory */ + ret = qcom_mdt_load(dev, firmware_p, fw_name, NPU_PAS_ID, + npu_dev->fw_io.mem_region, npu_dev->fw_io.mem_phys, + npu_dev->fw_io.mem_size, &npu_dev->fw_io.mem_reloc); + release_firmware(firmware_p); + if (ret) { + pr_err("qcom_mdt_load failure, %d\n", ret); + return ret; + } + ret = qcom_scm_pas_auth_and_reset(NPU_PAS_ID); + if (ret) { + pr_err("failed to authenticate image and release reset\n"); + return -2; + } + pr_debug("done pas auth\n"); + return 0; +} + +void npu_subsystem_put(struct npu_device *npu_dev) +{ + int ret = qcom_scm_pas_shutdown(NPU_PAS_ID); + + if (ret) + pr_err("failed to shutdown: %d\n", ret); + +} diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h new file mode 100644 index 000000000000..5e5ac67ffc75 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_hw_access.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _NPU_HW_ACCESS_H +#define _NPU_HW_ACCESS_H + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include "npu_common.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define IPC_MEM_OFFSET_FROM_SSTCM 0x00010000 +#define SYS_CACHE_SCID 23 + +#define QFPROM_FMAX_REG_OFFSET 0x000001C8 +#define QFPROM_FMAX_BITS_MASK 0x0000000C +#define QFPROM_FMAX_BITS_SHIFT 2 + +#define REGW(npu_dev, off, val) npu_core_reg_write(npu_dev, off, val) +#define REGR(npu_dev, off) npu_core_reg_read(npu_dev, off) +#define MEMW(npu_dev, dst, src, size) npu_mem_write(npu_dev, (void *)(dst),\ + (void *)(src), size) +#define MEMR(npu_dev, src, dst, size) npu_mem_read(npu_dev, (void *)(src),\ + (void *)(dst), size) +#define IPC_ADDR npu_ipc_addr() +#define INTERRUPT_ACK(npu_dev, num) npu_interrupt_ack(npu_dev, num) +#define INTERRUPT_RAISE_NPU(npu_dev) npu_interrupt_raise_m0(npu_dev) +#define INTERRUPT_RAISE_DSP(npu_dev) npu_interrupt_raise_dsp(npu_dev) + +/* ------------------------------------------------------------------------- + * Data Structures + * ------------------------------------------------------------------------- + */ +struct npu_device; +struct npu_ion_buf_t; +struct npu_host_ctx; +struct npu_client; +typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr); +typedef void (*wq_hdlr_fn) (struct work_struct *work); + +/* ------------------------------------------------------------------------- + * Function Prototypes + * ------------------------------------------------------------------------- + */ +uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off); +void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val); +uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t 
off); +void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off, + uint32_t val); +void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src, + uint32_t size); +int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst, + uint32_t size); +uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off); + +int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size, + uint64_t *addr); +void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr); +void npu_mem_invalidate(struct npu_client *client, int buf_hdl); +bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr); + +void *npu_ipc_addr(void); +void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num); +int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev); +int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev); + +uint8_t npu_hw_clk_gating_enabled(void); +uint8_t npu_hw_log_enabled(void); + +int npu_enable_irq(struct npu_device *npu_dev); +void npu_disable_irq(struct npu_device *npu_dev); + +int npu_enable_sys_cache(struct npu_device *npu_dev); +void npu_disable_sys_cache(struct npu_device *npu_dev); + +int npu_subsystem_get(struct npu_device *npu_dev, const char *fw_name); +void npu_subsystem_put(struct npu_device *npu_dev); + +#endif /* _NPU_HW_ACCESS_H*/ diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c new file mode 100644 index 000000000000..1aea3a971ffa --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -0,0 +1,2112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* -------------------------------------------------------------------------
 * Includes
 * -------------------------------------------------------------------------
 */
#include "npu_hw_access.h"
#include "npu_mgr.h"
#include "npu_firmware.h"
#include "npu_hw.h"
#include "npu_host_ipc.h"
#include "npu_common.h"

/* -------------------------------------------------------------------------
 * Defines
 * -------------------------------------------------------------------------
 */
/* sleep interval between status polls when msleep-based waiting is used */
#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 10
/* default overall timeout for a firmware status-bit transition */
#define NPU_FW_TIMEOUT_MS 1000

/* -------------------------------------------------------------------------
 * File Scope Function Prototypes
 * -------------------------------------------------------------------------
 */
static void host_irq_wq(struct work_struct *work);
static void fw_deinit_wq(struct work_struct *work);
static void turn_off_fw_logging(struct npu_device *npu_dev);
static int wait_for_status_ready(struct npu_device *npu_dev,
	uint32_t status_reg, uint32_t status_bits, bool poll);
static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
	struct npu_client *client);
static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx,
	struct npu_client *client, uint32_t hdl);
static struct npu_network *get_network_by_id(struct npu_host_ctx *ctx,
	struct npu_client *client, int64_t id);
static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
	int64_t id);
static int network_get(struct npu_network *network);
static int network_put(struct npu_network *network);
static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg);
static void host_session_msg_hdlr(struct npu_device *npu_dev);
static int host_error_hdlr(struct npu_device *npu_dev, bool force);
static int npu_send_network_cmd(struct npu_device *npu_dev,
	struct npu_network *network, void *cmd_ptr);
static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
	void *cmd_ptr);
static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up);
static int npu_notify_aop(struct npu_device *npu_dev, bool on);
static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
static void npu_destroy_wq(struct npu_host_ctx *host_ctx);
static struct workqueue_struct *npu_create_wq(struct npu_host_ctx *host_ctx,
	const char *name);

/* -------------------------------------------------------------------------
 * Function Definitions - Init / Deinit
 * -------------------------------------------------------------------------
 */
/*
 * fw_init - power up the NPU, load the firmware via PIL and bring it to
 * the ready state. Reference counted: if the firmware is already enabled
 * only fw_ref_cnt is incremented. On a status-ready timeout the whole
 * bring-up is retried up to 3 times. Returns 0 on success or a negative
 * errno; on failure all acquired resources are released in reverse order
 * through the goto-unwind chain below.
 */
int fw_init(struct npu_device *npu_dev)
{
	uint32_t reg_val;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	int ret = 0, retry_cnt = 3;
	bool need_retry;

	mutex_lock(&host_ctx->lock);
	if (host_ctx->fw_state == FW_ENABLED) {
		/* already booted - just take another reference */
		host_ctx->fw_ref_cnt++;
		pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
		mutex_unlock(&host_ctx->lock);
		return 0;
	}

retry:
	need_retry = false;
	/* tell AOP the NPU is about to power up (best effort) */
	npu_notify_aop(npu_dev, true);

	if (npu_enable_core_power(npu_dev)) {
		ret = -EPERM;
		goto enable_pw_fail;
	}

	if (npu_enable_sys_cache(npu_dev)) {
		ret = -EPERM;
		goto enable_sys_cache_fail;
	}
	/* Boot the NPU subsystem */
	if (npu_subsystem_get(npu_dev, "npu.mdt")) {
		pr_err("pil load npu fw failed\n");
		ret = -ENODEV;
		goto subsystem_get_fail;
	}

	/* Clear control/status registers */
	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
	REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0);
	REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0);
	pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
	reg_val = 0;
	if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE)
		reg_val |= HOST_CTRL_STATUS_FW_PAUSE_VAL;

	if (host_ctx->fw_dbg_mode & FW_DBG_DISABLE_WDOG)
		reg_val |= HOST_CTRL_STATUS_DISABLE_WDOG_VAL;

	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);
	/* Read back to flush all registers for fw to read */
	REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);

	/* Post PIL clocks */
	if (npu_enable_post_pil_clocks(npu_dev)) {
		ret = -EPERM;
		goto enable_post_clk_fail;
	}

	/*
	 * Set logging state and clock gating state
	 * during FW bootup initialization
	 */
	reg_val = REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);

	/* Enable clock gating only if the HW access platform allows it */
	if (npu_hw_clk_gating_enabled())
		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL;
	if (host_ctx->fw_dbg_mode & FW_DBG_ENABLE_LOGGING) {
		/* enable firmware-side logging */
		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_VAL;
	}
	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);

	/* Initialize the host side IPC */
	ret = npu_host_ipc_pre_init(npu_dev);
	if (ret) {
		pr_err("npu_host_ipc_pre_init failed %d\n", ret);
		goto enable_post_clk_fail;
	}

	/* Keep reading ctrl status until NPU is ready */
	pr_debug("waiting for status ready from fw\n");

	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, true)) {
		ret = -EPERM;
		/* a ready timeout is the only condition worth retrying */
		need_retry = true;
		goto wait_fw_ready_fail;
	}

	npu_host_ipc_post_init(npu_dev);

	if (npu_enable_irq(npu_dev)) {
		ret = -EPERM;
		goto wait_fw_ready_fail;
	}

	npu_notify_dsp(npu_dev, true);
	host_ctx->fw_state = FW_ENABLED;
	host_ctx->fw_error = false;
	host_ctx->fw_ref_cnt++;
	reinit_completion(&host_ctx->fw_deinit_done);

	mutex_unlock(&host_ctx->lock);
	pr_debug("firmware init complete\n");
	pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);

	/* Set logging state */
	if (!npu_hw_log_enabled()) {
		pr_debug("fw logging disabled\n");
		turn_off_fw_logging(npu_dev);
	}

	return ret;

wait_fw_ready_fail:
	npu_disable_post_pil_clocks(npu_dev);
enable_post_clk_fail:
	npu_subsystem_put(npu_dev);
subsystem_get_fail:
	npu_disable_sys_cache(npu_dev);
enable_sys_cache_fail:
	npu_disable_core_power(npu_dev);
enable_pw_fail:
	npu_notify_aop(npu_dev, false);
	host_ctx->fw_state = FW_DISABLED;
	if (need_retry && (retry_cnt > 0)) {
		retry_cnt--;
		pr_warn("retry fw init %d\n", retry_cnt);
		goto retry;
	}
	mutex_unlock(&host_ctx->lock);
	return ret;
}

/*
 * fw_deinit - drop a firmware reference and, when the count reaches zero
 * (or unconditionally on SSR), shut the firmware down and power off.
 * @ssr:      true when called from subsystem-restart/error handling; forces
 *            the shutdown regardless of fw_ref_cnt and marks all loaded
 *            networks as being in firmware-error state.
 * @fw_alive: true when the firmware can still accept the SHUTDOWN command.
 */
void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive)
{
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	struct ipc_cmd_shutdown_pkt cmd_shutdown_pkt;
	struct npu_network *network = NULL;
	int ret = 0, i;

	mutex_lock(&host_ctx->lock);
	if (!ssr && (host_ctx->fw_ref_cnt > 0))
		host_ctx->fw_ref_cnt--;

	pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);

	if (host_ctx->fw_state != FW_ENABLED) {
		pr_err("fw is not enabled\n");
		mutex_unlock(&host_ctx->lock);
		return;
	}

	if ((host_ctx->fw_ref_cnt > 0) && !ssr) {
		/* other users still hold references - keep fw running */
		mutex_unlock(&host_ctx->lock);
		return;
	}

	npu_disable_irq(npu_dev);

	if (fw_alive) {
		/* Command header */
		cmd_shutdown_pkt.header.cmd_type = NPU_IPC_CMD_SHUTDOWN;
		cmd_shutdown_pkt.header.size =
			sizeof(struct ipc_cmd_shutdown_pkt);
		cmd_shutdown_pkt.header.trans_id =
			atomic_add_return(1, &host_ctx->ipc_trans_id);
		cmd_shutdown_pkt.header.flags = 0xF;
		ret = npu_host_ipc_send_cmd(npu_dev,
			IPC_QUEUE_CMD_HIGH_PRIORITY, &cmd_shutdown_pkt);

		pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);

		if (ret) {
			pr_err("npu_host_ipc_send_cmd failed\n");
		} else {
			/* Keep reading ctrl status until NPU shuts down */
			pr_debug("waiting for shutdown status from fw\n");
			if (wait_for_status_ready(npu_dev,
				REG_NPU_FW_CTRL_STATUS,
				FW_CTRL_STATUS_SHUTDOWN_DONE_VAL, true)) {
				pr_err("wait for fw shutdown timedout\n");
				ret = -ETIMEDOUT;
			}
		}
	}

	npu_disable_post_pil_clocks(npu_dev);
	npu_disable_sys_cache(npu_dev);
	npu_subsystem_put(npu_dev);
	host_ctx->fw_state = FW_DISABLED;

	/*
	 * if fw is still alive, notify dsp before power off
	 * otherwise delay 500 ms to make sure dsp has finished
	 * its own ssr handling.
+ */ + if (fw_alive) + npu_notify_dsp(npu_dev, false); + else + msleep(500); + + npu_disable_core_power(npu_dev); + + if (ssr) { + /* mark all existing network to error state */ + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + network = &host_ctx->networks[i]; + if (network->is_valid) + network->fw_error = true; + } + } + + complete(&host_ctx->fw_deinit_done); + mutex_unlock(&host_ctx->lock); + pr_debug("firmware deinit complete\n"); + npu_notify_aop(npu_dev, false); +} + +int npu_host_init(struct npu_device *npu_dev) +{ + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + memset(host_ctx, 0, sizeof(*host_ctx)); + init_completion(&host_ctx->misc_done); + init_completion(&host_ctx->fw_deinit_done); + mutex_init(&host_ctx->lock); + atomic_set(&host_ctx->ipc_trans_id, 1); + host_ctx->npu_dev = npu_dev; + + host_ctx->wq = npu_create_wq(host_ctx, "npu_wq"); + if (!host_ctx->wq) + return -EPERM; + + host_ctx->prop_buf = kzalloc(sizeof(struct msm_npu_property), + GFP_KERNEL); + if (!host_ctx->prop_buf) + return -ENOMEM; + + host_ctx->misc_pending = false; + + return 0; +} + +void npu_host_deinit(struct npu_device *npu_dev) +{ + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + kfree(host_ctx->prop_buf); + npu_destroy_wq(host_ctx); + mutex_destroy(&host_ctx->lock); +} + +/* ------------------------------------------------------------------------- + * Function Definitions - Interrupt Handler + * ------------------------------------------------------------------------- + */ +irqreturn_t npu_intr_hdler(int irq, void *ptr) +{ + /* Check the interrupt we received */ + /* Currently this is the IPC interrupt */ + struct npu_device *npu_dev = (struct npu_device *)ptr; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + INTERRUPT_ACK(npu_dev, irq); + + /* Check that the event thread currently is running */ + if (host_ctx->wq) + queue_work(host_ctx->wq, &host_ctx->irq_work); + + return IRQ_HANDLED; +} + +/* 
------------------------------------------------------------------------- + * Function Definitions - Control + * ------------------------------------------------------------------------- + */ +static int host_error_hdlr(struct npu_device *npu_dev, bool force) +{ + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_network *network = NULL; + int i; + + if ((host_ctx->wdg_irq_sts == 0) && (host_ctx->err_irq_sts == 0) + && !force) + return 0; + + if (host_ctx->wdg_irq_sts) + pr_info("watchdog irq triggered\n"); + + fw_deinit(npu_dev, true, force); + host_ctx->wdg_irq_sts = 0; + host_ctx->err_irq_sts = 0; + + /* flush all pending npu cmds */ + mutex_lock(&host_ctx->lock); + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + network = &host_ctx->networks[i]; + if (network->is_valid && network->cmd_pending && + network->fw_error) { + network->cmd_pending = false; + pr_debug("complete network %llx\n", + network->id); + complete(&network->cmd_done); + } + } + host_ctx->misc_pending = false; + complete_all(&host_ctx->misc_done); + mutex_unlock(&host_ctx->lock); + + return 1; +} + +static void host_irq_wq(struct work_struct *work) +{ + struct npu_host_ctx *host_ctx; + struct npu_device *npu_dev; + + host_ctx = container_of(work, struct npu_host_ctx, irq_work); + npu_dev = container_of(host_ctx, struct npu_device, host_ctx); + + if (host_error_hdlr(npu_dev, false)) + return; + + host_session_msg_hdlr(npu_dev); +} + +static void fw_deinit_wq(struct work_struct *work) +{ + struct npu_host_ctx *host_ctx; + struct npu_device *npu_dev; + + pr_debug("%s: deinit fw\n", __func__); + host_ctx = container_of(work, struct npu_host_ctx, fw_deinit_work.work); + npu_dev = container_of(host_ctx, struct npu_device, host_ctx); + + if (atomic_read(&host_ctx->fw_deinit_work_cnt) == 0) + return; + + do { + fw_deinit(npu_dev, false, true); + } while (!atomic_dec_and_test(&host_ctx->fw_deinit_work_cnt)); +} + +static void npu_destroy_wq(struct npu_host_ctx *host_ctx) +{ + 
	flush_delayed_work(&host_ctx->fw_deinit_work);
	destroy_workqueue(host_ctx->wq);
}

/*
 * npu_create_wq - allocate the driver's high-priority unbound workqueue and
 * initialize the IRQ work and the deferred fw-deinit delayed work on the
 * host context. Returns NULL if workqueue allocation fails.
 */
static struct workqueue_struct *npu_create_wq(struct npu_host_ctx *host_ctx,
	const char *name)
{
	struct workqueue_struct *wq =
		alloc_workqueue(name, WQ_HIGHPRI | WQ_UNBOUND, 0);

	INIT_WORK(&host_ctx->irq_work, host_irq_wq);
	INIT_DELAYED_WORK(&host_ctx->fw_deinit_work, fw_deinit_wq);

	return wq;
}

/*
 * turn_off_fw_logging - send a CONFIG_LOG command with empty module/level
 * masks so the firmware stops emitting log messages. Best effort: failure
 * is only reported, not propagated.
 */
static void turn_off_fw_logging(struct npu_device *npu_dev)
{
	struct ipc_cmd_log_state_pkt log_packet;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	int ret = 0;

	log_packet.header.cmd_type = NPU_IPC_CMD_CONFIG_LOG;
	log_packet.header.size = sizeof(struct ipc_cmd_log_state_pkt);
	log_packet.header.trans_id =
		atomic_add_return(1, &host_ctx->ipc_trans_id);
	log_packet.header.flags = 0xF;
	log_packet.log_state.module_msk = 0;
	log_packet.log_state.level_msk = 0;
	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
		&log_packet);

	pr_debug("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);

	if (ret)
		pr_err("npu_host_ipc_send_cmd failed\n");
}

/*
 * wait_for_status_ready - busy-wait (poll=true, 100us udelay steps) or
 * sleep-wait (poll=false, 10ms msleep steps) until all @status_bits are set
 * in @status_reg. The overall budget is NPU_FW_TIMEOUT_MS, extended to
 * NW_DEBUG_TIMEOUT_MS when FW_DBG_MODE_INC_TIMEOUT is set. Returns 0 on
 * success, -ETIMEDOUT otherwise.
 */
static int wait_for_status_ready(struct npu_device *npu_dev,
	uint32_t status_reg, uint32_t status_bits, bool poll)
{
	uint32_t ctrl_sts = 0;
	uint32_t wait_cnt = 0, max_wait_ms;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	max_wait_ms = (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
		NW_DEBUG_TIMEOUT_MS : NPU_FW_TIMEOUT_MS;
	/* 10 iterations of udelay(100) per ms, or one msleep per interval */
	if (poll)
		wait_cnt = max_wait_ms * 10;
	else
		wait_cnt = max_wait_ms / NPU_FW_TIMEOUT_POLL_INTERVAL_MS;

	/* keep reading status register until bits are set */
	do {
		ctrl_sts = REGR(npu_dev, status_reg);
		if ((ctrl_sts & status_bits) == status_bits) {
			pr_debug("status %x[reg %x] ready received\n",
				status_bits, status_reg);
			break;
		}

		if (!wait_cnt) {
			pr_err("timeout wait for status %x[%x] in reg %x\n",
				status_bits, ctrl_sts, status_reg);
			return -ETIMEDOUT;
		}

		if (poll)
			udelay(100);
		else
			msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);

		wait_cnt--;
	} while (1);

	return 0;

}

/*
 * npu_notify_dsp - tell the DSP the NPU is powering up (@pwr_up=true) or
 * down, via the shared HOST_DSP_CTRL_STATUS register plus a DSP interrupt,
 * then poll for the matching ack bits. A missing ack is only warned about.
 */
static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up)
{
	uint32_t ack_val, notify_val;
	int ret = 0;

	if (pwr_up) {
		notify_val = HOST_DSP_CTRL_STATUS_PWR_UP_VAL;
		ack_val = HOST_DSP_CTRL_STATUS_PWR_UP_ACK_VAL;
	} else {
		notify_val = HOST_DSP_CTRL_STATUS_PWR_DWN_VAL;
		ack_val = HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_VAL;
	}

	REGW(npu_dev, REG_HOST_DSP_CTRL_STATUS,
		notify_val);
	/* Read back to flush register for dsp to read */
	REGR(npu_dev, REG_HOST_DSP_CTRL_STATUS);

	INTERRUPT_RAISE_DSP(npu_dev);

	ret = wait_for_status_ready(npu_dev, REG_HOST_DSP_CTRL_STATUS,
		ack_val, true);
	if (ret)
		pr_warn("No response from dsp\n");

	return ret;
}

#define MAX_LEN 128

/*
 * npu_notify_aop - send a QMP json-style message over the AOP mailbox to
 * vote the npu_on BCM resource on or off. Returns 0 when the channel is
 * absent (nothing to do) or the mbox_send_message() result otherwise.
 */
static int npu_notify_aop(struct npu_device *npu_dev, bool on)
{
	char buf[MAX_LEN];
	struct qmp_pkt pkt;
	int buf_size, rc = 0;

	if (!npu_dev->mbox_aop.chan) {
		pr_warn("aop mailbox channel is not available\n");
		return 0;
	}

	buf_size = scnprintf(buf, MAX_LEN, "{class: bcm, res: npu_on, val: %d}",
		on ?
 1 : 0);
	if (buf_size < 0) {
		pr_err("prepare qmp notify buf failed\n");
		return -EINVAL;
	}

	pr_debug("send msg %s to aop\n", buf);
	memset(&pkt, 0, sizeof(pkt));
	/* QMP packets are sized in 4-byte units - round length up */
	pkt.size = (buf_size + 3) & ~0x3;
	pkt.data = buf;

	rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt);
	if (rc < 0)
		pr_err("qmp message send failed, ret=%d\n", rc);

	return rc;
}

/* -------------------------------------------------------------------------
 * Function Definitions - Network Management
 * -------------------------------------------------------------------------
 */
/* network_put - drop one reference; returns the new count (0 for NULL). */
static int network_put(struct npu_network *network)
{
	if (!network)
		return 0;

	return atomic_dec_return(&network->ref_cnt);
}

/* network_get - take one reference; returns the new count (0 for NULL). */
static int network_get(struct npu_network *network)
{
	if (!network)
		return 0;

	return atomic_inc_return(&network->ref_cnt);
}

/*
 * alloc_network - claim the first free slot in ctx->networks (id == 0 means
 * free) and initialize it for @client, including the per-network stats
 * buffer. Caller must hold ctx->lock. Returns NULL when the table is full
 * or the stats buffer cannot be allocated. Note: network->id is the slot
 * index plus one, so id 0 stays reserved as the "free" marker.
 */
static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
	struct npu_client *client)
{
	int32_t i;
	struct npu_network *network = ctx->networks;

	WARN_ON(!mutex_is_locked(&ctx->lock));

	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
		if (network->id == 0)
			break;

		network++;
	}

	if (i == MAX_LOADED_NETWORK) {
		pr_err("No free network\n");
		return NULL;
	}

	memset(network, 0, sizeof(struct npu_network));
	network->id = i + 1;
	init_completion(&network->cmd_done);
	network->is_valid = true;
	network->client = client;
	network->stats_buf = kzalloc(NPU_MAX_STATS_BUF_SIZE,
		GFP_KERNEL);
	if (!network->stats_buf) {
		/* release the slot again (id back to 0 == free) */
		memset(network, 0, sizeof(struct npu_network));
		return NULL;
	}

	ctx->network_num++;
	pr_debug("%s:Active network num %d\n", __func__, ctx->network_num);

	return network;
}

/*
 * get_network_by_hdl - look up a network by its firmware handle and take a
 * reference on it. @client may be NULL (firmware-message path) to skip the
 * ownership check. Caller must hold ctx->lock; returns NULL when the handle
 * is unknown or belongs to a different client.
 */
static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx,
	struct npu_client *client, uint32_t hdl)
{
	int32_t i;
	struct npu_network *network = ctx->networks;

	WARN_ON(!mutex_is_locked(&ctx->lock));

	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
		if (network->network_hdl == hdl)
			break;

		network++;
	}

	if ((i == MAX_LOADED_NETWORK) || !network->is_valid) {
		pr_err("network hdl invalid %d\n", hdl);
		return NULL;
	}

	if (client && (client != network->client)) {
		pr_err("network %lld doesn't belong to this client\n",
			network->id);
		return NULL;
	}

	network_get(network);
	return network;
}

/*
 * get_network_by_id - look up a network by its slot id (1-based) and take a
 * reference on it. Same locking/ownership rules as get_network_by_hdl().
 */
static struct npu_network *get_network_by_id(struct npu_host_ctx *ctx,
	struct npu_client *client, int64_t id)
{
	struct npu_network *network = NULL;

	WARN_ON(!mutex_is_locked(&ctx->lock));

	if (id < 1 || id > MAX_LOADED_NETWORK ||
		!ctx->networks[id - 1].is_valid) {
		pr_err("Invalid network id %d\n", (int32_t)id);
		return NULL;
	}

	network = &ctx->networks[id - 1];
	if (client && (client != network->client)) {
		pr_err("network %lld doesn't belong to this client\n", id);
		return NULL;
	}

	network_get(network);
	return network;
}

/*
 * free_network - drop the lookup reference plus one more; when the count
 * hits zero the slot is cleared and returned to the free pool, otherwise
 * the network is left allocated and a warning is printed. Caller must hold
 * ctx->lock.
 */
static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
	int64_t id)
{
	struct npu_network *network = NULL;

	WARN_ON(!mutex_is_locked(&ctx->lock));

	network = get_network_by_id(ctx, client, id);
	if (network) {
		network_put(network);
		if (atomic_read(&network->ref_cnt) == 0) {
			kfree(network->stats_buf);
			memset(network, 0, sizeof(struct npu_network));
			ctx->network_num--;
			pr_debug("%s:Active network num %d\n", __func__,
				ctx->network_num);
		} else {
			pr_warn("network %lld:%d is in use\n", network->id,
				atomic_read(&network->ref_cnt));
		}
	}
}

/* -------------------------------------------------------------------------
 * Function Definitions - IPC
 * -------------------------------------------------------------------------
 */

/*
 * app_msg_proc - dispatch one firmware response message (msg[1] holds the
 * message id). For network-bound responses the transaction id is matched
 * against the pending command before the waiter is completed. Called with
 * host_ctx->lock held (from host_session_msg_hdlr).
 */
static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
{
	uint32_t msg_id;
	struct npu_network *network = NULL;
	struct npu_device *npu_dev = host_ctx->npu_dev;

	msg_id = msg[1];
	switch (msg_id) {
	case NPU_IPC_MSG_EXECUTE_DONE:
	{
		struct ipc_msg_execute_pkt *exe_rsp_pkt =
			(struct ipc_msg_execute_pkt *)msg;

		pr_debug("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
			exe_rsp_pkt->header.status);
		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
		pr_debug("e2e_IPC_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.e2e_ipc_tick_count);
		pr_debug("aco_load_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.aco_load_tick_count);
		pr_debug("aco_execute_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.aco_execution_tick_count);
		pr_debug("total_num_layers: %d\n",
			exe_rsp_pkt->stats.exe_stats.total_num_layers);

		network = get_network_by_hdl(host_ctx, NULL,
			exe_rsp_pkt->network_hdl);
		if (!network) {
			pr_err("can't find network %x\n",
				exe_rsp_pkt->network_hdl);
			break;
		}

		/* stale/mismatched response - drop it */
		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
			pr_err("execute_pkt trans_id is not match %d:%d\n",
				network->trans_id,
				exe_rsp_pkt->header.trans_id);
			network_put(network);
			break;
		}

		network->cmd_pending = false;
		network->cmd_ret_status = exe_rsp_pkt->header.status;

		complete(&network->cmd_done);
		network_put(network);

		break;
	}
	case NPU_IPC_MSG_EXECUTE_V2_DONE:
	{
		struct ipc_msg_execute_pkt_v2 *exe_rsp_pkt =
			(struct ipc_msg_execute_pkt_v2 *)msg;
		uint32_t stats_size = 0;

		pr_debug("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
			exe_rsp_pkt->header.status);
		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);

		network = get_network_by_hdl(host_ctx, NULL,
			exe_rsp_pkt->network_hdl);
		if (!network) {
			pr_err("can't find network %x\n",
				exe_rsp_pkt->network_hdl);
			break;
		}

		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
			pr_err("execute_pkt_v2 trans_id is not match %d:%d\n",
				network->trans_id,
				exe_rsp_pkt->header.trans_id);
			network_put(network);
			break;
		}

		pr_debug("network id : %llu\n", network->id);
		/* guard against a malformed header before computing sizes */
		if (exe_rsp_pkt->header.size < sizeof(*exe_rsp_pkt)) {
			pr_err("invalid packet header size, header.size: %d\n",
				exe_rsp_pkt->header.size);
			network_put(network);
			break;
		}
		/* trailing stats payload, clamped to the host buffer size */
		stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
		pr_debug("stats_size %d:%d\n", exe_rsp_pkt->header.size,
			stats_size);
		stats_size = stats_size < network->stats_buf_size ?
			stats_size : network->stats_buf_size;
		if (stats_size)
			memcpy(network->stats_buf, exe_rsp_pkt->stats_data,
				stats_size);

		network->stats_buf_size = stats_size;
		network->cmd_pending = false;
		network->cmd_ret_status = exe_rsp_pkt->header.status;
		complete(&network->cmd_done);
		network_put(network);
		break;
	}
	case NPU_IPC_MSG_LOAD_DONE:
	{
		uint32_t network_id = 0;
		struct ipc_msg_load_pkt *load_rsp_pkt =
			(struct ipc_msg_load_pkt *)msg;

		pr_debug("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
			load_rsp_pkt->header.status,
			load_rsp_pkt->header.trans_id);

		/*
		 * The upper 8 bits in flags is the current active
		 * network count in fw
		 */
		pr_debug("Current active network count in FW is %d\n",
			load_rsp_pkt->header.flags >> 24);

		/*
		 * the upper 16 bits in returned network_hdl is
		 * the network ID
		 */
		pr_debug("network_hdl: %x\n", load_rsp_pkt->network_hdl);
		network_id = load_rsp_pkt->network_hdl >> 16;
		network = get_network_by_id(host_ctx, NULL, network_id);
		if (!network) {
			pr_err("can't find network %d\n", network_id);
			break;
		}

		if (network->trans_id != load_rsp_pkt->header.trans_id) {
			pr_err("load_rsp_pkt trans_id is not match %d:%d\n",
				network->trans_id,
				load_rsp_pkt->header.trans_id);
			network_put(network);
			break;
		}

		network->network_hdl = load_rsp_pkt->network_hdl;
		network->cmd_pending = false;
		network->cmd_ret_status = load_rsp_pkt->header.status;

		complete(&network->cmd_done);
		network_put(network);
		break;
	}
	case NPU_IPC_MSG_UNLOAD_DONE:
	{
		struct ipc_msg_unload_pkt *unload_rsp_pkt =
			(struct ipc_msg_unload_pkt *)msg;

		pr_debug("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
			unload_rsp_pkt->header.status,
			unload_rsp_pkt->header.trans_id);

		/*
		 * The upper 8 bits in flags is the current active
		 * network count in fw
		 */
		pr_debug("Current active network count in FW is %d\n",
			unload_rsp_pkt->header.flags >> 24);

		network = get_network_by_hdl(host_ctx, NULL,
			unload_rsp_pkt->network_hdl);
		if (!network) {
			pr_err("can't find network %x\n",
				unload_rsp_pkt->network_hdl);
			break;
		}

		if (network->trans_id != unload_rsp_pkt->header.trans_id) {
			pr_err("unload_rsp_pkt trans_id is not match %d:%d\n",
				network->trans_id,
				unload_rsp_pkt->header.trans_id);
			network_put(network);
			break;
		}

		network->cmd_pending = false;
		network->cmd_ret_status = unload_rsp_pkt->header.status;

		complete(&network->cmd_done);
		network_put(network);
		break;
	}
	case NPU_IPC_MSG_LOOPBACK_DONE:
	{
		struct ipc_msg_loopback_pkt *lb_rsp_pkt =
			(struct ipc_msg_loopback_pkt *)msg;

		pr_debug("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
			lb_rsp_pkt->loopbackParams);
		host_ctx->misc_pending = false;

		complete_all(&host_ctx->misc_done);
		break;
	}
	case NPU_IPC_MSG_SET_PROPERTY_DONE:
	{
		struct ipc_msg_prop_pkt *prop_rsp_pkt =
			(struct ipc_msg_prop_pkt *)msg;
		uint32_t *param = (uint32_t *)((uint8_t *)prop_rsp_pkt +
			sizeof(struct ipc_msg_prop_pkt));
		pr_debug("NPU_IPC_MSG_SET_PROPERTY_DONE %d:0x%x:%d\n",
			prop_rsp_pkt->network_hdl,
			prop_rsp_pkt->prop_id,
			param[0]);

		host_ctx->cmd_ret_status = prop_rsp_pkt->header.status;
		host_ctx->misc_pending = false;

		complete_all(&host_ctx->misc_done);
		break;
	}
	case NPU_IPC_MSG_GET_PROPERTY_DONE:
	{
		struct ipc_msg_prop_pkt *prop_rsp_pkt =
			(struct ipc_msg_prop_pkt *)msg;
		uint32_t prop_size = 0;
		uint32_t *prop_data = (uint32_t *)((uint8_t *)prop_rsp_pkt +
			sizeof(struct ipc_msg_header_pkt));

		pr_debug("NPU_IPC_MSG_GET_PROPERTY_DONE %d:0x%x:%d:%d\n",
			prop_rsp_pkt->network_hdl,
			prop_rsp_pkt->prop_id,
			prop_rsp_pkt->num_params,
			prop_rsp_pkt->prop_param[0]);

		if (prop_rsp_pkt->header.size <
			sizeof(struct ipc_msg_header_pkt)) {
			pr_err("Invalid rsp pkt size %d\n",
				prop_rsp_pkt->header.size);
			break;
		}

		host_ctx->cmd_ret_status = prop_rsp_pkt->header.status;

		if (prop_rsp_pkt->num_params > 0) {
			/* Copy prop data to kernel buffer */
			prop_size = prop_rsp_pkt->header.size -
				sizeof(struct ipc_msg_header_pkt);
			memcpy(host_ctx->prop_buf, prop_data, prop_size);
		}
		host_ctx->misc_pending = false;

		complete_all(&host_ctx->misc_done);
		break;
	}
	case NPU_IPC_MSG_GENERAL_NOTIFY:
	{
		struct ipc_msg_general_notify_pkt *notify_msg_pkt =
			(struct ipc_msg_general_notify_pkt *)msg;

		pr_debug("NPU_IPC_MSG_GENERAL_NOTIFY %d:0x%x:%d\n",
			notify_msg_pkt->network_hdl,
			notify_msg_pkt->notify_id,
			notify_msg_pkt->notify_param[0]);

		switch (notify_msg_pkt->notify_id) {
		case NPU_NOTIFY_DCVS_MODE:
			pr_debug("NPU_IPC_MSG_GENERAL_NOTIFY DCVS_MODE %d\n",
				notify_msg_pkt->notify_param[0]);
			update_dcvs_activity(npu_dev,
				notify_msg_pkt->notify_param[0]);
			break;
		default:
			pr_err("Nothing to do\n");
			break;
		}
		break;
	}
	default:
		pr_err("Not supported apps response received %d\n",
			msg_id);
		break;
	}
}

/*
 * host_session_msg_hdlr - drain the APPS response IPC queue, dispatching
 * each message through app_msg_proc(). Skips reading when the firmware is
 * disabled (queues are gone after fw_deinit).
 */
static void host_session_msg_hdlr(struct npu_device *npu_dev)
{
	uint32_t *msg;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
	if (!msg)
		return;

	mutex_lock(&host_ctx->lock);
	if (host_ctx->fw_state == FW_DISABLED) {
		pr_warn("handle npu session msg when FW is disabled\n");
		goto skip_read_msg;
	}

	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
		pr_debug("received from msg queue\n");
		app_msg_proc(host_ctx, msg);
	}

skip_read_msg:
	mutex_unlock(&host_ctx->lock);
	kfree(msg);
}


/* -------------------------------------------------------------------------
 * Function Definitions - Functionality
 *
 -------------------------------------------------------------------------
 */
/*
 * npu_host_get_info - report firmware version and the number of supported
 * power levels to userspace.
 */
int32_t npu_host_get_info(struct npu_device *npu_dev,
	struct msm_npu_get_info_ioctl *get_info_ioctl)
{
	get_info_ioctl->firmware_version = FIRMWARE_VERSION;
	get_info_ioctl->flags = npu_dev->pwrctrl.num_pwrlevels;
	return 0;
}

/*
 * npu_host_map_buf - map a client ION buffer into the NPU address space
 * under the host-context lock; the NPU-visible address is returned in
 * map_ioctl->npu_phys_addr.
 */
int32_t npu_host_map_buf(struct npu_client *client,
	struct msm_npu_map_buf_ioctl *map_ioctl)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	int ret;

	mutex_lock(&host_ctx->lock);
	ret = npu_mem_map(client, map_ioctl->buf_ion_hdl, map_ioctl->size,
		&map_ioctl->npu_phys_addr);
	mutex_unlock(&host_ctx->lock);

	return ret;
}

/*
 * npu_host_unmap_buf - unmap a previously mapped client buffer. During SSR
 * the unmap waits (bounded by NW_CMD_TIMEOUT) for the firmware teardown to
 * finish first, so the mapping is not pulled out from under live firmware.
 */
int32_t npu_host_unmap_buf(struct npu_client *client,
	struct msm_npu_unmap_buf_ioctl *unmap_ioctl)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	/*
	 * Once SSR occurs, all buffers only can be unmapped until
	 * fw is disabled
	 */
	if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
		!wait_for_completion_timeout(
		&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
		pr_warn("npu: wait for fw_deinit_done time out\n");

	mutex_lock(&host_ctx->lock);
	npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl,
		unmap_ioctl->npu_phys_addr);
	mutex_unlock(&host_ctx->lock);
	return 0;
}

/*
 * npu_send_network_cmd - queue one network-scoped IPC command on the APPS
 * exec queue. Exactly one command may be pending per network; the pending
 * flag and trans_id recorded here are matched against the response in
 * app_msg_proc(). Caller is expected to hold host_ctx->lock (this helper,
 * unlike npu_send_misc_cmd, does not take it itself).
 */
static int npu_send_network_cmd(struct npu_device *npu_dev,
	struct npu_network *network, void *cmd_ptr)
{
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	int ret = 0;

	if (network->fw_error || host_ctx->fw_error ||
		(host_ctx->fw_state == FW_DISABLED)) {
		pr_err("fw is in error state or disabled, can't send network cmd\n");
		ret = -EIO;
	} else if (network->cmd_pending) {
		pr_err("Another cmd is pending\n");
		ret = -EBUSY;
	} else {
		pr_debug("Send cmd %d network id %lld\n",
			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
			network->id);
		network->cmd_ret_status = 0;
		network->cmd_pending = true;
		network->trans_id =
			((struct ipc_cmd_header_pkt *)cmd_ptr)->trans_id;
		ret = npu_host_ipc_send_cmd(npu_dev,
			IPC_QUEUE_APPS_EXEC, cmd_ptr);
		if (ret)
			network->cmd_pending = false;
	}

	return ret;
}

/*
 * npu_send_misc_cmd - queue one non-network IPC command on queue @q_idx.
 * Serialized by host_ctx->lock and the single misc_pending slot; the
 * waiter blocks on host_ctx->misc_done which the response path completes.
 */
static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
	void *cmd_ptr)
{
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
	int ret = 0;

	mutex_lock(&host_ctx->lock);
	if (host_ctx->fw_error || (host_ctx->fw_state == FW_DISABLED)) {
		pr_err("fw is in error state or disabled, can't send misc cmd\n");
		ret = -EIO;
	} else if (host_ctx->misc_pending) {
		pr_err("Another misc cmd is pending\n");
		ret = -EBUSY;
	} else {
		pr_debug("Send cmd %d\n",
			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
		host_ctx->cmd_ret_status = 0;
		reinit_completion(&host_ctx->misc_done);
		host_ctx->misc_pending = true;
		ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
		if (ret)
			host_ctx->misc_pending = false;
	}
	mutex_unlock(&host_ctx->lock);

	return ret;
}

/*
 * host_copy_patch_data - fill a v1 patch tuple for the firmware from the
 * userspace layer descriptor plus the resolved @value.
 */
static void host_copy_patch_data(struct npu_patch_tuple *param, uint32_t value,
	struct msm_npu_layer *layer_info)
{
	param->value = value;
	param->chunk_id = layer_info->patch_info.chunk_id;
	param->loc_offset = layer_info->patch_info.loc_offset;
	param->instruction_size_in_bytes =
		layer_info->patch_info.instruction_size_in_bytes;
	param->shift_value_in_bits =
		layer_info->patch_info.shift_value_in_bits;
	param->variable_size_in_bits =
		layer_info->patch_info.variable_size_in_bits;

	pr_debug("copy_patch_data: %x %d %x %x %x %x\n",
		param->value,
		param->chunk_id,
		param->loc_offset,
		param->instruction_size_in_bytes,
		param->shift_value_in_bits,
		param->variable_size_in_bits);
}

/*
 * host_copy_patch_data_v2 - fill a v2 patch tuple for the firmware from
 * the userspace v2 patch descriptor (value already resolved by caller).
 */
static void host_copy_patch_data_v2(struct npu_patch_tuple_v2 *param,
	struct msm_npu_patch_info_v2 *patch_info)
{
	param->value = patch_info->value;
	param->chunk_id = patch_info->chunk_id;
	param->loc_offset = patch_info->loc_offset;
	param->instruction_size_in_bytes =
		patch_info->instruction_size_in_bytes;
	param->shift_value_in_bits = patch_info->shift_value_in_bits;
	param->variable_size_in_bits = patch_info->variable_size_in_bits;
	pr_debug("copy_patch_data_v2: %x %d %x %x %x %x\n",
		param->value,
		param->chunk_id,
		param->loc_offset,
		param->instruction_size_in_bytes,
		param->shift_value_in_bits,
		param->variable_size_in_bits);
}

/*
 * find_networks_perf_mode - return the highest cur_perf_mode among all
 * loaded networks, or 1 (the lowest level) when no network is loaded.
 */
static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
{
	struct npu_network *network;
	uint32_t max_perf_mode = 0;
	int i = 0;

	network = host_ctx->networks;

	if (!host_ctx->network_num) {
		/* if no network exists, set to the lowest level */
		max_perf_mode = 1;
	} else {
		/* find the max level among all the networks */
		for (i = 0; i < MAX_LOADED_NETWORK; i++) {
			if ((network->id != 0) &&
				(network->cur_perf_mode != 0) &&
				(network->cur_perf_mode > max_perf_mode))
				max_perf_mode = network->cur_perf_mode;
			network++;
		}
	}
	pr_debug("max perf mode for networks: %d\n", max_perf_mode);

	return max_perf_mode;
}

/*
 * set_perf_mode - compute the effective power level (network demand,
 * optionally overridden by perf_mode_override and capped by the current
 * DCVS activity) and apply it via npu_set_uc_power_level().
 */
static int set_perf_mode(struct npu_device *npu_dev)
{
	int ret = 0;
	uint32_t networks_perf_mode;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	networks_perf_mode = find_networks_perf_mode(host_ctx);

	if (npu_dev->pwrctrl.perf_mode_override)
		networks_perf_mode = npu_dev->pwrctrl.perf_mode_override;

	if (npu_dev->pwrctrl.cur_dcvs_activity != NPU_DCVS_ACTIVITY_MAX_PERF)
		networks_perf_mode = min_t(uint32_t, networks_perf_mode,
			npu_dev->pwrctrl.cur_dcvs_activity);

	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
	if (ret)
		pr_err("network load failed due to power level set\n");

	return ret;
}

/*
 * update_dcvs_activity - record the DCVS activity level reported by the
 * firmware and re-evaluate the effective power level.
 */
static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity)
{
	npu_dev->pwrctrl.cur_dcvs_activity = activity;
	pr_debug("update dcvs activity to %d\n", activity);

	return set_perf_mode(npu_dev);
}

int32_t npu_host_set_fw_property(struct
npu_device *npu_dev, + struct msm_npu_property *property) +{ + int ret = 0, i; + uint32_t prop_param, prop_id; + struct ipc_cmd_prop_pkt *prop_packet = NULL; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + uint32_t num_of_params, pkt_size; + + prop_id = property->prop_id; + num_of_params = min_t(uint32_t, property->num_of_params, + (uint32_t)PROP_PARAM_MAX_SIZE); + pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t); + prop_packet = kzalloc(pkt_size, GFP_KERNEL); + + if (!prop_packet) + return -ENOMEM; + + switch (prop_id) { + case MSM_NPU_PROP_ID_DCVS_MODE: + prop_param = min_t(uint32_t, property->prop_param[0], + (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1)); + property->prop_param[0] = prop_param; + pr_debug("setting dcvs_mode to %d\n", prop_param); + + if (property->network_hdl == 0) { + npu_dev->pwrctrl.dcvs_mode = prop_param; + pr_debug("Set global dcvs mode %d\n", prop_param); + } + break; + default: + pr_err("unsupported property received %d\n", property->prop_id); + goto set_prop_exit; + } + + ret = fw_init(npu_dev); + if (ret) { + pr_err("fw_init fail\n"); + goto set_prop_exit; + } + + prop_packet->header.cmd_type = NPU_IPC_CMD_SET_PROPERTY; + prop_packet->header.size = pkt_size; + prop_packet->header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + prop_packet->header.flags = 0; + + prop_packet->prop_id = prop_id; + prop_packet->num_params = num_of_params; + prop_packet->network_hdl = property->network_hdl; + for (i = 0; i < num_of_params; i++) + prop_packet->prop_param[i] = property->prop_param[i]; + + ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, + prop_packet); + + pr_debug("NPU_IPC_CMD_SET_PROPERTY sent status: %d\n", ret); + + if (ret) { + pr_err("NPU_IPC_CMD_SET_PROPERTY failed\n"); + goto deinit_fw; + } + + ret = wait_for_completion_interruptible_timeout( + &host_ctx->misc_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? 
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_SET_PROPERTY time out\n"); + ret = -ETIMEDOUT; + goto deinit_fw; + } else if (ret < 0) { + pr_err("Wait for set_property done interrupted by signal\n"); + goto deinit_fw; + } + + ret = host_ctx->cmd_ret_status; + if (ret) + pr_err("set fw property failed %d\n", ret); + +deinit_fw: + fw_deinit(npu_dev, false, true); +set_prop_exit: + kfree(prop_packet); + return ret; +} + +int32_t npu_host_get_fw_property(struct npu_device *npu_dev, + struct msm_npu_property *property) +{ + int ret = 0, i; + struct ipc_cmd_prop_pkt *prop_packet = NULL; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct msm_npu_property *prop_from_fw; + uint32_t num_of_params, pkt_size; + + num_of_params = min_t(uint32_t, property->num_of_params, + (uint32_t)PROP_PARAM_MAX_SIZE); + pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t); + prop_packet = kzalloc(pkt_size, GFP_KERNEL); + + if (!prop_packet) + return -ENOMEM; + + ret = fw_init(npu_dev); + if (ret) { + pr_err("fw_init fail\n"); + goto get_prop_exit; + } + + prop_packet->header.cmd_type = NPU_IPC_CMD_GET_PROPERTY; + prop_packet->header.size = pkt_size; + prop_packet->header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + prop_packet->header.flags = 0; + + prop_packet->prop_id = property->prop_id; + prop_packet->num_params = num_of_params; + prop_packet->network_hdl = property->network_hdl; + for (i = 0; i < num_of_params; i++) + prop_packet->prop_param[i] = property->prop_param[i]; + + ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, + prop_packet); + pr_debug("NPU_IPC_CMD_GET_PROPERTY sent status: %d\n", ret); + + if (ret) { + pr_err("NPU_IPC_CMD_GET_PROPERTY failed\n"); + goto deinit_fw; + } + + ret = wait_for_completion_interruptible_timeout( + &host_ctx->misc_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? 
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_GET_PROPERTY time out\n"); + ret = -ETIMEDOUT; + goto deinit_fw; + } else if (ret < 0) { + pr_err("Wait for get_property done interrupted by signal\n"); + goto deinit_fw; + } + + ret = host_ctx->cmd_ret_status; + if (!ret) { + /* Return prop data retrieved from fw to user */ + prop_from_fw = (struct msm_npu_property *)(host_ctx->prop_buf); + if (property->prop_id == prop_from_fw->prop_id && + property->network_hdl == prop_from_fw->network_hdl) { + property->num_of_params = num_of_params; + for (i = 0; i < num_of_params; i++) + property->prop_param[i] = + prop_from_fw->prop_param[i]; + } + } else { + pr_err("get fw property failed %d\n", ret); + } + +deinit_fw: + fw_deinit(npu_dev, false, true); +get_prop_exit: + kfree(prop_packet); + return ret; +} + +int32_t npu_host_load_network(struct npu_client *client, + struct msm_npu_load_network_ioctl *load_ioctl) +{ + int ret = 0; + struct npu_device *npu_dev = client->npu_dev; + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct npu_network *network; + struct ipc_cmd_load_pkt load_packet; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + ret = fw_init(npu_dev); + if (ret) + return ret; + + mutex_lock(&host_ctx->lock); + network = alloc_network(host_ctx, client); + if (!network) { + ret = -ENOMEM; + goto err_deinit_fw; + } + + network_get(network); + network->buf_hdl = load_ioctl->buf_ion_hdl; + network->size = load_ioctl->buf_size; + network->phy_add = load_ioctl->buf_phys_addr; + network->first_block_size = load_ioctl->first_block_size; + network->priority = load_ioctl->priority; + network->cur_perf_mode = network->init_perf_mode = + (load_ioctl->perf_mode == PERF_MODE_DEFAULT) ? 
+ pwr->num_pwrlevels : load_ioctl->perf_mode; + + /* verify mapped physical address */ + if (!npu_mem_verify_addr(client, network->phy_add)) { + ret = -EINVAL; + goto error_free_network; + } + + load_packet.header.cmd_type = NPU_IPC_CMD_LOAD; + load_packet.header.size = sizeof(struct ipc_cmd_load_pkt); + load_packet.header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + load_packet.header.flags = 0; + + /* ACO Buffer. Use the npu mapped aco address */ + load_packet.buf_pkt.address = (uint64_t)network->phy_add; + load_packet.buf_pkt.buf_size = network->first_block_size; + load_packet.buf_pkt.network_id = network->id; + + set_perf_mode(npu_dev); + /* NPU_IPC_CMD_LOAD will go onto IPC_QUEUE_APPS_EXEC */ + reinit_completion(&network->cmd_done); + ret = npu_send_network_cmd(npu_dev, network, &load_packet); + if (ret) { + pr_err("NPU_IPC_CMD_LOAD sent failed: %d\n", ret); + goto error_free_network; + } + + mutex_unlock(&host_ctx->lock); + + ret = wait_for_completion_timeout( + &network->cmd_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? 
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + mutex_lock(&host_ctx->lock); + if (!ret) { + pr_err_ratelimited("NPU_IPC_CMD_LOAD time out\n"); + ret = -ETIMEDOUT; + goto error_free_network; + } + + if (network->fw_error) { + ret = -EIO; + pr_err("fw is in error state during load network\n"); + goto error_free_network; + } + + ret = network->cmd_ret_status; + if (ret) + goto error_free_network; + + load_ioctl->network_hdl = network->network_hdl; + network->is_active = true; + network_put(network); + + mutex_unlock(&host_ctx->lock); + + return ret; + +error_free_network: + network_put(network); + free_network(host_ctx, client, network->id); +err_deinit_fw: + mutex_unlock(&host_ctx->lock); + fw_deinit(npu_dev, false, true); + return ret; +} + +int32_t npu_host_load_network_v2(struct npu_client *client, + struct msm_npu_load_network_ioctl_v2 *load_ioctl, + struct msm_npu_patch_info_v2 *patch_info) +{ + int ret = 0, i; + struct npu_device *npu_dev = client->npu_dev; + struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + struct npu_network *network; + struct ipc_cmd_load_pkt_v2 *load_packet = NULL; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + uint32_t num_patch_params, pkt_size; + + ret = fw_init(npu_dev); + if (ret) + return ret; + + mutex_lock(&host_ctx->lock); + network = alloc_network(host_ctx, client); + if (!network) { + ret = -ENOMEM; + goto err_deinit_fw; + } + + network_get(network); + num_patch_params = load_ioctl->patch_info_num; + pkt_size = sizeof(*load_packet) + + num_patch_params * sizeof(struct npu_patch_tuple_v2); + load_packet = kzalloc(pkt_size, GFP_KERNEL); + + if (!load_packet) { + ret = -ENOMEM; + goto error_free_network; + } + + for (i = 0; i < num_patch_params; i++) + host_copy_patch_data_v2(&load_packet->patch_params[i], + &patch_info[i]); + + network->buf_hdl = load_ioctl->buf_ion_hdl; + network->size = load_ioctl->buf_size; + network->phy_add = load_ioctl->buf_phys_addr; + network->first_block_size = load_ioctl->first_block_size; + 
network->priority = load_ioctl->priority; + network->cur_perf_mode = network->init_perf_mode = + (load_ioctl->perf_mode == PERF_MODE_DEFAULT) ? + pwr->num_pwrlevels : load_ioctl->perf_mode; + network->num_layers = load_ioctl->num_layers; + + /* verify mapped physical address */ + if (!npu_mem_verify_addr(client, network->phy_add)) { + pr_err("Invalid network address %llx\n", network->phy_add); + ret = -EINVAL; + goto error_free_network; + } + + load_packet->header.cmd_type = NPU_IPC_CMD_LOAD_V2; + load_packet->header.size = pkt_size; + load_packet->header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + load_packet->header.flags = 0; + + /* ACO Buffer. Use the npu mapped aco address */ + load_packet->buf_pkt.address = (uint32_t)network->phy_add; + load_packet->buf_pkt.buf_size = network->first_block_size; + load_packet->buf_pkt.network_id = network->id; + load_packet->buf_pkt.num_layers = network->num_layers; + load_packet->num_patch_params = num_patch_params; + + set_perf_mode(npu_dev); + /* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */ + reinit_completion(&network->cmd_done); + ret = npu_send_network_cmd(npu_dev, network, load_packet); + if (ret) { + pr_debug("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret); + goto error_free_network; + } + + mutex_unlock(&host_ctx->lock); + + ret = wait_for_completion_timeout( + &network->cmd_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? 
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + mutex_lock(&host_ctx->lock); + + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_LOAD_V2 time out\n"); + ret = -ETIMEDOUT; + goto error_free_network; + } + + if (network->fw_error) { + ret = -EIO; + pr_err("fw is in error state during load_v2 network\n"); + goto error_free_network; + } + + ret = network->cmd_ret_status; + if (ret) + goto error_free_network; + + load_ioctl->network_hdl = network->network_hdl; + network->is_active = true; + kfree(load_packet); + network_put(network); + + mutex_unlock(&host_ctx->lock); + + return ret; + +error_free_network: + kfree(load_packet); + network_put(network); + free_network(host_ctx, client, network->id); +err_deinit_fw: + mutex_unlock(&host_ctx->lock); + fw_deinit(npu_dev, false, true); + return ret; +} + +int32_t npu_host_unload_network(struct npu_client *client, + struct msm_npu_unload_network_ioctl *unload) +{ + int ret = 0, retry_cnt = 1; + struct npu_device *npu_dev = client->npu_dev; + struct ipc_cmd_unload_pkt unload_packet; + struct npu_network *network; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + /* get the corresponding network for ipc trans id purpose */ + mutex_lock(&host_ctx->lock); + network = get_network_by_hdl(host_ctx, client, + unload->network_hdl); + if (!network) { + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + + if (!network->is_active) { + pr_err("network is not active\n"); + network_put(network); + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + + if (network->fw_error) { + pr_err("fw in error state, skip unload network in fw\n"); + goto free_network; + } + + pr_debug("Unload network %lld\n", network->id); + /* prepare IPC packet for UNLOAD */ + unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD; + unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt); + unload_packet.header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + unload_packet.header.flags = 0; + unload_packet.network_hdl = 
(uint32_t)network->network_hdl; + +retry: + /* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */ + reinit_completion(&network->cmd_done); + ret = npu_send_network_cmd(npu_dev, network, &unload_packet); + + if (ret) { + pr_err("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret); + /* + * If another command is running on this network, + * retry after 500ms. + */ + if ((ret == -EBUSY) && (retry_cnt > 0)) { + pr_err("Network is running, retry later\n"); + mutex_unlock(&host_ctx->lock); + retry_cnt--; + msleep(500); + mutex_lock(&host_ctx->lock); + goto retry; + } + goto free_network; + } + + mutex_unlock(&host_ctx->lock); + + ret = wait_for_completion_timeout( + &network->cmd_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? + NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + mutex_lock(&host_ctx->lock); + + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_UNLOAD time out\n"); + network->cmd_pending = false; + ret = -ETIMEDOUT; + goto free_network; + } + + if (network->fw_error) { + ret = -EIO; + pr_err("fw is in error state during unload network\n"); + } else { + ret = network->cmd_ret_status; + pr_debug("unload network status %d\n", ret); + } + +free_network: + /* + * free the network on the kernel if the corresponding ACO + * handle is unloaded on the firmware side + */ + network_put(network); + free_network(host_ctx, client, network->id); + + /* recalculate uc_power_level after unload network */ + if (npu_dev->pwrctrl.cur_dcvs_activity) + set_perf_mode(npu_dev); + + mutex_unlock(&host_ctx->lock); + if (host_ctx->fw_unload_delay_ms) { + flush_delayed_work(&host_ctx->fw_deinit_work); + atomic_inc(&host_ctx->fw_deinit_work_cnt); + queue_delayed_work(host_ctx->wq, &host_ctx->fw_deinit_work, + msecs_to_jiffies(host_ctx->fw_unload_delay_ms)); + } else { + fw_deinit(npu_dev, false, true); + } + return ret; +} + +int32_t npu_host_exec_network(struct npu_client *client, + struct msm_npu_exec_network_ioctl *exec_ioctl) +{ + struct npu_device *npu_dev = client->npu_dev; + 
struct ipc_cmd_execute_pkt exec_packet; + /* npu mapped addr */ + uint64_t input_off, output_off; + int32_t ret; + struct npu_network *network; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + mutex_lock(&host_ctx->lock); + network = get_network_by_hdl(host_ctx, client, + exec_ioctl->network_hdl); + + if (!network) { + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + + if (atomic_inc_return(&host_ctx->network_execute_cnt) == 1) + npu_notify_cdsprm_cxlimit_activity(npu_dev, true); + + if (!network->is_active) { + pr_err("network is not active\n"); + ret = -EINVAL; + goto exec_done; + } + + if (network->fw_error) { + pr_err("fw is in error state\n"); + ret = -EIO; + goto exec_done; + } + + pr_debug("execute network %lld\n", network->id); + memset(&exec_packet, 0, sizeof(exec_packet)); + if (exec_ioctl->patching_required) { + if ((exec_ioctl->input_layer_num != 1) || + (exec_ioctl->output_layer_num != 1)) { + pr_err("Invalid input/output layer num\n"); + ret = -EINVAL; + goto exec_done; + } + + input_off = exec_ioctl->input_layers[0].buf_phys_addr; + output_off = exec_ioctl->output_layers[0].buf_phys_addr; + /* verify mapped physical address */ + if (!npu_mem_verify_addr(client, input_off) || + !npu_mem_verify_addr(client, output_off)) { + pr_err("Invalid patch buf address\n"); + ret = -EINVAL; + goto exec_done; + } + + exec_packet.patch_params.num_params = 2; + host_copy_patch_data(&exec_packet.patch_params.param[0], + (uint32_t)input_off, &exec_ioctl->input_layers[0]); + host_copy_patch_data(&exec_packet.patch_params.param[1], + (uint32_t)output_off, &exec_ioctl->output_layers[0]); + } else { + exec_packet.patch_params.num_params = 0; + } + + exec_packet.header.cmd_type = NPU_IPC_CMD_EXECUTE; + exec_packet.header.size = sizeof(struct ipc_cmd_execute_pkt); + exec_packet.header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + exec_packet.header.flags = 0xF; + exec_packet.network_hdl = network->network_hdl; + + /* Send it on the high 
priority queue */ + reinit_completion(&network->cmd_done); + ret = npu_send_network_cmd(npu_dev, network, &exec_packet); + + if (ret) { + pr_err("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret); + goto exec_done; + } + + mutex_unlock(&host_ctx->lock); + + ret = wait_for_completion_timeout( + &network->cmd_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? + NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + mutex_lock(&host_ctx->lock); + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n"); + /* dump debug stats */ + npu_dump_debug_timeout_stats(npu_dev); + network->cmd_pending = false; + ret = -ETIMEDOUT; + goto exec_done; + } + + if (network->fw_error) { + ret = -EIO; + pr_err("fw is in error state during execute network\n"); + } else { + ret = network->cmd_ret_status; + pr_debug("execution status %d\n", ret); + } + +exec_done: + network_put(network); + mutex_unlock(&host_ctx->lock); + + /* + * treat network execution timed our or interrupted by signal + * as error in order to force npu fw to stop execution + */ + if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) { + pr_err("Error handling after execution failure\n"); + host_error_hdlr(npu_dev, true); + } + + if (atomic_dec_return(&host_ctx->network_execute_cnt) == 0) + npu_notify_cdsprm_cxlimit_activity(npu_dev, false); + + return ret; +} + +int32_t npu_host_exec_network_v2(struct npu_client *client, + struct msm_npu_exec_network_ioctl_v2 *exec_ioctl, + struct msm_npu_patch_buf_info *patch_buf_info) +{ + struct npu_device *npu_dev = client->npu_dev; + struct ipc_cmd_execute_pkt_v2 *exec_packet; + int32_t ret; + struct npu_network *network; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + uint32_t num_patch_params, pkt_size; + int i; + + mutex_lock(&host_ctx->lock); + network = get_network_by_hdl(host_ctx, client, + exec_ioctl->network_hdl); + + if (!network) { + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + + if (atomic_inc_return(&host_ctx->network_execute_cnt) == 1) + 
npu_notify_cdsprm_cxlimit_activity(npu_dev, true); + + if (!network->is_active) { + pr_err("network is not active\n"); + ret = -EINVAL; + goto exec_v2_done; + } + + if (network->fw_error) { + pr_err("fw is in error state\n"); + ret = -EIO; + goto exec_v2_done; + } + + pr_debug("execute_v2 network %lld\n", network->id); + num_patch_params = exec_ioctl->patch_buf_info_num; + pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) + + sizeof(*exec_packet); + exec_packet = kzalloc(pkt_size, GFP_KERNEL); + + if (!exec_packet) { + ret = -ENOMEM; + goto exec_v2_done; + } + + for (i = 0; i < num_patch_params; i++) { + exec_packet->patch_params[i].id = patch_buf_info[i].buf_id; + pr_debug("%d: patch_id: %x\n", i, + exec_packet->patch_params[i].id); + exec_packet->patch_params[i].value = + patch_buf_info[i].buf_phys_addr; + pr_debug("%d: patch value: %x\n", i, + exec_packet->patch_params[i].value); + + /* verify mapped physical address */ + if (!npu_mem_verify_addr(client, + patch_buf_info[i].buf_phys_addr)) { + pr_err("Invalid patch value\n"); + ret = -EINVAL; + goto free_exec_packet; + } + } + + exec_packet->header.cmd_type = NPU_IPC_CMD_EXECUTE_V2; + exec_packet->header.size = pkt_size; + exec_packet->header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + exec_packet->header.flags = host_ctx->exec_flags_override > 0 ? 
+ host_ctx->exec_flags_override : exec_ioctl->flags; + exec_packet->network_hdl = network->network_hdl; + exec_packet->num_patch_params = num_patch_params; + + network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr; + network->stats_buf_size = exec_ioctl->stats_buf_size; + + pr_debug("Execute_v2 flags %x stats_buf_size %d\n", + exec_packet->header.flags, exec_ioctl->stats_buf_size); + + /* Send it on the high priority queue */ + reinit_completion(&network->cmd_done); + ret = npu_send_network_cmd(npu_dev, network, exec_packet); + + if (ret) { + pr_err("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret); + goto free_exec_packet; + } + + mutex_unlock(&host_ctx->lock); + + ret = wait_for_completion_timeout( + &network->cmd_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? + NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + mutex_lock(&host_ctx->lock); + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n"); + /* dump debug stats */ + npu_dump_debug_timeout_stats(npu_dev); + network->cmd_pending = false; + ret = -ETIMEDOUT; + goto free_exec_packet; + } + + if (network->fw_error) { + ret = -EIO; + pr_err("fw is in error state during execute_v2 network\n"); + goto free_exec_packet; + } + + ret = network->cmd_ret_status; + if (!ret) { + exec_ioctl->stats_buf_size = network->stats_buf_size; + if (copy_to_user( + (void __user *)exec_ioctl->stats_buf_addr, + network->stats_buf, + exec_ioctl->stats_buf_size)) { + pr_err("copy stats to user failed\n"); + exec_ioctl->stats_buf_size = 0; + } + } else { + pr_err("execution failed %d\n", ret); + } + +free_exec_packet: + kfree(exec_packet); +exec_v2_done: + network_put(network); + mutex_unlock(&host_ctx->lock); + + /* + * treat network execution timed our or interrupted by signal + * as error in order to force npu fw to stop execution + */ + if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) { + pr_err("Error handling after execution failure\n"); + host_error_hdlr(npu_dev, true); + } + + if 
(atomic_dec_return(&host_ctx->network_execute_cnt) == 0) + npu_notify_cdsprm_cxlimit_activity(npu_dev, false); + + return ret; +} + +int32_t npu_host_loopback_test(struct npu_device *npu_dev) +{ + struct ipc_cmd_loopback_pkt loopback_packet; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + int32_t ret; + + ret = fw_init(npu_dev); + if (ret) + return ret; + + loopback_packet.header.cmd_type = NPU_IPC_CMD_LOOPBACK; + loopback_packet.header.size = sizeof(struct ipc_cmd_loopback_pkt); + loopback_packet.header.trans_id = + atomic_add_return(1, &host_ctx->ipc_trans_id); + loopback_packet.header.flags = 0; + loopback_packet.loopbackParams = 15; + + ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet); + + if (ret) { + pr_err("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret); + goto loopback_exit; + } + + ret = wait_for_completion_interruptible_timeout( + &host_ctx->misc_done, + (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ? + NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT); + + if (!ret) { + pr_err_ratelimited("npu: NPU_IPC_CMD_LOOPBACK time out\n"); + ret = -ETIMEDOUT; + } else if (ret < 0) { + pr_err("Wait for loopback done interrupted by signal\n"); + } + +loopback_exit: + fw_deinit(npu_dev, false, true); + + return ret; +} + +void npu_host_cleanup_networks(struct npu_client *client) +{ + int i; + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct msm_npu_unload_network_ioctl unload_req; + struct msm_npu_unmap_buf_ioctl unmap_req; + struct npu_network *network; + struct npu_ion_buf *ion_buf; + + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + network = &host_ctx->networks[i]; + if (network->client == client) { + pr_warn("network %d is not unloaded before close\n", + network->network_hdl); + unload_req.network_hdl = network->network_hdl; + npu_host_unload_network(client, &unload_req); + } + } + + /* unmap all remaining buffers */ + while (!list_empty(&client->mapped_buffer_list)) { + ion_buf = 
list_first_entry(&client->mapped_buffer_list, + struct npu_ion_buf, list); + pr_warn("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova); + unmap_req.buf_ion_hdl = ion_buf->fd; + unmap_req.npu_phys_addr = ion_buf->iova; + npu_host_unmap_buf(client, &unmap_req); + } +} + +/* + * set network or global perf_mode + * if network_hdl is 0, set global perf_mode_override + * otherwise set network perf_mode: if perf_mode is 0, + * change network perf_mode to initial perf_mode from + * load_network + */ +int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl, + uint32_t perf_mode) +{ + int ret = 0; + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_network *network = NULL; + + mutex_lock(&host_ctx->lock); + + if (network_hdl == 0) { + pr_debug("change perf_mode_override to %d\n", perf_mode); + npu_dev->pwrctrl.perf_mode_override = perf_mode; + } else { + network = get_network_by_hdl(host_ctx, client, network_hdl); + if (!network) { + pr_err("invalid network handle %x\n", network_hdl); + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + + if (perf_mode == 0) { + network->cur_perf_mode = network->init_perf_mode; + pr_debug("change network %d perf_mode back to %d\n", + network_hdl, network->cur_perf_mode); + } else { + network->cur_perf_mode = perf_mode; + pr_debug("change network %d perf_mode to %d\n", + network_hdl, network->cur_perf_mode); + } + } + + ret = set_perf_mode(npu_dev); + if (ret) + pr_err("set_perf_mode failed\n"); + + if (network) + network_put(network); + mutex_unlock(&host_ctx->lock); + + return ret; +} + +/* + * get the currently set network or global perf_mode + * if network_hdl is 0, get global perf_mode_override + * otherwise get network perf_mode + */ +int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl) +{ + int param_val = 0; + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + 
struct npu_network *network = NULL; + + mutex_lock(&host_ctx->lock); + + if (network_hdl == 0) { + param_val = npu_dev->pwrctrl.perf_mode_override; + } else { + network = get_network_by_hdl(host_ctx, client, network_hdl); + if (!network) { + pr_err("invalid network handle %x\n", network_hdl); + mutex_unlock(&host_ctx->lock); + return -EINVAL; + } + param_val = network->cur_perf_mode; + network_put(network); + } + + mutex_unlock(&host_ctx->lock); + + return param_val; +} diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h new file mode 100644 index 000000000000..ea2e24e36de9 --- /dev/null +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _NPU_MGR_H +#define _NPU_MGR_H + +/* ------------------------------------------------------------------------- + * Includes + * ------------------------------------------------------------------------- + */ +#include +#include "npu_hw_access.h" +#include "npu_common.h" + +/* ------------------------------------------------------------------------- + * Defines + * ------------------------------------------------------------------------- + */ +#define NW_CMD_TIMEOUT_MS (1000 * 60 * 5) /* set for 5 minutes */ +#define NW_CMD_TIMEOUT msecs_to_jiffies(NW_CMD_TIMEOUT_MS) +#define NW_DEBUG_TIMEOUT_MS (1000 * 60 * 30) /* set for 30 minutes */ +#define NW_DEBUG_TIMEOUT msecs_to_jiffies(NW_DEBUG_TIMEOUT_MS) +#define FIRMWARE_VERSION 0x00001000 +#define MAX_LOADED_NETWORK 32 +#define NPU_IPC_BUF_LENGTH 512 + +#define FW_DBG_MODE_PAUSE (1 << 0) +#define FW_DBG_MODE_INC_TIMEOUT (1 << 1) +#define FW_DBG_DISABLE_WDOG (1 << 2) +#define FW_DBG_ENABLE_LOGGING (1 << 3) +/* ------------------------------------------------------------------------- + * Data Structures + * 
------------------------------------------------------------------------- + */ +struct npu_network { + uint64_t id; + int buf_hdl; + uint64_t phy_add; + uint32_t size; + uint32_t first_block_size; + uint32_t network_hdl; + uint32_t priority; + uint32_t cur_perf_mode; + uint32_t init_perf_mode; + uint32_t num_layers; + void *stats_buf; + void __user *stats_buf_u; + uint32_t stats_buf_size; + uint32_t trans_id; + atomic_t ref_cnt; + bool is_valid; + bool is_active; + bool fw_error; + bool cmd_pending; + bool cmd_async; + int cmd_ret_status; + struct completion cmd_done; + struct npu_client *client; +}; + +enum fw_state { + FW_DISABLED = 0, + FW_ENABLED = 1, +}; + +struct npu_host_ctx { + struct mutex lock; + void *subsystem_handle; + struct npu_device *npu_dev; + enum fw_state fw_state; + int32_t fw_ref_cnt; + int32_t npu_init_cnt; + int32_t power_vote_num; + struct work_struct irq_work; + struct delayed_work fw_deinit_work; + atomic_t fw_deinit_work_cnt; + struct workqueue_struct *wq; + struct completion misc_done; + struct completion fw_deinit_done; + bool misc_pending; + void *prop_buf; + int32_t network_num; + struct npu_network networks[MAX_LOADED_NETWORK]; + bool sys_cache_disable; + uint32_t fw_dbg_mode; + uint32_t exec_flags_override; + uint32_t fw_unload_delay_ms; + atomic_t ipc_trans_id; + atomic_t network_execute_cnt; + int cmd_ret_status; + + uint32_t err_irq_sts; + uint32_t wdg_irq_sts; + bool fw_error; +}; + +struct npu_device; + +/* ------------------------------------------------------------------------- + * Function Prototypes + * ------------------------------------------------------------------------- + */ +int npu_host_init(struct npu_device *npu_dev); +void npu_host_deinit(struct npu_device *npu_dev); + +/* Host Driver IPC Interface */ +int npu_host_ipc_pre_init(struct npu_device *npu_dev); +int npu_host_ipc_post_init(struct npu_device *npu_dev); +void npu_host_ipc_deinit(struct npu_device *npu_dev); +int npu_host_ipc_send_cmd(struct npu_device 
*npu_dev, uint32_t queueIndex, + void *pCmd); +int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t queueIndex, + uint32_t *pMsg); + +int32_t npu_host_get_info(struct npu_device *npu_dev, + struct msm_npu_get_info_ioctl *get_info_ioctl); +int32_t npu_host_map_buf(struct npu_client *client, + struct msm_npu_map_buf_ioctl *map_ioctl); +int32_t npu_host_unmap_buf(struct npu_client *client, + struct msm_npu_unmap_buf_ioctl *unmap_ioctl); +int32_t npu_host_load_network(struct npu_client *client, + struct msm_npu_load_network_ioctl *load_ioctl); +int32_t npu_host_load_network_v2(struct npu_client *client, + struct msm_npu_load_network_ioctl_v2 *load_ioctl, + struct msm_npu_patch_info_v2 *patch_info); +int32_t npu_host_unload_network(struct npu_client *client, + struct msm_npu_unload_network_ioctl *unload); +int32_t npu_host_exec_network(struct npu_client *client, + struct msm_npu_exec_network_ioctl *exec_ioctl); +int32_t npu_host_exec_network_v2(struct npu_client *client, + struct msm_npu_exec_network_ioctl_v2 *exec_ioctl, + struct msm_npu_patch_buf_info *patch_buf_info); +int32_t npu_host_loopback_test(struct npu_device *npu_dev); +int32_t npu_host_set_fw_property(struct npu_device *npu_dev, + struct msm_npu_property *property); +int32_t npu_host_get_fw_property(struct npu_device *npu_dev, + struct msm_npu_property *property); +void npu_host_cleanup_networks(struct npu_client *client); +int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl, + uint32_t perf_mode); +int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl); +void npu_dump_debug_timeout_stats(struct npu_device *npu_dev); + +#endif /* _NPU_MGR_H */ diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h new file mode 100644 index 000000000000..55722daea411 --- /dev/null +++ b/include/soc/qcom/subsystem_restart.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2019 2021, The 
Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SUBSYS_RESTART_H +#define __SUBSYS_RESTART_H + +#include +#include +#include + +struct subsys_device; +extern struct bus_type subsys_bus_type; + +enum { + RESET_SOC = 0, + RESET_SUBSYS_COUPLED, + RESET_LEVEL_MAX +}; + +enum crash_status { + CRASH_STATUS_NO_CRASH = 0, + CRASH_STATUS_ERR_FATAL, + CRASH_STATUS_WDOG_BITE, +}; + +struct device; +struct module; + +enum ssr_comm { + SUBSYS_TO_SUBSYS_SYSMON, + SUBSYS_TO_HLOS, + HLOS_TO_SUBSYS_SYSMON_SHUTDOWN, + HLOS_TO_SUBSYS_SYSMON_DSENTER, + NUM_SSR_COMMS, +}; + +/** + * struct subsys_notif_timeout - timeout data used by notification timeout hdlr + * @comm_type: Specifies if the type of communication being tracked is + * through sysmon between two subsystems, subsystem notifier call chain, or + * sysmon shutdown. + * @dest_name: subsystem to which sysmon notification is being sent to + * @source_name: subsystem which generated event that notification is being sent + * for + * @timer: timer for scheduling timeout + */ +struct subsys_notif_timeout { + enum ssr_comm comm_type; + const char *dest_name; + const char *source_name; + struct timer_list timer; +}; + +/** + * struct subsys_desc - subsystem descriptor + * @name: name of subsystem + * @fw_name: firmware name + * @pon_depends_on: subsystem this subsystem wants to power-on first. If the + * dependednt subsystem is already powered-on, the framework won't try to power + * it back up again. + * @poff_depends_on: subsystem this subsystem wants to power-off first. If the + * dependednt subsystem is already powered-off, the framework won't try to power + * it off again. 
+ * @dev: parent device + * @owner: module the descriptor belongs to + * @shutdown: Stop a subsystem + * @powerup_notify: Notify about start of a subsystem + * @powerup: Start a subsystem + * @crash_shutdown: Shutdown a subsystem when the system crashes (can't sleep) + * @ramdump: Collect a ramdump of the subsystem + * @free_memory: Free the memory associated with this subsystem + * @no_auth: Set if subsystem does not rely on PIL to authenticate and bring + * it out of reset + * @ssctl_instance_id: Instance id used to connect with SSCTL service + * @sysmon_pid: pdev id that sysmon is probed with for the subsystem + * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown + * @system_debug: If "set", triggers a device restart when the + * subsystem's wdog bite handler is invoked. + * @ignore_ssr_failure: SSR failures are usually fatal and results in panic. If + * set will ignore failure. + * @edge: GLINK logical name of the subsystem + */ +struct subsys_desc { + const char *name; + char fw_name[256]; + const char *pon_depends_on; + const char *poff_depends_on; + struct device *dev; + struct module *owner; + + int (*shutdown)(const struct subsys_desc *desc, bool force_stop); + int (*enter_ds)(const struct subsys_desc *desc); + int (*powerup_notify)(const struct subsys_desc *desc); + int (*powerup)(const struct subsys_desc *desc); + void (*crash_shutdown)(const struct subsys_desc *desc); + int (*ramdump)(int need_dumps, const struct subsys_desc *desc); + void (*free_memory)(const struct subsys_desc *desc); + struct completion shutdown_ack; + struct completion dsentry_ack; + int err_fatal_gpio; + int force_stop_bit; + int ramdump_disable_irq; + int shutdown_ack_irq; + int dsentry_ack_irq; + int ramdump_disable; + bool no_auth; + bool pil_mss_memsetup; + int ssctl_instance_id; + u32 sysmon_pid; + int sysmon_shutdown_ret; + int sysmon_dsentry_ret; + bool system_debug; + bool ignore_ssr_failure; + const char *edge; + struct qcom_smem_state *state; +#ifdef 
CONFIG_SETUP_SSR_NOTIF_TIMEOUTS + struct subsys_notif_timeout timeout_data; +#endif /* CONFIG_SETUP_SSR_NOTIF_TIMEOUTS */ +}; + +/** + * struct notif_data - additional notif information + * @crashed: indicates if subsystem has crashed due to wdog bite or err fatal + * @enable_ramdump: ramdumps disabled if set to 0 + * @enable_mini_ramdumps: enable flag for minimized critical-memory-only + * ramdumps + * @no_auth: set if subsystem does not use PIL to bring it out of reset + * @pdev: subsystem platform device pointer + */ +struct notif_data { + enum crash_status crashed; + int enable_ramdump; + int enable_mini_ramdumps; + bool no_auth; + struct platform_device *pdev; +}; + +#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) + +extern int subsys_get_restart_level(struct subsys_device *dev); +extern int subsystem_restart_dev(struct subsys_device *dev); +extern int subsystem_restart(const char *name); +extern int subsystem_crashed(const char *name); +extern int subsystem_start_notify(const char *name); +extern int subsystem_stop_notify(const char *subsystem); +extern int subsystem_ds_entry(const char *subsystem); +extern int subsystem_ds_exit(const char *name); +extern int subsystem_s2d_entry(const char *subsystem); +extern int subsystem_s2d_exit(const char *name); + +extern void *subsystem_get(const char *name); +extern void *subsystem_get_with_fwname(const char *name, const char *fw_name); +extern int subsystem_set_fwname(const char *name, const char *fw_name); +extern void subsystem_put(void *subsystem); + +extern struct subsys_device *subsys_register(struct subsys_desc *desc); +extern void subsys_unregister(struct subsys_device *dev); + +extern void subsys_set_crash_status(struct subsys_device *dev, + enum crash_status crashed); +extern enum crash_status subsys_get_crash_status(struct subsys_device *dev); +void notify_proxy_vote(struct device *device); +void notify_proxy_unvote(struct device *device); +void notify_before_auth_and_reset(struct device *device); +static 
inline void complete_shutdown_ack(struct subsys_desc *desc) +{ + complete(&desc->shutdown_ack); +} +static inline void complete_dsentry_ack(struct subsys_desc *desc) +{ + complete(&desc->dsentry_ack); +} +struct subsys_device *find_subsys_device(const char *str); +#else + +static inline int subsys_get_restart_level(struct subsys_device *dev) +{ + return 0; +} + +static inline int subsystem_restart_dev(struct subsys_device *dev) +{ + return 0; +} + +static inline int subsystem_restart(const char *name) +{ + return 0; +} + +static inline int subsystem_crashed(const char *name) +{ + return 0; +} + +/* + * Stubs in a header must be 'static inline', never 'extern' with a body: + * an extern definition here would be emitted in every translation unit + * that includes this header, causing multiple-definition link errors. + */ +static inline int subsystem_start_notify(const char *name) +{ + return 0; +} + +static inline int subsystem_stop_notify(const char *subsystem) +{ + return 0; +} + +/** + * static int subsystem_ds_entry(const char *subsystem) + * { + * return 0; + * } + * + * static int subsystem_ds_exit(const char *name) + * { + * return 0; + * } + * + * static int subsystem_s2d_exit(const char *name) + * { + * return 0; + * } + * + * static int subsystem_s2d_entry(const char *name) + * { + * return 0; + * } + */ + +static inline void *subsystem_get(const char *name) +{ + return NULL; +} + +static inline void *subsystem_get_with_fwname(const char *name, + const char *fw_name) { + return NULL; +} + +static inline int subsystem_set_fwname(const char *name, + const char *fw_name) { + return 0; +} + +static inline void subsystem_put(void *subsystem) { } + +static inline +struct subsys_device *subsys_register(struct subsys_desc *desc) +{ + return NULL; +} + +static inline void subsys_unregister(struct subsys_device *dev) { } + +static inline void subsys_set_crash_status(struct subsys_device *dev, + enum crash_status crashed) { } +static inline +enum crash_status subsys_get_crash_status(struct subsys_device *dev) +{ + return CRASH_STATUS_NO_CRASH; +} +static inline void notify_proxy_vote(struct device *device) { } +static inline void notify_proxy_unvote(struct device *device) { } +static inline void 
notify_before_auth_and_reset(struct device *device) { } +#endif +/* CONFIG_MSM_SUBSYSTEM_RESTART */ + +/* Helper wrappers */ +static inline void wakeup_source_trash(struct wakeup_source *ws) +{ + if (!ws) + return; + + wakeup_source_remove(ws); + __pm_relax(ws); +} + +#endif From c6d30aa4cf800e360a823cc6d69b70085c584eaa Mon Sep 17 00:00:00 2001 From: Navya Vemula Date: Thu, 18 Jul 2024 14:48:52 +0530 Subject: [PATCH 063/117] soc: qcom: socinfo: Add soc-id support for Seraph Add seraph target support to SoC name-id mapping list. Change-Id: I8baee64f03b4b4cc13ed4f77a6f09adb51ffab72 Signed-off-by: Navya Vemula --- drivers/soc/qcom/socinfo.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 15f83883662a..716ee905b445 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -578,6 +578,7 @@ static const struct soc_id soc_id[] = { { 565, "BLAIRP" }, { 629, "NIOBE" }, { 652, "NIOBE" }, + { 672, "SERAPH" }, { 577, "PINEAPPLEP" }, { 578, "BLAIR-LITE" }, { 605, "SA_MONACOAU_ADAS" }, From 1b3efd2909fe050d9b2b3d550050235cf8e436fa Mon Sep 17 00:00:00 2001 From: jizho Date: Wed, 14 Aug 2024 14:42:59 +0800 Subject: [PATCH 064/117] drivers: emac_mdio_fe: Add CL45 indirect read/write API Implement new cl45 indirect read/write API in emac_mdio_fe. 
Change-Id: I332ec63ecdbc68ad5a7622ef0725935a62c7d9f9 Signed-off-by: jizho --- .../ethernet/stmicro/stmmac/emac_mdio_fe.c | 67 +++++++++++++++++++ .../ethernet/stmicro/stmmac/emac_mdio_fe.h | 19 ++++++ 2 files changed, 86 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c index 0e51412250ad..7695e8c37a58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c +++ b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c @@ -454,6 +454,73 @@ int virtio_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum, } EXPORT_SYMBOL_GPL(virtio_mdio_write_c45); +int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr, int regnum) +{ + struct phy_remote_access_t *phy_request = NULL; + unsigned long tmp; + + mutex_lock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + phy_request = &emac_mdio_fe_ctx->tx_msg.request_data; + memset(phy_request, 0, sizeof(*phy_request)); + phy_request->mdio_type = MDIO_CLAUSE_45_DIRECT; + phy_request->mdio_op_remote_type = MDIO_REMOTE_OP_TYPE_READ; + phy_request->phyaddr = addr; + phy_request->phydev = mdiobus_c45_devad(regnum); + phy_request->phyreg = mdiobus_c45_regad(regnum); + + emac_mdio_fe_ctx->tx_msg.type = VIRTIO_EMAC_MDIO_FE_REQ; + emac_mdio_fe_ctx->tx_msg.len = sizeof(struct fe_to_be_msg); + + emac_mdio_fe_xmit(emac_mdio_fe_ctx); + EMAC_MDIO_FE_DBG("Sent VIRTIO_EMAC_MDIO_FE_REQ Event Cmd\n"); + + emac_mdio_fe_ctx->phy_reply = -1; + tmp = msecs_to_jiffies(WAIT_PHY_REPLY_MAX_TIMEOUT); + if (down_timeout(&emac_mdio_fe_ctx->emac_mdio_fe_sem, tmp) == -ETIME) { + EMAC_MDIO_FE_WARN("Wait for phy reply timeout\n"); + mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + return -1; + } + + mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + return (int)emac_mdio_fe_ctx->phy_reply; +} +EXPORT_SYMBOL_GPL(virtio_mdio_read_c45_indirect); + +int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct 
phy_remote_access_t *phy_request = NULL; + unsigned long tmp; + + mutex_lock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + phy_request = &emac_mdio_fe_ctx->tx_msg.request_data; + memset(phy_request, 0, sizeof(*phy_request)); + phy_request->mdio_type = MDIO_CLAUSE_45_DIRECT; + phy_request->mdio_op_remote_type = MDIO_REMOTE_OP_TYPE_WRITE; + phy_request->phyaddr = addr; + phy_request->phydev = mdiobus_c45_devad(regnum); + phy_request->phyreg = mdiobus_c45_regad(regnum); + phy_request->phydata = val; + + emac_mdio_fe_ctx->tx_msg.type = VIRTIO_EMAC_MDIO_FE_REQ; + emac_mdio_fe_ctx->tx_msg.len = sizeof(struct fe_to_be_msg); + + emac_mdio_fe_xmit(emac_mdio_fe_ctx); + EMAC_MDIO_FE_DBG("Sent VIRTIO_EMAC_MDIO_FE_REQ Event Cmd\n"); + + emac_mdio_fe_ctx->phy_reply = -1; + tmp = msecs_to_jiffies(WAIT_PHY_REPLY_MAX_TIMEOUT); + if (down_timeout(&emac_mdio_fe_ctx->emac_mdio_fe_sem, tmp) == -ETIME) { + EMAC_MDIO_FE_WARN("Wait for phy reply timeout\n"); + mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + return -1; + } + + mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock); + return (int)emac_mdio_fe_ctx->phy_reply; +} +EXPORT_SYMBOL_GPL(virtio_mdio_write_c45_indirect); + static int emac_mdio_fe_probe(struct virtio_device *vdev) { int ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.h b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.h index 7a52205e14f3..db670bb6c33b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.h +++ b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.h @@ -17,6 +17,10 @@ int virtio_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum); int virtio_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val); +int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr, int regnum); + +int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr, int regnum, u16 val); + #else static inline int virtio_mdio_read(struct mii_bus *bus, int addr, int regnum) { @@ -39,6 +43,21 @@ static inline int 
virtio_mdio_read_c45(struct mii_bus *bus, int addr, int devnum static inline int virtio_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val) + +{ + /* Not enabled */ + return 0; +} + +static inline int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr, + int regnum) +{ + /* Not enabled */ + return 0; +} + +static inline int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr, + int regnum, u16 val) { /* Not enabled */ return 0; From a74419fcb6a6bbc1ad80c05c19c9baf3c21bf5d6 Mon Sep 17 00:00:00 2001 From: Richard Maina Date: Wed, 7 Aug 2024 00:52:17 -0700 Subject: [PATCH 065/117] remoteproc: qcom_q6v5_pas: Add hwspinlock bust on stop When remoteproc goes down unexpectedly this results in a state where any acquired hwspinlocks will remain locked possibly resulting in deadlock. In order to ensure all locks are freed we include a call to qcom_smem_bust_hwspin_lock_by_host() during remoteproc shutdown. For qcom_q6v5_pas remoteprocs, each remoteproc has an assigned smem host_id. Remoteproc can pass this id to smem to try and bust the lock on remoteproc stop. This edge case only occurs with q6v5_pas watchdog crashes. The error fatal case has handling to clear the hwspinlock before the error fatal interrupt is triggered. 
Change-Id: I67bf5e740cd5e8964e5c21127dcd6c5555c54d54 Signed-off-by: Richard Maina Reviewed-by: Bjorn Andersson Signed-off-by: Chris Lew Link: https://lore.kernel.org/r/20240529-hwspinlock-bust-v3-4-c8b924ffa5a2@quicinc.com Git-commit: 568b13b65078e2b557ccf47674a354cecd1db641 Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Bjorn Andersson [quic_deesin@quicinc.com: Updated smem_host_id values only for niobe adsp and cdsp, not added the same for sm8550 and sm8650] Signed-off-by: Deepak Kumar Singh --- drivers/remoteproc/qcom_q6v5_pas.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 97ca79aa115d..e44cdee351f4 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -89,6 +89,7 @@ struct adsp_data { const char *sysmon_name; const char *qmp_name; int ssctl_id; + unsigned int smem_host_id; bool check_status; }; @@ -125,6 +126,7 @@ struct qcom_adsp { bool retry_shutdown; struct icc_path *bus_client; int crash_reason_smem; + unsigned int smem_host_id; bool has_aggre2_clk; bool dma_phys_below_32b; bool decrypt_shutdown; @@ -1115,6 +1117,9 @@ static int adsp_stop(struct rproc *rproc) if (handover) qcom_pas_handover(&adsp->q6v5); + if (adsp->smem_host_id) + ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id); + if (is_mss_ssr_hyp_assign_en(adsp)) { add_mpss_dsm_mem_ssr_dump(adsp); ret = mpss_dsm_hyp_assign_control(adsp, false); @@ -1629,6 +1634,7 @@ static int adsp_probe(struct platform_device *pdev) goto free_rproc; adsp->has_aggre2_clk = desc->has_aggre2_clk; adsp->info_name = desc->sysmon_name; + adsp->smem_host_id = desc->smem_host_id; adsp->decrypt_shutdown = desc->decrypt_shutdown; adsp->qmp_name = desc->qmp_name; adsp->dma_phys_below_32b = desc->dma_phys_below_32b; @@ -1994,6 +2000,7 @@ static const struct adsp_data niobe_adsp_resource = { .sysmon_name = "adsp", .qmp_name = "adsp", .ssctl_id = 
0x14, + .smem_host_id = 2, }; static const struct adsp_data cliffs_adsp_resource = { @@ -2252,6 +2259,7 @@ static const struct adsp_data niobe_cdsp_resource = { .sysmon_name = "cdsp", .qmp_name = "cdsp", .ssctl_id = 0x17, + .smem_host_id = 5, }; static const struct adsp_data cliffs_cdsp_resource = { From a17c5a12d523f44580651d96e1a9828de0e5b864 Mon Sep 17 00:00:00 2001 From: Uppalamarthi Sowmya Date: Tue, 13 Aug 2024 09:36:09 +0530 Subject: [PATCH 066/117] net: ethernet: stmmac: Disabling vlan hash filtering Resolve the low tput issue for vlan packets by disabling the non-supported vlan filters. Change-Id: Iefb099e5bae66674556e0d672854a6b9a94a25be Signed-off-by: Uppalamarthi Sowmya --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d7b2af0fdd1a..58123275b30e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7481,6 +7481,8 @@ int stmmac_dvr_probe(struct device *device, ndev->vlan_features |= ndev->hw_features; /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; + priv->dma_cap.vlhash = 0; + priv->dma_cap.vlins = 0; if (priv->dma_cap.vlhash) { ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; From 72c4909e8974741cd1a1f34980c55b65d1f8510d Mon Sep 17 00:00:00 2001 From: Pratyush Brahma Date: Tue, 13 Aug 2024 15:16:37 +0530 Subject: [PATCH 067/117] soc: qcom: mem-offline: Remove unnecessary zone locks While mem-offline tries to get section allocated memory, it takes a zone lock to go over all pfns in the block. As the pfn walk can take significant time, the lock can be held for longer and interrupts would be disabled for long (reportedly more than 1.25 ms on some targets). 
This is unnecessary since approximation of the allocated bytes should suffice. Remove the unnecessary zone locks. Change-Id: I8042cca7a796823d4d580623df99fad61053e0ea Suggested-by: Pavankumar Kondeti Signed-off-by: Pratyush Brahma --- drivers/soc/qcom/mem-offline.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c index 3b50bb7031d9..23ddbf5a728b 100644 --- a/drivers/soc/qcom/mem-offline.c +++ b/drivers/soc/qcom/mem-offline.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023,2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -477,7 +477,7 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr) { unsigned long block_sz = memory_block_size_bytes(); unsigned long pages_per_blk = block_sz / PAGE_SIZE; - unsigned long tot_free_pages = 0, pfn, end_pfn, flags; + unsigned long tot_free_pages = 0, pfn, end_pfn; unsigned long used; struct zone *movable_zone = &NODE_DATA(numa_node_id())->node_zones[ZONE_MOVABLE]; struct page *page; @@ -491,7 +491,6 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr) if (!zone_intersects(movable_zone, pfn, pages_per_blk)) return 0; - spin_lock_irqsave(&movable_zone->lock, flags); while (pfn < end_pfn) { if (!pfn_valid(pfn) || !PageBuddy(pfn_to_page(pfn))) { pfn++; @@ -501,7 +500,6 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr) tot_free_pages += 1 << page_private(page); pfn += 1 << page_private(page); } - spin_unlock_irqrestore(&movable_zone->lock, flags); used = block_sz - (tot_free_pages * PAGE_SIZE); From a2d08f72efeb9176f3622fba63b2389ef52c451c Mon Sep 17 00:00:00 2001 From: Salam Abdul Date: Tue, 25 Jun 2024 07:15:54 +0530 Subject: [PATCH 068/117] clk: qcom: Enabling slpi tlmm 
driver Enabling slpi_tlmm driver related config. Change-Id: I2619ef913f9bfb6dfaf921614d48717c5e117908 Signed-off-by: Salam Abdul --- arch/arm64/configs/vendor/autogvm_GKI.config | 1 + autogvm.bzl | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/autogvm_GKI.config b/arch/arm64/configs/vendor/autogvm_GKI.config index f6240209e188..1ba391e219c6 100644 --- a/arch/arm64/configs/vendor/autogvm_GKI.config +++ b/arch/arm64/configs/vendor/autogvm_GKI.config @@ -66,6 +66,7 @@ CONFIG_PINCTRL_MONACO_AUTO=m CONFIG_PINCTRL_MSM=m CONFIG_PINCTRL_QCOM_SPMI_PMIC=m CONFIG_PINCTRL_SDMSHRIKE=m +CONFIG_PINCTRL_SLPI=m CONFIG_PINCTRL_SM6150=m CONFIG_PINCTRL_SM8150=m CONFIG_POWER_RESET_QCOM_VM=m diff --git a/autogvm.bzl b/autogvm.bzl index 956070e9af2c..57f6aa337ae1 100644 --- a/autogvm.bzl +++ b/autogvm.bzl @@ -66,6 +66,7 @@ def define_autogvm(): "drivers/pinctrl/qcom/pinctrl-monaco_auto.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/pinctrl/qcom/pinctrl-sdmshrike.ko", + "drivers/pinctrl/qcom/pinctrl-slpi.ko", "drivers/pinctrl/qcom/pinctrl-sm6150.ko", "drivers/pinctrl/qcom/pinctrl-sm8150.ko", "drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko", From ac46364d3c2997f948eaf5d369b12511ef5dee0a Mon Sep 17 00:00:00 2001 From: Charan Teja Reddy Date: Fri, 12 Nov 2021 17:20:12 +0530 Subject: [PATCH 069/117] soc: qcom: mem-offline: timeout mechanism for memory offline Due to temporary or long term pinning, offline may take longer due to migration and pasr hal may not respond until it's complete. Signal to stop the memory offline operation after a specified time value. 
Change-Id: Ia78f71c2baf040b2a127cf731c95803cec66628a Signed-off-by: Charan Teja Reddy Signed-off-by: Pratyush Brahma --- drivers/soc/qcom/mem-offline.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c index 3b50bb7031d9..05b16d34873a 100644 --- a/drivers/soc/qcom/mem-offline.c +++ b/drivers/soc/qcom/mem-offline.c @@ -62,11 +62,14 @@ static bool is_rpm_controller; static DECLARE_BITMAP(movable_bitmap, 1024); static bool has_pend_offline_req; static struct workqueue_struct *migrate_wq; +static struct timer_list mem_offline_timeout_timer; +static struct task_struct *offline_trig_task; #define MODULE_CLASS_NAME "mem-offline" #define MEMBLOCK_NAME "memory%lu" #define SEGMENT_NAME "segment%lu" #define BUF_LEN 100 #define MIGRATE_TIMEOUT_SEC 20 +#define OFFLINE_TIMEOUT_SEC 7 struct section_stat { unsigned long success_count; @@ -508,6 +511,12 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr) return used; } +static void mem_offline_timeout_cb(struct timer_list *timer) +{ + pr_info("mem-offline: SIGALRM is raised to stop the offline operation\n"); + send_sig_info(SIGALRM, SEND_SIG_PRIV, offline_trig_task); +} + static int mem_event_callback(struct notifier_block *self, unsigned long action, void *arg) { @@ -572,6 +581,8 @@ static int mem_event_callback(struct notifier_block *self, idx) / sections_per_block].fail_count; has_pend_offline_req = true; cancel_work_sync(&fill_movable_zone_work); + offline_trig_task = current; + mod_timer(&mem_offline_timeout_timer, jiffies + (OFFLINE_TIMEOUT_SEC * HZ)); cur = ktime_get(); break; case MEM_OFFLINE: @@ -592,6 +603,14 @@ static int mem_event_callback(struct notifier_block *self, pr_debug("mem-offline: Segment %d memblk_bitmap 0x%lx\n", seg_idx, segment_infos[seg_idx].bitmask_kernel_blk); totalram_pages_add(memory_block_size_bytes()/PAGE_SIZE); + del_timer_sync(&mem_offline_timeout_timer); + offline_trig_task = NULL; + 
break; + case MEM_CANCEL_OFFLINE: + pr_debug("mem-offline: MEM_CANCEL_OFFLINE : start = 0x%llx end = 0x%llx\n", + start_addr, end_addr); + del_timer_sync(&mem_offline_timeout_timer); + offline_trig_task = NULL; break; case MEM_CANCEL_ONLINE: pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%llx end = 0x%llx\n", @@ -1832,12 +1851,14 @@ static struct platform_driver mem_offline_driver = { static int __init mem_module_init(void) { + timer_setup(&mem_offline_timeout_timer, mem_offline_timeout_cb, 0); return platform_driver_register(&mem_offline_driver); } subsys_initcall(mem_module_init); static void __exit mem_module_exit(void) { + del_timer_sync(&mem_offline_timeout_timer); platform_driver_unregister(&mem_offline_driver); } module_exit(mem_module_exit); From 3086a7563b484e20fdd2ecb75bda9fed2301e095 Mon Sep 17 00:00:00 2001 From: Chintan Kothari Date: Tue, 23 Jul 2024 17:26:48 +0530 Subject: [PATCH 070/117] arm64: defconfig: Enable clock and gdsc drivers for NEO Enable all clock controllers, gdsc-regulator and cpufreq drivers for NEO. 
Change-Id: I83171ff4aa7877415557ddf0e52b3c2c1ebe94e8 Signed-off-by: Chintan Kothari --- arch/arm64/configs/vendor/neo_la_GKI.config | 12 ++++++++++++ neo_la.bzl | 13 +++++++++++++ 2 files changed, 25 insertions(+) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index 73abe9fcd2ff..9195f03d38c3 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -1,7 +1,10 @@ CONFIG_ARCH_NEO=y +CONFIG_ARM_QCOM_CPUFREQ_HW=m +CONFIG_ARM_QCOM_CPUFREQ_HW_DEBUG=m CONFIG_ARM_SMMU=m CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_SELFTEST=y +CONFIG_COMMON_CLK_QCOM=m CONFIG_CPU_IDLE_GOV_QCOM_LPM=m CONFIG_EDAC_KRYO_ARM64=m # CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set @@ -36,6 +39,7 @@ CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_QCOM_CLK_RPMH=m CONFIG_QCOM_COMMAND_DB=m CONFIG_QCOM_CPUSS_SLEEP_STATS=m CONFIG_QCOM_DMABUF_HEAPS=m @@ -45,6 +49,7 @@ CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_GDSC_REGULATOR=m CONFIG_QCOM_IOMMU_DEBUG=m CONFIG_QCOM_IOMMU_UTIL=m CONFIG_QCOM_LAZY_MAPPING=m @@ -84,4 +89,11 @@ CONFIG_REGULATOR_QCOM_PM8008=m CONFIG_REGULATOR_QTI_FIXED_VOLTAGE=m CONFIG_REGULATOR_RPMH=m CONFIG_SCHED_WALT=m +CONFIG_SXR_CAMCC_NEO=m +CONFIG_SXR_DEBUGCC_NEO=m +CONFIG_SXR_DISPCC_NEO=m +CONFIG_SXR_GCC_NEO=m +CONFIG_SXR_GPUCC_NEO=m +CONFIG_SXR_TCSRCC_NEO=m +CONFIG_SXR_VIDEOCC_NEO=m CONFIG_VIRT_DRIVERS=y diff --git a/neo_la.bzl b/neo_la.bzl index 508b99f95769..aa014709e528 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -7,6 +7,19 @@ target_name = "neo-la" def define_neo_la(): _neo_in_tree_modules = [ # keep sorted + "drivers/clk/qcom/camcc-neo.ko", + "drivers/clk/qcom/clk-dummy.ko", + "drivers/clk/qcom/clk-qcom.ko", + 
"drivers/clk/qcom/clk-rpmh.ko", + "drivers/clk/qcom/debugcc-neo.ko", + "drivers/clk/qcom/dispcc-neo.ko", + "drivers/clk/qcom/gcc-neo.ko", + "drivers/clk/qcom/gdsc-regulator.ko", + "drivers/clk/qcom/gpucc-neo.ko", + "drivers/clk/qcom/tcsrcc-neo.ko", + "drivers/clk/qcom/videocc-neo.ko", + "drivers/cpufreq/qcom-cpufreq-hw.ko", + "drivers/cpufreq/qcom-cpufreq-hw-debug.ko", "drivers/cpuidle/governors/qcom_lpm.ko", "drivers/dma-buf/heaps/qcom_dma_heaps.ko", "drivers/edac/kryo_arm64_edac.ko", From 550ffe671ad87948faa814346d55f638d7fcfb81 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Fri, 9 Aug 2024 12:40:12 +0530 Subject: [PATCH 071/117] usb: gadget: f_fs_ipc_log: Remove status variable from ffs_ep Currently, it seems due to ffs_ep being a local struct in upstream file the ffs_ipc_log has defined a local copy of it for it's use. However any mismatch in this might lead of th dependent structures being unstable or corrupted due to this ambiguity. Fix this by aligning the local ffs_ep structure defined in upstream and downstream files. Change-Id: Ia52c141deb49c0beaba31a59fa88ae58f8aaf5ea Signed-off-by: Udipto Goswami --- drivers/usb/gadget/function/f_fs_ipc_log.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs_ipc_log.c b/drivers/usb/gadget/function/f_fs_ipc_log.c index 90586b84114f..aa7f16539add 100644 --- a/drivers/usb/gadget/function/f_fs_ipc_log.c +++ b/drivers/usb/gadget/function/f_fs_ipc_log.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -56,8 +56,6 @@ struct ffs_ep { struct usb_endpoint_descriptor *descs[3]; u8 num; - - int status; /* P: epfile->mutex */ }; /* Copied from f_fs.c */ From fe157b161a39f4e850ff94d63ed1dd893399acc8 Mon Sep 17 00:00:00 2001 From: Nikhil V Date: Mon, 11 Sep 2023 11:54:18 +0530 Subject: [PATCH 072/117] drivers: qcom: Vendor hooks to support compressed block count In case of hibernation with compression enabled, 'n' number of pages will be compressed to 'x' number of pages before being written to the disk. Keep a note of these compressed block counts so that bootloader can directly read 'x' pages and pass it on to the decompressor. An array will be maintained which will hold the count of these compressed blocks and later on written to the disk as part of the hibernation image save process. Change-Id: If48cd5c396eda674467b3d40c75ad3c2e91c2e5e Signed-off-by: Nikhil V --- drivers/soc/qcom/qcom_secure_hibernation.c | 82 +++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/qcom_secure_hibernation.c b/drivers/soc/qcom/qcom_secure_hibernation.c index d03569d804e4..42f07d2505d3 100644 --- a/drivers/soc/qcom/qcom_secure_hibernation.c +++ b/drivers/soc/qcom/qcom_secure_hibernation.c @@ -85,6 +85,9 @@ static unsigned short root_swap_dev; static struct work_struct save_params_work; static struct completion write_done; static unsigned char iv[IV_SIZE]; +static uint8_t *compressed_blk_array; +static int blk_array_pos; +static unsigned long nr_pages; static void init_sg(struct scatterlist *sg, void *data, unsigned int size) { @@ -301,6 +304,26 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb); } +/* + * Number of pages compressed at one time. This is inline with UNC_PAGES + * in kernel/power/swap.c. 
+ */ +#define UNCMP_PAGES 32 + +static uint32_t get_size_of_compression_block_array(unsigned long pages) +{ + /* + * Get the max index based on total no. of pages. Current compression + * algorithm compresses each UNC_PAGES pages to x pages. Use this logic to + * get the max index. + */ + uint32_t max_index = DIV_ROUND_UP(pages, UNCMP_PAGES); + + uint32_t size = ALIGN((max_index * sizeof(*compressed_blk_array)), PAGE_SIZE); + + return size; +} + static void save_auth_and_params_to_disk(struct work_struct *work) { int cur_slot; @@ -309,7 +332,7 @@ static void save_auth_and_params_to_disk(struct work_struct *work) int authslot_count = 0; int authpage_count = read_authpage_count(); struct hib_bio_batch hb; - int err2; + int err2, i = 0; hib_init_batch(&hb); @@ -327,6 +350,19 @@ static void save_auth_and_params_to_disk(struct work_struct *work) } params->authslot_count = authslot_count; write_page(params, params_slot, &hb); + + /* + * Write the array holding the compressed block count to disk + */ + if (compressed_blk_array) { + uint32_t size = get_size_of_compression_block_array(nr_pages); + + for (i = 0; i < size / PAGE_SIZE; i++) { + cur_slot = alloc_swapdev_block(root_swap_dev); + write_page(compressed_blk_array + (i * PAGE_SIZE), cur_slot, &hb); + } + } + err2 = hib_wait_io(&hb); hib_finish_batch(&hb); complete_all(&write_done); @@ -457,6 +493,15 @@ void deinit_aes_encrypt(void) kfree(params); } +static void cleanup_cmp_blk_array(void) +{ + blk_array_pos = 0; + if (compressed_blk_array) { + kvfree((void *)compressed_blk_array); + compressed_blk_array = NULL; + } +} + static int hibernate_pm_notifier(struct notifier_block *nb, unsigned long event, void *unused) { @@ -492,6 +537,7 @@ static int hibernate_pm_notifier(struct notifier_block *nb, case (PM_POST_HIBERNATION): deinit_aes_encrypt(); + cleanup_cmp_blk_array(); break; default: @@ -543,6 +589,38 @@ static void init_aes_encrypt(void *data, void *unused) kfree(params); } +/* + * Bit(part of swsusp_header_flags) 
to indicate if the image is uncompressed + * or not. This is inline with SF_NOCOMPRESS_MODE defined in + * kernel/power/power.h. + */ +#define SF_NOCOMPRESS_MODE 2 + +static void hibernated_do_mem_alloc(void *data, unsigned long pages, + unsigned int swsusp_header_flags, int *ret) +{ + uint32_t size; + + /* total no. of pages in the snapshot image */ + nr_pages = pages; + + if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) { + size = get_size_of_compression_block_array(pages); + + compressed_blk_array = kvzalloc(size, GFP_KERNEL); + if (!compressed_blk_array) + *ret = -ENOMEM; + } +} + +static void hibernate_save_cmp_len(void *data, size_t cmp_len) +{ + uint8_t pages; + + pages = DIV_ROUND_UP(cmp_len, PAGE_SIZE); + compressed_blk_array[blk_array_pos++] = pages; +} + static int __init qcom_secure_hibernattion_init(void) { int ret; @@ -551,6 +629,8 @@ static int __init qcom_secure_hibernattion_init(void) register_trace_android_vh_init_aes_encrypt(init_aes_encrypt, NULL); register_trace_android_vh_skip_swap_map_write(skip_swap_map_write, NULL); register_trace_android_vh_post_image_save(save_params_to_disk, NULL); + register_trace_android_vh_hibernate_save_cmp_len(hibernate_save_cmp_len, NULL); + register_trace_android_vh_hibernated_do_mem_alloc(hibernated_do_mem_alloc, NULL); ret = register_pm_notifier(&pm_nb); if (ret) { From 87822fc6153ba896e7e37046a9f1237091dea5df Mon Sep 17 00:00:00 2001 From: Nikhil V Date: Thu, 14 Sep 2023 23:54:08 +0530 Subject: [PATCH 073/117] drivers: qcom: Save authentication tag slot number to disk Currently bootloader does the following to calculate the authentication tag slot number. authslot = NrMetaPages + NrCopyPages + NrSwapMapPages + HDR_SWP_INFO_NUM_PAGES However, with compression enabled, we cannot apply the above logic to get the authentication slot number. So this data should be provided to the bootloader for decryption to work. The current implementation doesn't make use of the swap_map_pages for restoring the hibernation image. 
Use the slot number of the first swap_map_page to store the authentication tag slot number. Change-Id: Iddfb98cc5adc7bd79c0f52f3f5d64ad282efc9b4 Signed-off-by: Nikhil V --- drivers/soc/qcom/qcom_secure_hibernation.c | 38 +++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/qcom_secure_hibernation.c b/drivers/soc/qcom/qcom_secure_hibernation.c index 42f07d2505d3..691f4b8ae42f 100644 --- a/drivers/soc/qcom/qcom_secure_hibernation.c +++ b/drivers/soc/qcom/qcom_secure_hibernation.c @@ -88,6 +88,7 @@ static unsigned char iv[IV_SIZE]; static uint8_t *compressed_blk_array; static int blk_array_pos; static unsigned long nr_pages; +static void *auth_slot; static void init_sg(struct scatterlist *sg, void *data, unsigned int size) { @@ -341,6 +342,27 @@ static void save_auth_and_params_to_disk(struct work_struct *work) */ params_slot = alloc_swapdev_block(root_swap_dev); + if (auth_slot) { + *(int *)auth_slot = params_slot + 1; + + /* Currently bootloader code does the following to + * calculate the authentication slot index. + * authslot = NrMetaPages + NrCopyPages + NrSwapMapPages + + * HDR_SWP_INFO_NUM_PAGES; + * + * However, with compression enabled, we cannot apply the + * above logic to get the authentication slot. So this + * data should be provided to the BL for decryption to work. + * + * In the current implementation, BL doesn't make use of + * the swap_map_pages for restoring the hibernation image. So these pages + * could be used for other purposes. Use this to store the + * authentication slot number. This data will be stored at index as + * that of the first swap_map_page. 
+ */ + write_page(auth_slot, 1, &hb); + } + authpage = authslot_start; while (authslot_count < authpage_count) { cur_slot = alloc_swapdev_block(root_swap_dev); @@ -500,6 +522,11 @@ static void cleanup_cmp_blk_array(void) kvfree((void *)compressed_blk_array); compressed_blk_array = NULL; } + if (auth_slot) { + free_page((unsigned long)auth_slot); + auth_slot = NULL; + + } } static int hibernate_pm_notifier(struct notifier_block *nb, @@ -608,8 +635,17 @@ static void hibernated_do_mem_alloc(void *data, unsigned long pages, size = get_size_of_compression_block_array(pages); compressed_blk_array = kvzalloc(size, GFP_KERNEL); - if (!compressed_blk_array) + if (!compressed_blk_array) { *ret = -ENOMEM; + return; + } + + /* Allocate memory to hold authentication slot start */ + auth_slot = (void *)get_zeroed_page(GFP_KERNEL); + if (!auth_slot) { + pr_err("Failed to allocate page for storing authentication tag slot number\n"); + *ret = -ENOMEM; + } } } From 74a512434df54cbd0d26a9fd125a78f10c7ac3d6 Mon Sep 17 00:00:00 2001 From: Navya Vemula Date: Tue, 23 Jul 2024 12:03:19 +0530 Subject: [PATCH 074/117] pinctrl: qcom: Add support for Seraph SoC in pin control Add support for seraph pin configuration and control in pinctrl framework. Change-Id: I0673e080925601ba53f142279258d79af33d3aac Signed-off-by: Navya Vemula --- drivers/pinctrl/qcom/Kconfig | 11 + drivers/pinctrl/qcom/Makefile | 1 + drivers/pinctrl/qcom/pinctrl-seraph.c | 2091 +++++++++++++++++++++++++ 3 files changed, 2103 insertions(+) create mode 100644 drivers/pinctrl/qcom/pinctrl-seraph.c diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 7843f20cdd61..bf26e16b8c98 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -46,6 +46,17 @@ config PINCTRL_NIOBE Say Y here to compile statically, or M here to compile it as a module. If unsure, say N. +config PINCTRL_SERAPH + tristate "Qualcomm Technologies, Inc. 
SERAPH pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc Top Level Mode Multiplexer (TLMM) + block found on the Qualcomm Technologies Inc SERAPH platform. + Say Y here to compile statically, or M here to compile it as a + module. If unsure, say N. + config PINCTRL_CLIFFS tristate "Qualcomm Technologies, Inc. CLIFFS pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 587473d00056..2d885dbf6279 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o obj-$(CONFIG_PINCTRL_PINEAPPLE) += pinctrl-pineapple.o obj-$(CONFIG_PINCTRL_ANORAK) += pinctrl-anorak.o obj-$(CONFIG_PINCTRL_NIOBE) += pinctrl-niobe.o +obj-$(CONFIG_PINCTRL_SERAPH) += pinctrl-seraph.o obj-$(CONFIG_PINCTRL_CLIFFS) += pinctrl-cliffs.o obj-$(CONFIG_PINCTRL_KALAMA) += pinctrl-kalama.o obj-$(CONFIG_PINCTRL_BLAIR) += pinctrl-blair.o diff --git a/drivers/pinctrl/qcom/pinctrl-seraph.c b/drivers/pinctrl/qcom/pinctrl-seraph.c new file mode 100644 index 000000000000..3842d6c0af6d --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-seraph.c @@ -0,0 +1,2091 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define REG_BASE 0x100000 +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, wake_off, bit) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .ctl_reg = REG_BASE + REG_SIZE * id, \ + .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \ + .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + .wake_reg = REG_BASE + wake_off, \ + .wake_bit = bit, \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9, \ + msm_mux_##f10, \ + msm_mux_##f11 /* egpio mode */ \ + }, \ + .nfuncs = 12, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + 
.intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define QUP_I3C(qup_mode, qup_offset) \ + { \ + .mode = qup_mode, \ + .offset = REG_BASE + qup_offset, \ + } + + +static const struct pinctrl_pin_desc seraph_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, 
"GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, 
"GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + PINCTRL_PIN(152, "GPIO_152"), + PINCTRL_PIN(153, "GPIO_153"), + PINCTRL_PIN(154, "GPIO_154"), + PINCTRL_PIN(155, "GPIO_155"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); 
+DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); 
+DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); +DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); +DECLARE_MSM_GPIO_PINS(146); +DECLARE_MSM_GPIO_PINS(147); +DECLARE_MSM_GPIO_PINS(148); 
+DECLARE_MSM_GPIO_PINS(149); +DECLARE_MSM_GPIO_PINS(150); +DECLARE_MSM_GPIO_PINS(151); +DECLARE_MSM_GPIO_PINS(152); +DECLARE_MSM_GPIO_PINS(153); +DECLARE_MSM_GPIO_PINS(154); +DECLARE_MSM_GPIO_PINS(155); + + +enum seraph_functions { + msm_mux_gpio, + msm_mux_RESOUT_GPIO_N, + msm_mux_aoss_cti, + msm_mux_atest_char0, + msm_mux_atest_char1, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_atest_char_start, + msm_mux_atest_usb0, + msm_mux_atest_usb00, + msm_mux_atest_usb01, + msm_mux_atest_usb02, + msm_mux_atest_usb03, + msm_mux_audio_ext_mclk0, + msm_mux_audio_ext_mclk1, + msm_mux_audio_ref_clk, + msm_mux_cam_asc_mclk4, + msm_mux_cam_mclk, + msm_mux_cci01_async_in0, + msm_mux_cci01_async_in1, + msm_mux_cci01_async_in2, + msm_mux_cci01_timer0, + msm_mux_cci01_timer1, + msm_mux_cci01_timer2, + msm_mux_cci01_timer3, + msm_mux_cci01_timer4, + msm_mux_cci0_i2c, + msm_mux_cci0_i2c_scl0, + msm_mux_cci0_i2c_sda0, + msm_mux_cci1_i2c, + msm_mux_cci1_i2c_scl2, + msm_mux_cci1_i2c_sda2, + msm_mux_cci23_async_in0, + msm_mux_cci23_async_in1, + msm_mux_cci23_async_in2, + msm_mux_cci23_timer0, + msm_mux_cci23_timer1, + msm_mux_cci23_timer2, + msm_mux_cci23_timer3, + msm_mux_cci23_timer4, + msm_mux_cci2_i2c_scl4, + msm_mux_cci2_i2c_scl5, + msm_mux_cci2_i2c_sda4, + msm_mux_cci2_i2c_sda5, + msm_mux_cci3_i2c_scl6, + msm_mux_cci3_i2c_scl7, + msm_mux_cci3_i2c_sda6, + msm_mux_cci3_i2c_sda7, + msm_mux_dbg_out_clk, + msm_mux_ddr_bist_complete, + msm_mux_ddr_bist_fail, + msm_mux_ddr_bist_start, + msm_mux_ddr_bist_stop, + msm_mux_ddr_pxi0, + msm_mux_dp0_hot, + msm_mux_gcc_gp1, + msm_mux_gcc_gp2, + msm_mux_gcc_gp3, + msm_mux_i2s0_data0, + msm_mux_i2s0_data1, + msm_mux_i2s0_sck, + msm_mux_i2s0_ws, + msm_mux_ibi_i3c, + msm_mux_jitter_bist, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync0_out, + msm_mux_mdp_vsync1_out, + msm_mux_mdp_vsync2_out, + msm_mux_mdp_vsync3_out, + msm_mux_mdp_vsync_e, + msm_mux_pcie0_clk_req_n, + msm_mux_pcie1_clk_req_n, + msm_mux_phase_flag0, + msm_mux_phase_flag1, + 
msm_mux_phase_flag10, + msm_mux_phase_flag11, + msm_mux_phase_flag12, + msm_mux_phase_flag13, + msm_mux_phase_flag14, + msm_mux_phase_flag15, + msm_mux_phase_flag16, + msm_mux_phase_flag17, + msm_mux_phase_flag18, + msm_mux_phase_flag19, + msm_mux_phase_flag2, + msm_mux_phase_flag20, + msm_mux_phase_flag21, + msm_mux_phase_flag22, + msm_mux_phase_flag23, + msm_mux_phase_flag24, + msm_mux_phase_flag25, + msm_mux_phase_flag26, + msm_mux_phase_flag27, + msm_mux_phase_flag28, + msm_mux_phase_flag29, + msm_mux_phase_flag3, + msm_mux_phase_flag30, + msm_mux_phase_flag31, + msm_mux_phase_flag4, + msm_mux_phase_flag5, + msm_mux_phase_flag6, + msm_mux_phase_flag7, + msm_mux_phase_flag8, + msm_mux_phase_flag9, + msm_mux_pll_bist_sync, + msm_mux_pll_clk_aux, + msm_mux_prng_rosc0, + msm_mux_prng_rosc1, + msm_mux_prng_rosc2, + msm_mux_prng_rosc3, + msm_mux_qdss_cti, + msm_mux_qdss_gpio_traceclk, + msm_mux_qdss_gpio_tracectl, + msm_mux_qdss_gpio_tracedata0, + msm_mux_qdss_gpio_tracedata1, + msm_mux_qdss_gpio_tracedata10, + msm_mux_qdss_gpio_tracedata11, + msm_mux_qdss_gpio_tracedata12, + msm_mux_qdss_gpio_tracedata13, + msm_mux_qdss_gpio_tracedata14, + msm_mux_qdss_gpio_tracedata15, + msm_mux_qdss_gpio_tracedata2, + msm_mux_qdss_gpio_tracedata3, + msm_mux_qdss_gpio_tracedata4, + msm_mux_qdss_gpio_tracedata5, + msm_mux_qdss_gpio_tracedata6, + msm_mux_qdss_gpio_tracedata7, + msm_mux_qdss_gpio_tracedata8, + msm_mux_qdss_gpio_tracedata9, + msm_mux_qspi0_clk, + msm_mux_qspi0_cs0_n, + msm_mux_qspi0_cs1_n, + msm_mux_qspi0_data0, + msm_mux_qspi0_data1, + msm_mux_qspi0_data2, + msm_mux_qspi0_data3, + msm_mux_qup0_se0_l0, + msm_mux_qup0_se0_l1, + msm_mux_qup0_se0_l2, + msm_mux_qup0_se0_l3, + msm_mux_qup0_se0_l4, + msm_mux_qup0_se1_l0, + msm_mux_qup0_se1_l1, + msm_mux_qup0_se1_l2, + msm_mux_qup0_se1_l3, + msm_mux_qup0_se2_l0, + msm_mux_qup0_se2_l1, + msm_mux_qup0_se2_l2, + msm_mux_qup0_se2_l3, + msm_mux_qup0_se3_l0, + msm_mux_qup0_se3_l1, + msm_mux_qup0_se3_l2, + msm_mux_qup0_se3_l3, + 
msm_mux_qup0_se3_l4, + msm_mux_qup0_se4_l0, + msm_mux_qup0_se4_l1, + msm_mux_qup0_se4_l2, + msm_mux_qup0_se4_l3, + msm_mux_qup0_se4_l4, + msm_mux_qup0_se5_l0, + msm_mux_qup0_se5_l1, + msm_mux_qup0_se5_l2, + msm_mux_qup0_se5_l3, + msm_mux_qup1_se0_l0, + msm_mux_qup1_se0_l1, + msm_mux_qup1_se0_l2, + msm_mux_qup1_se0_l3, + msm_mux_qup1_se1_l0, + msm_mux_qup1_se1_l1, + msm_mux_qup1_se1_l2, + msm_mux_qup1_se1_l3, + msm_mux_qup1_se2_l0, + msm_mux_qup1_se2_l1, + msm_mux_qup1_se2_l2, + msm_mux_qup1_se2_l3, + msm_mux_qup1_se3_l0, + msm_mux_qup1_se3_l1, + msm_mux_qup1_se3_l2, + msm_mux_qup1_se3_l3, + msm_mux_qup1_se4_l0, + msm_mux_qup1_se4_l1, + msm_mux_qup1_se4_l2, + msm_mux_qup1_se4_l3, + msm_mux_qup1_se5_l0, + msm_mux_qup1_se5_l1, + msm_mux_qup1_se5_l2, + msm_mux_qup1_se5_l3, + msm_mux_sys_throttle_mira, + msm_mux_sys_throttle_mirb, + msm_mux_tb_trig_sdc1, + msm_mux_tgu_ch0_trigout, + msm_mux_tmess_prng0, + msm_mux_tmess_prng1, + msm_mux_tmess_prng2, + msm_mux_tmess_prng3, + msm_mux_tsense_pwm1, + msm_mux_tsense_pwm2, + msm_mux_tsense_pwm3, + msm_mux_usb0_hs, + msm_mux_usb0_phy, + msm_mux_vsense_trigger_mirnat, + msm_mux_wcn_sw, + msm_mux_wcn_sw_ctrl, + msm_mux_NA, +}; + +static const char *const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", + "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", + "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", + "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", + "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", + "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", + "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", + "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", + "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", + "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", + "gpio72", "gpio73", "gpio74", 
"gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", + "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", + "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", + "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107", + "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113", + "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119", + "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125", + "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", + "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137", + "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143", + "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149", + "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155", +}; + +static const char *const RESOUT_GPIO_N_groups[] = { + "gpio101", +}; + +static const char *const aoss_cti_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; + +static const char *const atest_char0_groups[] = { + "gpio65", +}; + +static const char *const atest_char1_groups[] = { + "gpio66", +}; + +static const char *const atest_char2_groups[] = { + "gpio67", +}; + +static const char *const atest_char3_groups[] = { + "gpio68", +}; + +static const char *const atest_char_start_groups[] = { + "gpio77", +}; + +static const char *const atest_usb0_groups[] = { + "gpio129", +}; + +static const char *const atest_usb00_groups[] = { + "gpio72", +}; + +static const char *const atest_usb01_groups[] = { + "gpio73", +}; + +static const char *const atest_usb02_groups[] = { + "gpio74", +}; + +static const char *const atest_usb03_groups[] = { + "gpio75", +}; + +static const char *const audio_ext_mclk0_groups[] = { + "gpio104", +}; + +static const char *const audio_ext_mclk1_groups[] = { + "gpio103", +}; + +static const char *const audio_ref_clk_groups[] = { + "gpio103", +}; + +static const char 
*const cam_asc_mclk4_groups[] = { + "gpio73", +}; + +static const char *const cam_mclk_groups[] = { + "gpio69", "gpio70", "gpio71", "gpio72", "gpio74", "gpio75", +}; + +static const char *const cci01_async_in0_groups[] = { + "gpio82", +}; + +static const char *const cci01_async_in1_groups[] = { + "gpio80", +}; + +static const char *const cci01_async_in2_groups[] = { + "gpio81", +}; + +static const char *const cci01_timer0_groups[] = { + "gpio77", +}; + +static const char *const cci01_timer1_groups[] = { + "gpio78", +}; + +static const char *const cci01_timer2_groups[] = { + "gpio79", +}; + +static const char *const cci01_timer3_groups[] = { + "gpio80", +}; + +static const char *const cci01_timer4_groups[] = { + "gpio81", +}; + +static const char *const cci0_i2c_groups[] = { + "gpio87", "gpio88", "gpio111", "gpio112", +}; + +static const char *const cci0_i2c_scl0_groups[] = { + "gpio86", +}; + +static const char *const cci0_i2c_sda0_groups[] = { + "gpio85", +}; + +static const char *const cci1_i2c_groups[] = { + "gpio83", "gpio84", "gpio113", "gpio114", +}; + +static const char *const cci1_i2c_scl2_groups[] = { + "gpio90", +}; + +static const char *const cci1_i2c_sda2_groups[] = { + "gpio89", +}; + +static const char *const cci23_async_in0_groups[] = { + "gpio116", +}; + +static const char *const cci23_async_in1_groups[] = { + "gpio117", +}; + +static const char *const cci23_async_in2_groups[] = { + "gpio118", +}; + +static const char *const cci23_timer0_groups[] = { + "gpio104", +}; + +static const char *const cci23_timer1_groups[] = { + "gpio105", +}; + +static const char *const cci23_timer2_groups[] = { + "gpio106", +}; + +static const char *const cci23_timer3_groups[] = { + "gpio107", +}; + +static const char *const cci23_timer4_groups[] = { + "gpio108", +}; + +static const char *const cci2_i2c_scl4_groups[] = { + "gpio92", +}; + +static const char *const cci2_i2c_scl5_groups[] = { + "gpio109", +}; + +static const char *const cci2_i2c_sda4_groups[] = { + 
"gpio91", +}; + +static const char *const cci2_i2c_sda5_groups[] = { + "gpio110", +}; + +static const char *const cci3_i2c_scl6_groups[] = { + "gpio79", +}; + +static const char *const cci3_i2c_scl7_groups[] = { + "gpio81", +}; + +static const char *const cci3_i2c_sda6_groups[] = { + "gpio78", +}; + +static const char *const cci3_i2c_sda7_groups[] = { + "gpio80", +}; + +static const char *const dbg_out_clk_groups[] = { + "gpio75", +}; + +static const char *const ddr_bist_complete_groups[] = { + "gpio44", +}; + +static const char *const ddr_bist_fail_groups[] = { + "gpio40", +}; + +static const char *const ddr_bist_start_groups[] = { + "gpio41", +}; + +static const char *const ddr_bist_stop_groups[] = { + "gpio45", +}; + +static const char *const ddr_pxi0_groups[] = { + "gpio54", "gpio55", +}; + +static const char *const dp0_hot_groups[] = { + "gpio45", "gpio103", +}; + +static const char *const gcc_gp1_groups[] = { + "gpio130", "gpio149", +}; + +static const char *const gcc_gp2_groups[] = { + "gpio91", "gpio131", +}; + +static const char *const gcc_gp3_groups[] = { + "gpio92", "gpio132", +}; + +static const char *const i2s0_data0_groups[] = { + "gpio106", +}; + +static const char *const i2s0_data1_groups[] = { + "gpio107", +}; + +static const char *const i2s0_sck_groups[] = { + "gpio105", +}; + +static const char *const i2s0_ws_groups[] = { + "gpio108", +}; + +static const char *const ibi_i3c_groups[] = { + "gpio0", "gpio1", "gpio4", "gpio5", "gpio20", "gpio21", +}; + +static const char *const jitter_bist_groups[] = { + "gpio73", +}; + +static const char *const mdp_vsync_groups[] = { + "gpio49", "gpio50", "gpio97", "gpio98", +}; + +static const char *const mdp_vsync0_out_groups[] = { + "gpio49", +}; + +static const char *const mdp_vsync1_out_groups[] = { + "gpio49", +}; + +static const char *const mdp_vsync2_out_groups[] = { + "gpio50", +}; + +static const char *const mdp_vsync3_out_groups[] = { + "gpio50", +}; + +static const char *const mdp_vsync_e_groups[] = { + 
"gpio88", +}; + +static const char *const pcie0_clk_req_n_groups[] = { + "gpio56", +}; + +static const char *const pcie1_clk_req_n_groups[] = { + "gpio59", +}; + +static const char *const phase_flag0_groups[] = { + "gpio155", +}; + +static const char *const phase_flag1_groups[] = { + "gpio141", +}; + +static const char *const phase_flag10_groups[] = { + "gpio137", +}; + +static const char *const phase_flag11_groups[] = { + "gpio136", +}; + +static const char *const phase_flag12_groups[] = { + "gpio134", +}; + +static const char *const phase_flag13_groups[] = { + "gpio125", +}; + +static const char *const phase_flag14_groups[] = { + "gpio144", +}; + +static const char *const phase_flag15_groups[] = { + "gpio142", +}; + +static const char *const phase_flag16_groups[] = { + "gpio139", +}; + +static const char *const phase_flag17_groups[] = { + "gpio138", +}; + +static const char *const phase_flag18_groups[] = { + "gpio130", +}; + +static const char *const phase_flag19_groups[] = { + "gpio150", +}; + +static const char *const phase_flag2_groups[] = { + "gpio154", +}; + +static const char *const phase_flag20_groups[] = { + "gpio151", +}; + +static const char *const phase_flag21_groups[] = { + "gpio131", +}; + +static const char *const phase_flag22_groups[] = { + "gpio124", +}; + +static const char *const phase_flag23_groups[] = { + "gpio152", +}; + +static const char *const phase_flag24_groups[] = { + "gpio120", +}; + +static const char *const phase_flag25_groups[] = { + "gpio119", +}; + +static const char *const phase_flag26_groups[] = { + "gpio117", +}; + +static const char *const phase_flag27_groups[] = { + "gpio118", +}; + +static const char *const phase_flag28_groups[] = { + "gpio153", +}; + +static const char *const phase_flag29_groups[] = { + "gpio148", +}; + +static const char *const phase_flag3_groups[] = { + "gpio147", +}; + +static const char *const phase_flag30_groups[] = { + "gpio146", +}; + +static const char *const phase_flag31_groups[] = { + "gpio145", 
+}; + +static const char *const phase_flag4_groups[] = { + "gpio149", +}; + +static const char *const phase_flag5_groups[] = { + "gpio129", +}; + +static const char *const phase_flag6_groups[] = { + "gpio135", +}; + +static const char *const phase_flag7_groups[] = { + "gpio133", +}; + +static const char *const phase_flag8_groups[] = { + "gpio143", +}; + +static const char *const phase_flag9_groups[] = { + "gpio140", +}; + +static const char *const pll_bist_sync_groups[] = { + "gpio104", +}; + +static const char *const pll_clk_aux_groups[] = { + "gpio97", +}; + +static const char *const prng_rosc0_groups[] = { + "gpio85", +}; + +static const char *const prng_rosc1_groups[] = { + "gpio64", +}; + +static const char *const prng_rosc2_groups[] = { + "gpio65", +}; + +static const char *const prng_rosc3_groups[] = { + "gpio66", +}; + +static const char *const qdss_cti_groups[] = { + "gpio27", "gpio31", "gpio77", "gpio78", "gpio82", "gpio83", + "gpio146", "gpio151", +}; + +static const char *const qdss_gpio_traceclk_groups[] = { + "gpio128", +}; + +static const char *const qdss_gpio_tracectl_groups[] = { + "gpio127", +}; + +static const char *const qdss_gpio_tracedata0_groups[] = { + "gpio38", +}; + +static const char *const qdss_gpio_tracedata1_groups[] = { + "gpio39", +}; + +static const char *const qdss_gpio_tracedata10_groups[] = { + "gpio130", +}; + +static const char *const qdss_gpio_tracedata11_groups[] = { + "gpio131", +}; + +static const char *const qdss_gpio_tracedata12_groups[] = { + "gpio132", +}; + +static const char *const qdss_gpio_tracedata13_groups[] = { + "gpio133", +}; + +static const char *const qdss_gpio_tracedata14_groups[] = { + "gpio129", +}; + +static const char *const qdss_gpio_tracedata15_groups[] = { + "gpio126", +}; + +static const char *const qdss_gpio_tracedata2_groups[] = { + "gpio68", +}; + +static const char *const qdss_gpio_tracedata3_groups[] = { + "gpio69", +}; + +static const char *const qdss_gpio_tracedata4_groups[] = { + "gpio62", 
+}; + +static const char *const qdss_gpio_tracedata5_groups[] = { + "gpio63", +}; + +static const char *const qdss_gpio_tracedata6_groups[] = { + "gpio40", +}; + +static const char *const qdss_gpio_tracedata7_groups[] = { + "gpio41", +}; + +static const char *const qdss_gpio_tracedata8_groups[] = { + "gpio42", +}; + +static const char *const qdss_gpio_tracedata9_groups[] = { + "gpio43", +}; + +static const char *const qspi0_clk_groups[] = { + "gpio35", +}; + +static const char *const qspi0_cs0_n_groups[] = { + "gpio36", +}; + +static const char *const qspi0_cs1_n_groups[] = { + "gpio38", +}; + +static const char *const qspi0_data0_groups[] = { + "gpio32", +}; + +static const char *const qspi0_data1_groups[] = { + "gpio37", +}; + +static const char *const qspi0_data2_groups[] = { + "gpio33", +}; + +static const char *const qspi0_data3_groups[] = { + "gpio34", +}; + +static const char *const qup0_se0_l0_groups[] = { + "gpio0", +}; + +static const char *const qup0_se0_l1_groups[] = { + "gpio1", +}; + +static const char *const qup0_se0_l2_groups[] = { + "gpio2", +}; + +static const char *const qup0_se0_l3_groups[] = { + "gpio3", +}; + +static const char *const qup0_se0_l4_groups[] = { + "gpio93", +}; + +static const char *const qup0_se1_l0_groups[] = { + "gpio2", +}; + +static const char *const qup0_se1_l1_groups[] = { + "gpio3", +}; + +static const char *const qup0_se1_l2_groups[] = { + "gpio61", +}; + +static const char *const qup0_se1_l3_groups[] = { + "gpio62", +}; + +static const char *const qup0_se2_l0_groups[] = { + "gpio22", +}; + +static const char *const qup0_se2_l1_groups[] = { + "gpio23", +}; + +static const char *const qup0_se2_l2_groups[] = { + "gpio12", +}; + +static const char *const qup0_se2_l3_groups[] = { + "gpio13", +}; + +static const char *const qup0_se3_l0_groups[] = { + "gpio16", +}; + +static const char *const qup0_se3_l1_groups[] = { + "gpio17", +}; + +static const char *const qup0_se3_l2_groups[] = { + "gpio18", +}; + +static const char 
*const qup0_se3_l3_groups[] = { + "gpio19", +}; + +static const char *const qup0_se3_l4_groups[] = { + "gpio41", +}; + +static const char *const qup0_se4_l0_groups[] = { + "gpio20", +}; + +static const char *const qup0_se4_l1_groups[] = { + "gpio21", +}; + +static const char *const qup0_se4_l2_groups[] = { + "gpio22", +}; + +static const char *const qup0_se4_l3_groups[] = { + "gpio23", +}; + +static const char *const qup0_se4_l4_groups[] = { + "gpio94", +}; + +static const char *const qup0_se5_l0_groups[] = { + "gpio95", +}; + +static const char *const qup0_se5_l1_groups[] = { + "gpio96", +}; + +static const char *const qup0_se5_l2_groups[] = { + "gpio97", +}; + +static const char *const qup0_se5_l3_groups[] = { + "gpio98", +}; + +static const char *const qup1_se0_l0_groups[] = { + "gpio4", +}; + +static const char *const qup1_se0_l1_groups[] = { + "gpio5", +}; + +static const char *const qup1_se0_l2_groups[] = { + "gpio63", +}; + +static const char *const qup1_se0_l3_groups[] = { + "gpio64", +}; + +static const char *const qup1_se1_l0_groups[] = { + "gpio24", +}; + +static const char *const qup1_se1_l1_groups[] = { + "gpio25", +}; + +static const char *const qup1_se1_l2_groups[] = { + "gpio26", +}; + +static const char *const qup1_se1_l3_groups[] = { + "gpio27", +}; + +static const char *const qup1_se2_l0_groups[] = { + "gpio8", +}; + +static const char *const qup1_se2_l1_groups[] = { + "gpio9", +}; + +static const char *const qup1_se2_l2_groups[] = { + "gpio10", +}; + +static const char *const qup1_se2_l3_groups[] = { + "gpio11", +}; + +static const char *const qup1_se3_l0_groups[] = { + "gpio109", +}; + +static const char *const qup1_se3_l1_groups[] = { + "gpio110", +}; + +static const char *const qup1_se3_l2_groups[] = { + "gpio35", +}; + +static const char *const qup1_se3_l3_groups[] = { + "gpio36", +}; + +static const char *const qup1_se4_l0_groups[] = { + "gpio4", +}; + +static const char *const qup1_se4_l1_groups[] = { + "gpio5", +}; + +static const char 
*const qup1_se4_l2_groups[] = { + "gpio6", +}; + +static const char *const qup1_se4_l3_groups[] = { + "gpio7", +}; + +static const char *const qup1_se5_l0_groups[] = { + "gpio14", +}; + +static const char *const qup1_se5_l1_groups[] = { + "gpio15", +}; + +static const char *const qup1_se5_l2_groups[] = { + "gpio14", +}; + +static const char *const qup1_se5_l3_groups[] = { + "gpio15", +}; + +static const char *const sys_throttle_mira_groups[] = { + "gpio95", +}; + +static const char *const sys_throttle_mirb_groups[] = { + "gpio96", +}; + +static const char *const tb_trig_sdc1_groups[] = { + "gpio88", +}; + +static const char *const tgu_ch0_trigout_groups[] = { + "gpio51", +}; + +static const char *const tmess_prng0_groups[] = { + "gpio85", +}; + +static const char *const tmess_prng1_groups[] = { + "gpio64", +}; + +static const char *const tmess_prng2_groups[] = { + "gpio65", +}; + +static const char *const tmess_prng3_groups[] = { + "gpio66", +}; + +static const char *const tsense_pwm1_groups[] = { + "gpio60", +}; + +static const char *const tsense_pwm2_groups[] = { + "gpio60", +}; + +static const char *const tsense_pwm3_groups[] = { + "gpio60", +}; + +static const char *const usb0_hs_groups[] = { + "gpio76", +}; + +static const char *const usb0_phy_groups[] = { + "gpio99", "gpio100", +}; + +static const char *const vsense_trigger_mirnat_groups[] = { + "gpio72", +}; + +static const char *const wcn_sw_groups[] = { + "gpio31", +}; + +static const char *const wcn_sw_ctrl_groups[] = { + "gpio30", +}; + + +static const struct msm_function seraph_functions[] = { + FUNCTION(gpio), + FUNCTION(RESOUT_GPIO_N), + FUNCTION(aoss_cti), + FUNCTION(atest_char0), + FUNCTION(atest_char1), + FUNCTION(atest_char2), + FUNCTION(atest_char3), + FUNCTION(atest_char_start), + FUNCTION(atest_usb0), + FUNCTION(atest_usb00), + FUNCTION(atest_usb01), + FUNCTION(atest_usb02), + FUNCTION(atest_usb03), + FUNCTION(audio_ext_mclk0), + FUNCTION(audio_ext_mclk1), + FUNCTION(audio_ref_clk), + 
FUNCTION(cam_asc_mclk4), + FUNCTION(cam_mclk), + FUNCTION(cci01_async_in0), + FUNCTION(cci01_async_in1), + FUNCTION(cci01_async_in2), + FUNCTION(cci01_timer0), + FUNCTION(cci01_timer1), + FUNCTION(cci01_timer2), + FUNCTION(cci01_timer3), + FUNCTION(cci01_timer4), + FUNCTION(cci0_i2c), + FUNCTION(cci0_i2c_scl0), + FUNCTION(cci0_i2c_sda0), + FUNCTION(cci1_i2c), + FUNCTION(cci1_i2c_scl2), + FUNCTION(cci1_i2c_sda2), + FUNCTION(cci23_async_in0), + FUNCTION(cci23_async_in1), + FUNCTION(cci23_async_in2), + FUNCTION(cci23_timer0), + FUNCTION(cci23_timer1), + FUNCTION(cci23_timer2), + FUNCTION(cci23_timer3), + FUNCTION(cci23_timer4), + FUNCTION(cci2_i2c_scl4), + FUNCTION(cci2_i2c_scl5), + FUNCTION(cci2_i2c_sda4), + FUNCTION(cci2_i2c_sda5), + FUNCTION(cci3_i2c_scl6), + FUNCTION(cci3_i2c_scl7), + FUNCTION(cci3_i2c_sda6), + FUNCTION(cci3_i2c_sda7), + FUNCTION(dbg_out_clk), + FUNCTION(ddr_bist_complete), + FUNCTION(ddr_bist_fail), + FUNCTION(ddr_bist_start), + FUNCTION(ddr_bist_stop), + FUNCTION(ddr_pxi0), + FUNCTION(dp0_hot), + FUNCTION(gcc_gp1), + FUNCTION(gcc_gp2), + FUNCTION(gcc_gp3), + FUNCTION(i2s0_data0), + FUNCTION(i2s0_data1), + FUNCTION(i2s0_sck), + FUNCTION(i2s0_ws), + FUNCTION(ibi_i3c), + FUNCTION(jitter_bist), + FUNCTION(mdp_vsync), + FUNCTION(mdp_vsync0_out), + FUNCTION(mdp_vsync1_out), + FUNCTION(mdp_vsync2_out), + FUNCTION(mdp_vsync3_out), + FUNCTION(mdp_vsync_e), + FUNCTION(pcie0_clk_req_n), + FUNCTION(pcie1_clk_req_n), + FUNCTION(phase_flag0), + FUNCTION(phase_flag1), + FUNCTION(phase_flag10), + FUNCTION(phase_flag11), + FUNCTION(phase_flag12), + FUNCTION(phase_flag13), + FUNCTION(phase_flag14), + FUNCTION(phase_flag15), + FUNCTION(phase_flag16), + FUNCTION(phase_flag17), + FUNCTION(phase_flag18), + FUNCTION(phase_flag19), + FUNCTION(phase_flag2), + FUNCTION(phase_flag20), + FUNCTION(phase_flag21), + FUNCTION(phase_flag22), + FUNCTION(phase_flag23), + FUNCTION(phase_flag24), + FUNCTION(phase_flag25), + FUNCTION(phase_flag26), + FUNCTION(phase_flag27), + 
FUNCTION(phase_flag28), + FUNCTION(phase_flag29), + FUNCTION(phase_flag3), + FUNCTION(phase_flag30), + FUNCTION(phase_flag31), + FUNCTION(phase_flag4), + FUNCTION(phase_flag5), + FUNCTION(phase_flag6), + FUNCTION(phase_flag7), + FUNCTION(phase_flag8), + FUNCTION(phase_flag9), + FUNCTION(pll_bist_sync), + FUNCTION(pll_clk_aux), + FUNCTION(prng_rosc0), + FUNCTION(prng_rosc1), + FUNCTION(prng_rosc2), + FUNCTION(prng_rosc3), + FUNCTION(qdss_cti), + FUNCTION(qdss_gpio_traceclk), + FUNCTION(qdss_gpio_tracectl), + FUNCTION(qdss_gpio_tracedata0), + FUNCTION(qdss_gpio_tracedata1), + FUNCTION(qdss_gpio_tracedata10), + FUNCTION(qdss_gpio_tracedata11), + FUNCTION(qdss_gpio_tracedata12), + FUNCTION(qdss_gpio_tracedata13), + FUNCTION(qdss_gpio_tracedata14), + FUNCTION(qdss_gpio_tracedata15), + FUNCTION(qdss_gpio_tracedata2), + FUNCTION(qdss_gpio_tracedata3), + FUNCTION(qdss_gpio_tracedata4), + FUNCTION(qdss_gpio_tracedata5), + FUNCTION(qdss_gpio_tracedata6), + FUNCTION(qdss_gpio_tracedata7), + FUNCTION(qdss_gpio_tracedata8), + FUNCTION(qdss_gpio_tracedata9), + FUNCTION(qspi0_clk), + FUNCTION(qspi0_cs0_n), + FUNCTION(qspi0_cs1_n), + FUNCTION(qspi0_data0), + FUNCTION(qspi0_data1), + FUNCTION(qspi0_data2), + FUNCTION(qspi0_data3), + FUNCTION(qup0_se0_l0), + FUNCTION(qup0_se0_l1), + FUNCTION(qup0_se0_l2), + FUNCTION(qup0_se0_l3), + FUNCTION(qup0_se0_l4), + FUNCTION(qup0_se1_l0), + FUNCTION(qup0_se1_l1), + FUNCTION(qup0_se1_l2), + FUNCTION(qup0_se1_l3), + FUNCTION(qup0_se2_l0), + FUNCTION(qup0_se2_l1), + FUNCTION(qup0_se2_l2), + FUNCTION(qup0_se2_l3), + FUNCTION(qup0_se3_l0), + FUNCTION(qup0_se3_l1), + FUNCTION(qup0_se3_l2), + FUNCTION(qup0_se3_l3), + FUNCTION(qup0_se3_l4), + FUNCTION(qup0_se4_l0), + FUNCTION(qup0_se4_l1), + FUNCTION(qup0_se4_l2), + FUNCTION(qup0_se4_l3), + FUNCTION(qup0_se4_l4), + FUNCTION(qup0_se5_l0), + FUNCTION(qup0_se5_l1), + FUNCTION(qup0_se5_l2), + FUNCTION(qup0_se5_l3), + FUNCTION(qup1_se0_l0), + FUNCTION(qup1_se0_l1), + FUNCTION(qup1_se0_l2), + 
FUNCTION(qup1_se0_l3), + FUNCTION(qup1_se1_l0), + FUNCTION(qup1_se1_l1), + FUNCTION(qup1_se1_l2), + FUNCTION(qup1_se1_l3), + FUNCTION(qup1_se2_l0), + FUNCTION(qup1_se2_l1), + FUNCTION(qup1_se2_l2), + FUNCTION(qup1_se2_l3), + FUNCTION(qup1_se3_l0), + FUNCTION(qup1_se3_l1), + FUNCTION(qup1_se3_l2), + FUNCTION(qup1_se3_l3), + FUNCTION(qup1_se4_l0), + FUNCTION(qup1_se4_l1), + FUNCTION(qup1_se4_l2), + FUNCTION(qup1_se4_l3), + FUNCTION(qup1_se5_l0), + FUNCTION(qup1_se5_l1), + FUNCTION(qup1_se5_l2), + FUNCTION(qup1_se5_l3), + FUNCTION(sys_throttle_mira), + FUNCTION(sys_throttle_mirb), + FUNCTION(tb_trig_sdc1), + FUNCTION(tgu_ch0_trigout), + FUNCTION(tmess_prng0), + FUNCTION(tmess_prng1), + FUNCTION(tmess_prng2), + FUNCTION(tmess_prng3), + FUNCTION(tsense_pwm1), + FUNCTION(tsense_pwm2), + FUNCTION(tsense_pwm3), + FUNCTION(usb0_hs), + FUNCTION(usb0_phy), + FUNCTION(vsense_trigger_mirnat), + FUNCTION(wcn_sw), + FUNCTION(wcn_sw_ctrl), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup seraph_groups[] = { + [0] = PINGROUP(0, qup0_se0_l0, ibi_i3c, aoss_cti, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 0), + [1] = PINGROUP(1, qup0_se0_l1, ibi_i3c, aoss_cti, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 1), + [2] = PINGROUP(2, qup0_se0_l2, qup0_se1_l0, aoss_cti, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84010, 2), + [3] = PINGROUP(3, qup0_se0_l3, qup0_se1_l1, aoss_cti, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84010, 3), + [4] = PINGROUP(4, qup1_se4_l0, qup1_se0_l0, ibi_i3c, ibi_i3c, NA, NA, + NA, NA, NA, NA, NA, 0x84000, 11), + [5] = PINGROUP(5, qup1_se4_l1, qup1_se0_l1, ibi_i3c, ibi_i3c, NA, NA, + NA, NA, NA, NA, NA, 0x84000, 12), + [6] = PINGROUP(6, qup1_se4_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84000, 13), + [7] = PINGROUP(7, qup1_se4_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84000, 14), + [8] = PINGROUP(8, qup1_se2_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [9] = PINGROUP(9, qup1_se2_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [10] = PINGROUP(10, qup1_se2_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [11] = PINGROUP(11, qup1_se2_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84000, 15), + [12] = PINGROUP(12, qup0_se2_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 4), + [13] = PINGROUP(13, qup0_se2_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 5), + [14] = PINGROUP(14, qup1_se5_l2, qup1_se5_l0, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [15] = PINGROUP(15, qup1_se5_l3, qup1_se5_l1, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84004, 0), + [16] = PINGROUP(16, qup0_se3_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 6), + [17] = PINGROUP(17, qup0_se3_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 7), + [18] = PINGROUP(18, qup0_se3_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 8), + [19] = PINGROUP(19, qup0_se3_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 9), + [20] = PINGROUP(20, qup0_se4_l0, ibi_i3c, NA, NA, NA, NA, NA, NA, 
NA, + NA, NA, 0x84010, 10), + [21] = PINGROUP(21, qup0_se4_l1, ibi_i3c, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84010, 11), + [22] = PINGROUP(22, qup0_se4_l2, qup0_se2_l0, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 12), + [23] = PINGROUP(23, qup0_se4_l3, qup0_se2_l1, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 13), + [24] = PINGROUP(24, qup1_se1_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84004, 1), + [25] = PINGROUP(25, qup1_se1_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [26] = PINGROUP(26, qup1_se1_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [27] = PINGROUP(27, qup1_se1_l3, qdss_cti, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84004, 2), + [28] = PINGROUP(28, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, + 3), + [29] = PINGROUP(29, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, + 4), + [30] = PINGROUP(30, wcn_sw_ctrl, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 14), + [31] = PINGROUP(31, wcn_sw, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84004, 5), + [32] = PINGROUP(32, qspi0_data0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84004, 6), + [33] = PINGROUP(33, qspi0_data2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84004, 7), + [34] = PINGROUP(34, qspi0_data3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84010, 15), + [35] = PINGROUP(35, qspi0_clk, qup1_se3_l2, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84014, 0), + [36] = PINGROUP(36, qspi0_cs0_n, qup1_se3_l3, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [37] = PINGROUP(37, qspi0_data1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84004, 8), + [38] = PINGROUP(38, qspi0_cs1_n, NA, qdss_gpio_tracedata0, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84004, 9), + [39] = PINGROUP(39, NA, qdss_gpio_tracedata1, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84004, 10), + [40] = PINGROUP(40, ddr_bist_fail, qdss_gpio_tracedata6, NA, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84004, 11), + [41] = PINGROUP(41, qup0_se3_l4, ddr_bist_start, NA, + qdss_gpio_tracedata7, NA, NA, NA, NA, NA, NA, 
NA, + 0x84014, 1), + [42] = PINGROUP(42, qdss_gpio_tracedata8, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84014, 2), + [43] = PINGROUP(43, NA, qdss_gpio_tracedata9, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84004, 12), + [44] = PINGROUP(44, ddr_bist_complete, NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84004, 13), + [45] = PINGROUP(45, dp0_hot, ddr_bist_stop, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84004, 14), + [46] = PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [47] = PINGROUP(47, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, + 15), + [48] = PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, + 0), + [49] = PINGROUP(49, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, NA, NA, + NA, NA, NA, NA, NA, NA, 0, -1), + [50] = PINGROUP(50, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, NA, NA, + NA, NA, NA, NA, NA, NA, 0x84008, 1), + [51] = PINGROUP(51, tgu_ch0_trigout, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84008, 2), + [52] = PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [53] = PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [54] = PINGROUP(54, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 3), + [55] = PINGROUP(55, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 4), + [56] = PINGROUP(56, pcie0_clk_req_n, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84008, 5), + [57] = PINGROUP(57, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, + 6), + [58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, + 7), + [59] = PINGROUP(59, pcie1_clk_req_n, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84008, 8), + [60] = PINGROUP(60, tsense_pwm1, tsense_pwm2, tsense_pwm3, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84008, 9), + [61] = PINGROUP(61, qup0_se1_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 10), + [62] = PINGROUP(62, qup0_se1_l3, NA, qdss_gpio_tracedata4, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [63] = PINGROUP(63, qup1_se0_l2, NA, qdss_gpio_tracedata5, 
NA, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 3), + [64] = PINGROUP(64, qup1_se0_l3, prng_rosc1, tmess_prng1, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 4), + [65] = PINGROUP(65, prng_rosc2, tmess_prng2, NA, atest_char0, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 5), + [66] = PINGROUP(66, prng_rosc3, tmess_prng3, NA, atest_char1, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 6), + [67] = PINGROUP(67, NA, atest_char2, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84014, 7), + [68] = PINGROUP(68, NA, qdss_gpio_tracedata2, atest_char3, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 8), + [69] = PINGROUP(69, cam_mclk, qdss_gpio_tracedata3, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84014, 9), + [70] = PINGROUP(70, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84014, 10), + [71] = PINGROUP(71, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 11), + [72] = PINGROUP(72, cam_mclk, NA, vsense_trigger_mirnat, atest_usb00, + NA, NA, NA, NA, NA, NA, NA, 0x84008, 12), + [73] = PINGROUP(73, cam_asc_mclk4, jitter_bist, atest_usb01, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [74] = PINGROUP(74, cam_mclk, NA, atest_usb02, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84008, 13), + [75] = PINGROUP(75, cam_mclk, dbg_out_clk, NA, atest_usb03, NA, NA, NA, + NA, NA, NA, NA, 0x84014, 11), + [76] = PINGROUP(76, usb0_hs, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84014, 12), + [77] = PINGROUP(77, cci01_timer0, qdss_cti, NA, atest_char_start, NA, + NA, NA, NA, NA, NA, NA, 0x84014, 13), + [78] = PINGROUP(78, cci01_timer1, cci3_i2c_sda6, qdss_cti, NA, NA, NA, + NA, NA, NA, NA, NA, 0x84014, 14), + [79] = PINGROUP(79, cci01_timer2, cci3_i2c_scl6, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84014, 15), + [80] = PINGROUP(80, cci01_timer3, cci3_i2c_sda7, cci01_async_in1, NA, + NA, NA, NA, NA, NA, NA, NA, 0, -1), + [81] = PINGROUP(81, cci01_timer4, cci3_i2c_scl7, cci01_async_in2, NA, + NA, NA, NA, NA, NA, NA, NA, 0x84018, 0), + [82] = PINGROUP(82, cci01_async_in0, qdss_cti, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 
0x84018, 1), + [83] = PINGROUP(83, cci1_i2c, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [84] = PINGROUP(84, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 2), + [85] = PINGROUP(85, cci0_i2c_sda0, prng_rosc0, tmess_prng0, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [86] = PINGROUP(86, cci0_i2c_scl0, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84018, 3), + [87] = PINGROUP(87, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 4), + [88] = PINGROUP(88, cci0_i2c, mdp_vsync_e, tb_trig_sdc1, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84018, 5), + [89] = PINGROUP(89, cci1_i2c_sda2, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [90] = PINGROUP(90, cci1_i2c_scl2, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [91] = PINGROUP(91, cci2_i2c_sda4, gcc_gp2, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84018, 6), + [92] = PINGROUP(92, cci2_i2c_scl4, gcc_gp3, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84018, 7), + [93] = PINGROUP(93, qup0_se0_l4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [94] = PINGROUP(94, qup0_se4_l4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [95] = PINGROUP(95, qup0_se5_l0, sys_throttle_mira, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84018, 8), + [96] = PINGROUP(96, qup0_se5_l1, sys_throttle_mirb, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84018, 9), + [97] = PINGROUP(97, qup0_se5_l2, mdp_vsync, pll_clk_aux, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84018, 10), + [98] = PINGROUP(98, qup0_se5_l3, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84018, 11), + [99] = PINGROUP(99, usb0_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [100] = PINGROUP(100, usb0_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 12), + [101] = PINGROUP(101, RESOUT_GPIO_N, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [102] = PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 13), + [103] = PINGROUP(103, dp0_hot, audio_ext_mclk1, audio_ref_clk, NA, NA, + NA, NA, NA, NA, NA, NA, 0x84018, 14), + [104] = 
PINGROUP(104, audio_ext_mclk0, cci23_timer0, pll_bist_sync, NA, + NA, NA, NA, NA, NA, NA, NA, 0x84018, 15), + [105] = PINGROUP(105, i2s0_sck, cci23_timer1, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [106] = PINGROUP(106, i2s0_data0, cci23_timer2, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x8401C, 0), + [107] = PINGROUP(107, i2s0_data1, cci23_timer3, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x8401C, 1), + [108] = PINGROUP(108, i2s0_ws, cci23_timer4, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [109] = PINGROUP(109, qup1_se3_l0, cci2_i2c_scl5, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0, -1), + [110] = PINGROUP(110, qup1_se3_l1, cci2_i2c_sda5, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0, -1), + [111] = PINGROUP(111, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [112] = PINGROUP(112, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 14), + [113] = PINGROUP(113, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [114] = PINGROUP(114, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [115] = PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [116] = PINGROUP(116, cci23_async_in0, NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [117] = PINGROUP(117, cci23_async_in1, phase_flag26, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0, -1), + [118] = PINGROUP(118, cci23_async_in2, phase_flag27, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x8401C, 2), + [119] = PINGROUP(119, phase_flag25, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84008, 15), + [120] = PINGROUP(120, phase_flag24, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x8400C, 0), + [121] = PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [122] = PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [123] = PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [124] = PINGROUP(124, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [125] = PINGROUP(125, phase_flag13, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 
0x8400C, 1), + [126] = PINGROUP(126, qdss_gpio_tracedata15, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [127] = PINGROUP(127, qdss_gpio_tracectl, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [128] = PINGROUP(128, qdss_gpio_traceclk, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [129] = PINGROUP(129, phase_flag5, qdss_gpio_tracedata14, atest_usb0, + NA, NA, NA, NA, NA, NA, NA, NA, 0x8400C, 2), + [130] = PINGROUP(130, gcc_gp1, phase_flag18, qdss_gpio_tracedata10, NA, + NA, NA, NA, NA, NA, NA, NA, 0, -1), + [131] = PINGROUP(131, gcc_gp2, phase_flag21, qdss_gpio_tracedata11, NA, + NA, NA, NA, NA, NA, NA, NA, 0x8401C, 3), + [132] = PINGROUP(132, gcc_gp3, qdss_gpio_tracedata12, NA, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [133] = PINGROUP(133, phase_flag7, qdss_gpio_tracedata13, NA, NA, NA, + NA, NA, NA, NA, NA, NA, 0, -1), + [134] = PINGROUP(134, phase_flag12, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [135] = PINGROUP(135, phase_flag6, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [136] = PINGROUP(136, phase_flag11, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [137] = PINGROUP(137, phase_flag10, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [138] = PINGROUP(138, phase_flag17, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [139] = PINGROUP(139, phase_flag16, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [140] = PINGROUP(140, phase_flag9, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [141] = PINGROUP(141, phase_flag1, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [142] = PINGROUP(142, phase_flag15, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [143] = PINGROUP(143, phase_flag8, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [144] = PINGROUP(144, phase_flag14, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [145] = PINGROUP(145, phase_flag31, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [146] = PINGROUP(146, qdss_cti, phase_flag30, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), 
+ [147] = PINGROUP(147, phase_flag3, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [148] = PINGROUP(148, phase_flag29, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [149] = PINGROUP(149, gcc_gp1, phase_flag4, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [150] = PINGROUP(150, phase_flag19, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [151] = PINGROUP(151, qdss_cti, phase_flag20, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [152] = PINGROUP(152, phase_flag23, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [153] = PINGROUP(153, phase_flag28, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [154] = PINGROUP(154, phase_flag2, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [155] = PINGROUP(155, phase_flag0, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), +}; + +static struct pinctrl_qup seraph_qup_regs[] = { +}; + +static const struct msm_gpio_wakeirq_map seraph_pdc_map[] = { + { 0, 70 }, { 1, 147 }, { 2, 154 }, { 3, 94 }, { 4, 101 }, { 5, 86 }, + { 6, 159 }, { 7, 124 }, { 11, 75 }, { 12, 151 }, { 13, 80 }, { 15, 84 }, + { 16, 152 }, { 17, 153 }, { 18, 142 }, { 19, 88 }, { 20, 89 }, { 21, 143 }, + { 22, 71 }, { 23, 90 }, { 24, 144 }, { 27, 91 }, { 28, 92 }, { 29, 93 }, + { 30, 95 }, { 31, 96 }, { 32, 97 }, { 33, 155 }, { 34, 156 }, { 35, 98 }, + { 37, 157 }, { 38, 81 }, { 39, 100 }, { 40, 82 }, { 41, 158 }, { 42, 83 }, + { 43, 102 }, { 44, 85 }, { 45, 103 }, { 47, 72 }, { 48, 146 }, { 50, 161 }, + { 51, 107 }, { 54, 108 }, { 55, 109 }, { 56, 110 }, { 57, 111 }, { 58, 112 }, + { 59, 113 }, { 60, 114 }, { 61, 115 }, { 63, 117 }, { 64, 118 }, { 65, 119 }, + { 66, 120 }, { 67, 121 }, { 68, 122 }, { 69, 123 }, { 70, 87 }, { 71, 104 }, + { 72, 105 }, { 74, 106 }, { 75, 125 }, { 76, 126 }, { 77, 162 }, { 78, 163 }, + { 79, 128 }, { 81, 130 }, { 82, 127 }, { 84, 131 }, { 86, 164 }, { 87, 134 }, + { 88, 165 }, { 91, 132 }, { 92, 133 }, { 95, 136 }, { 96, 149 }, { 97, 99 }, + { 98, 139 }, { 100, 148 }, { 102, 116 }, { 103, 73 }, { 104, 129 
}, { 106, 74 }, + { 107, 145 }, { 112, 76 }, { 118, 135 }, { 119, 150 }, { 120, 77 }, { 125, 78 }, + { 129, 79 }, { 131, 140 }, +}; + +static const struct msm_pinctrl_soc_data seraph_pinctrl = { + .pins = seraph_pins, + .npins = ARRAY_SIZE(seraph_pins), + .functions = seraph_functions, + .nfunctions = ARRAY_SIZE(seraph_functions), + .groups = seraph_groups, + .ngroups = ARRAY_SIZE(seraph_groups), + .ngpios = 156, + .qup_regs = seraph_qup_regs, + .nqup_regs = ARRAY_SIZE(seraph_qup_regs), + .wakeirq_map = seraph_pdc_map, + .nwakeirq_map = ARRAY_SIZE(seraph_pdc_map), + .egpio_func = 11, +}; + +static const struct of_device_id seraph_pinctrl_of_match[] = { + { .compatible = "qcom,seraph-pinctrl", .data = &seraph_pinctrl}, + {}, +}; + +static int seraph_pinctrl_probe(struct platform_device *pdev) +{ + const struct msm_pinctrl_soc_data *pinctrl_data; + struct device *dev = &pdev->dev; + + pinctrl_data = of_device_get_match_data(dev); + if (!pinctrl_data) + return -EINVAL; + + return msm_pinctrl_probe(pdev, pinctrl_data); +} + +static struct platform_driver seraph_pinctrl_driver = { + .driver = { + .name = "seraph-pinctrl", + .of_match_table = seraph_pinctrl_of_match, + }, + .probe = seraph_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init seraph_pinctrl_init(void) +{ + return platform_driver_register(&seraph_pinctrl_driver); +} +arch_initcall(seraph_pinctrl_init); + +static void __exit seraph_pinctrl_exit(void) +{ + platform_driver_unregister(&seraph_pinctrl_driver); +} +module_exit(seraph_pinctrl_exit); + +MODULE_DESCRIPTION("QTI seraph pinctrl driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, seraph_pinctrl_of_match); From 46cd2cb9810d7fad43c70aaaf8c74979004c8bc5 Mon Sep 17 00:00:00 2001 From: Chintan Kothari Date: Tue, 23 Jul 2024 17:35:59 +0530 Subject: [PATCH 075/117] modules.list.msm.neo-la: Add tcsrcc and gdsc modules to first stage Add tcsrcc and gdsc-regulator modules to modules list on NEO platform, to enable it to load during 
first stage init. Change-Id: I8feb175c570abbc3e0a458615f2d1f08e0444f71 Signed-off-by: Chintan Kothari --- modules.list.msm.neo-la | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules.list.msm.neo-la b/modules.list.msm.neo-la index 2035ea88956e..64c3ab7c6381 100644 --- a/modules.list.msm.neo-la +++ b/modules.list.msm.neo-la @@ -76,6 +76,7 @@ dispcc-neo.ko secure_buffer.ko qcom-cpufreq-hw.ko sched-walt-debug.ko +tcsrcc-neo.ko qcom-i2c-pmic.ko qcom-spmi-pmic.ko qcom-reboot-reason.ko @@ -93,3 +94,4 @@ qcom_pm8008-regulator.ko rpmh-regulator.ko debug-regulator.ko qcom-pdc.ko +gdsc-regulator.ko From e8affcf0dbf2f00a369cbd25b5a6b9202bf4d9b4 Mon Sep 17 00:00:00 2001 From: Chintan Kothari Date: Wed, 24 Jul 2024 13:11:16 +0530 Subject: [PATCH 076/117] defconfig: Enable interconnect driver for NEO Enable the interconnect driver so that consumers are able to obtain their path handles properly. Change-Id: Iaddfe9f00152d68a8ed2ee655df487ded0c2a5c0 Signed-off-by: Chintan Kothari --- arch/arm64/configs/vendor/neo_la_GKI.config | 5 +++++ neo_la.bzl | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index 9195f03d38c3..feba6d9a6d62 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -25,6 +25,11 @@ CONFIG_GUNYAH_DRIVERS=y # CONFIG_HVC_GUNYAH is not set CONFIG_HWSPINLOCK_QCOM=m CONFIG_INIT_ON_FREE_DEFAULT_ON=y +CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m +CONFIG_INTERCONNECT_QCOM_DEBUG=m +CONFIG_INTERCONNECT_QCOM_NEO=m +CONFIG_INTERCONNECT_QCOM_QOS=m +CONFIG_INTERCONNECT_QCOM_RPMH=m CONFIG_IOMMU_IO_PGTABLE_FAST=y CONFIG_LOCALVERSION="-gki" CONFIG_MFD_I2C_PMIC=m diff --git a/neo_la.bzl b/neo_la.bzl index aa014709e528..8706c23be8e6 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -26,6 +26,11 @@ def define_neo_la(): "drivers/edac/qcom_edac.ko", "drivers/firmware/qcom-scm.ko", "drivers/hwspinlock/qcom_hwspinlock.ko", + 
"drivers/interconnect/qcom/icc-bcm-voter.ko", + "drivers/interconnect/qcom/icc-debug.ko", + "drivers/interconnect/qcom/icc-rpmh.ko", + "drivers/interconnect/qcom/qnoc-neo.ko", + "drivers/interconnect/qcom/qnoc-qos.ko", "drivers/iommu/arm/arm-smmu/arm_smmu.ko", "drivers/iommu/iommu-logger.ko", "drivers/iommu/msm_dma_iommu_mapping.ko", From 9afed5bf20a7eae7c29456f82abdc12aa645dcf9 Mon Sep 17 00:00:00 2001 From: Suraj Jaiswal Date: Thu, 22 Aug 2024 17:35:38 +0530 Subject: [PATCH 077/117] kernel: msm: Adding PPS2/PPS3 Add PPS2/PPS3 IRQ support. Change-Id: I78b89a64725ce54582c0df3d99c8c71c2fb1f350 Signed-off-by: Suraj Jaiswal --- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 7fd3fe6a83d6..eeaf53173b24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2377,7 +2377,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) } else { ETHQOSERR("Phy interrupt configuration failed"); } - if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG) { + if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG || ethqos->emac_ver == EMAC_HW_v2_3_1) { ethqos_pps_irq_config(ethqos); create_pps_interrupt_device_node(ðqos->avb_class_a_dev_t, ðqos->avb_class_a_cdev, From d40c483b77f979804e71f1adab6528b33440d7f6 Mon Sep 17 00:00:00 2001 From: Jishnu Prakash Date: Mon, 19 Aug 2024 18:30:31 +0530 Subject: [PATCH 078/117] regulator: ap72200: avoid keeping EN pin always high Keeping enable GPIO always high leads to higher power consumption, even in RBSC, when the regulator is not in use. Toggle GPIO to high state only when regulator is enabled and toggle it low after regulator disable to avoid power consumption when the regulator is not in use. 
Change-Id: Ic2f9c0ef350051776f094e55e6fb4967b0d45248 Signed-off-by: Jishnu Prakash --- drivers/regulator/ap72200-regulator.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/regulator/ap72200-regulator.c b/drivers/regulator/ap72200-regulator.c index ef7cee84d802..59add353bea0 100644 --- a/drivers/regulator/ap72200-regulator.c +++ b/drivers/regulator/ap72200-regulator.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. */ +/* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "ap72200-reg: %s: " fmt, __func__ @@ -44,6 +44,8 @@ static int ap72200_vreg_enable(struct regulator_dev *rdev) struct ap72200_vreg *vreg = rdev_get_drvdata(rdev); int rc, val; + gpiod_set_value_cansleep(vreg->ena_gpiod, 1); + val = DIV_ROUND_UP(vreg->rdesc.fixed_uV - AP72200_MIN_UV, AP72200_STEP_UV); /* Set the voltage */ @@ -82,6 +84,8 @@ static int ap72200_vreg_disable(struct regulator_dev *rdev) vreg->is_enabled = false; + gpiod_set_value_cansleep(vreg->ena_gpiod, 0); + return rc; } @@ -157,9 +161,6 @@ static int ap72200_probe(struct i2c_client *client, return PTR_ERR(vreg->ena_gpiod); } - /* Keep the EN pin of this regulator always high */ - gpiod_set_value_cansleep(vreg->ena_gpiod, 1); - vreg->rdev = devm_regulator_register(vreg->dev, &vreg->rdesc, ®_config); if (IS_ERR(vreg->rdev)) { ret = PTR_ERR(vreg->rdev); From 6a4cd79d0bb451127bcd2769afa31cf6632ba031 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Thu, 1 Aug 2024 05:21:52 -0700 Subject: [PATCH 079/117] q2spi-msm-geni: Add delay to the next q2spi transfer to soc after sleep command When Slave receives sleep command from host it requires 1msec to handle the sleep due to HW limitation. Host should wait some time >1ms after sending sleep command and before initiating next command to slave. 
Added changes to check for slave_sleep_lock and wait for 2msec to initiate transfers from host post sleep command. Change-Id: Id333e2acecdb0ab169565f343b27d61952fb9471 Signed-off-by: Chandana Kishori Chiluveru --- drivers/spi/q2spi-msm-geni.c | 13 +++++++++++++ drivers/spi/q2spi-msm.h | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index 90138f3154dc..ab1a5ba52060 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -2200,6 +2200,7 @@ static int q2spi_transfer_check(struct q2spi_geni *q2spi, struct q2spi_request * */ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t len, loff_t *f_pos) { + int retries = Q2SPI_SLAVE_SLEEP_WAIT_TIME; struct q2spi_geni *q2spi; struct q2spi_request q2spi_req; struct q2spi_packet *cur_q2spi_pkt; @@ -2218,6 +2219,14 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t if (ret) goto err; + while (retries--) { + /* add 2msec delay for slave to process the sleep packet */ + if (mutex_is_locked(&q2spi->slave_sleep_lock)) + usleep_range(100, 150); + else + break; + } + if (q2spi_req.cmd == HRF_WRITE) { q2spi_req.addr = Q2SPI_HRF_PUSH_ADDRESS; q2spi_req.sync = 1; @@ -4389,6 +4398,7 @@ static int q2spi_geni_probe(struct platform_device *pdev) INIT_LIST_HEAD(&q2spi->tx_queue_list); mutex_init(&q2spi->gsi_lock); mutex_init(&q2spi->port_lock); + mutex_init(&q2spi->slave_sleep_lock); spin_lock_init(&q2spi->txn_lock); mutex_init(&q2spi->queue_lock); mutex_init(&q2spi->send_msgs_lock); @@ -4648,12 +4658,14 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) } Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid); q2spi_pkt->is_client_sleep_pkt = true; + mutex_lock(&q2spi->slave_sleep_lock); ret = __q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0); if (ret) { Q2SPI_DEBUG(q2spi, "%s __q2spi_transfer q2spi_pkt:%p ret%d\n", __func__, q2spi_pkt, ret); if 
(q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__); + mutex_unlock(&q2spi->slave_sleep_lock); return -ENOENT; } } @@ -4664,6 +4676,7 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) atomic_set(&q2spi->slave_in_sleep, 1); /* add 2msec delay for slave to process the sleep packet */ usleep_range(2000, 3000); + mutex_unlock(&q2spi->slave_sleep_lock); Q2SPI_DEBUG(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid, atomic_read(&q2spi->slave_in_sleep)); err: diff --git a/drivers/spi/q2spi-msm.h b/drivers/spi/q2spi-msm.h index 55cf9529a538..2745e7297073 100644 --- a/drivers/spi/q2spi-msm.h +++ b/drivers/spi/q2spi-msm.h @@ -184,6 +184,7 @@ #define Q2SPI_MAX_DEV 2 #define Q2SPI_DEV_NAME_MAX_LEN 64 +#define Q2SPI_SLAVE_SLEEP_WAIT_TIME (20) #define Q2SPI_RESP_BUF_RETRIES (100) #define Q2SPI_INFO(q2spi_ptr, x...) do { \ @@ -516,6 +517,7 @@ struct q2spi_dma_transfer { * @q2spi_cr_txn_err: reflects Q2SPI_CR_TRANSACTION_ERROR in CR body * @q2spi_sleep_cmd_enable: reflects start sending the sleep command to slave * @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header + * @slave_sleep_lock: lock to wait for 3msec after sleep packet before initiating next transfer. */ struct q2spi_geni { struct device *wrapper_dev; @@ -622,6 +624,8 @@ struct q2spi_geni { bool q2spi_cr_txn_err; bool q2spi_sleep_cmd_enable; bool q2spi_cr_hdr_err; + /* lock to protect sleep cmd to slave and next transfer */ + struct mutex slave_sleep_lock; }; /** From b8fcfd2679b63bb977a47a43c5f80c9312b0f2c9 Mon Sep 17 00:00:00 2001 From: Kamati Srinivas Date: Tue, 6 Aug 2024 11:14:19 +0530 Subject: [PATCH 080/117] Revert "remoteproc: pas: Check running ack for D0 transition" This reverts commit a8ce5c255285d7e0e1753ee9803ed782b580df65. TCSR register read can be solely relied upon for D0 confirmation. 
Change-Id: I9cb11a13313d2a9964efdc02a77b698b62268070 Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5.h | 2 -- drivers/remoteproc/qcom_q6v5_pas.c | 37 ------------------------------ 2 files changed, 39 deletions(-) diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h index c072ab8bb792..709f8fc8256f 100644 --- a/drivers/remoteproc/qcom_q6v5.h +++ b/drivers/remoteproc/qcom_q6v5.h @@ -33,7 +33,6 @@ struct qcom_q6v5 { int ready_irq; int handover_irq; int stop_irq; - int active_state_ack_irq; struct rproc_subdev *ssr_subdev; @@ -43,7 +42,6 @@ struct qcom_q6v5 { struct completion start_done; struct completion stop_done; - struct completion running_ack; int crash_reason; diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 563e4f993137..7e6733be98e4 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -814,15 +814,6 @@ static int adsp_start(struct rproc *rproc) return ret; } -static irqreturn_t soccp_running_ack(int irq, void *data) -{ - struct qcom_q6v5 *q6v5 = data; - - complete(&q6v5->running_ack); - - return IRQ_HANDLED; -} - /** * rproc_config_check() - Check back the config register * @state: new state of the rproc @@ -971,8 +962,6 @@ int rproc_set_state(struct rproc *rproc, bool state) goto soccp_out; } - reinit_completion(&(adsp->q6v5.running_ack)); - ret = qcom_smem_state_update_bits(adsp->wake_state, SOCCP_STATE_MASK, BIT(adsp->wake_bit)); @@ -989,15 +978,6 @@ int rproc_set_state(struct rproc *rproc, bool state) goto soccp_out; } - ret = wait_for_completion_timeout(&adsp->q6v5.running_ack, msecs_to_jiffies(5)); - if (!ret) { - dev_err(adsp->dev, "%s requested D3->D0: failed to get wake ack\n", - current->comm); - ret = -ETIMEDOUT; - goto soccp_out; - } else - ret = 0; - adsp->current_users = 1; } else { if (users > 1) { @@ -1736,26 +1716,9 @@ static int adsp_probe(struct platform_device *pdev) goto detach_proxy_pds; } - 
adsp->q6v5.active_state_ack_irq = platform_get_irq_byname(pdev, "wake-ack"); - if (adsp->q6v5.active_state_ack_irq < 0) { - dev_err(&pdev->dev, "failed to acquire readyack irq\n"); - goto detach_proxy_pds; - } - - ret = devm_request_threaded_irq(&pdev->dev, adsp->q6v5.active_state_ack_irq, - NULL, soccp_running_ack, - IRQF_TRIGGER_RISING | IRQF_ONESHOT, - "qcom_q6v5_pas", &adsp->q6v5); - if (ret) { - dev_err(&pdev->dev, "failed to acquire ready ack IRQ\n"); - goto detach_proxy_pds; - } - mutex_init(&adsp->adsp_lock); - init_completion(&(adsp->q6v5.running_ack)); adsp->current_users = 0; - } qcom_q6v5_register_ssr_subdev(&adsp->q6v5, &adsp->ssr_subdev.subdev); From 1bb038ef633d5de34a9d4711a1a6f13144a2fee6 Mon Sep 17 00:00:00 2001 From: Gokul krishna Krishnakumar Date: Tue, 25 Jun 2024 13:06:07 -0700 Subject: [PATCH 081/117] remoteproc: pas: Clear master kernel if D request fails Clear the master kernel bit if the SOCCP does not honour the APPS request for a state change. Change-Id: I5a6747973ed87e4c7f0d9074ef8da56925d2a927 Signed-off-by: Gokul krishna Krishnakumar Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5_pas.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 7e6733be98e4..a755f1ed955c 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -972,6 +972,12 @@ int rproc_set_state(struct rproc *rproc, bool state) ret = rproc_config_check(adsp, SOCCP_D0); if (ret) { + ret = qcom_smem_state_update_bits(adsp->wake_state, + SOCCP_STATE_MASK, + !BIT(adsp->wake_bit)); + if (ret) + dev_err(adsp->dev, "failed to clear smem bits after a failed D0 request\n"); + dsb(sy); dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update tcsr val=%d\n", current->comm, readl(adsp->config_addr)); @@ -995,6 +1001,12 @@ int rproc_set_state(struct rproc *rproc, bool state) ret = rproc_config_check(adsp, SOCCP_D3); if (ret) { + ret = 
qcom_smem_state_update_bits(adsp->sleep_state, + SOCCP_STATE_MASK, + !BIT(adsp->sleep_bit)); + if (ret) + dev_err(adsp->dev, "failed to clear smem bits after a failed D3 request\n"); + dsb(sy); dev_err(adsp->dev, "%s requested D0->D3 failed: TCSR value:%d\n", current->comm, readl(adsp->config_addr)); From a7975a4fd92a0aa4a3d40d7cc56fe745f3070523 Mon Sep 17 00:00:00 2001 From: Kamati Srinivas Date: Tue, 6 Aug 2024 13:35:16 +0530 Subject: [PATCH 082/117] remoteproc: qcom: pas: Use SOCCP_SPARE register to check D0 state TCSR_SOCCP_SLEEP_STATUS is updated when SOCCP starts wakeup process and is not done processing the sleep request, Check the D0 state transition by polling on SOCCP_SPARE register. Change-Id: I7a00ec58f99ca748857e93ce4aab5a8dcc126faf Signed-off-by: Gokul krishna Krishnakumar Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5_pas.c | 96 +++++++++++++++++++----------- 1 file changed, 61 insertions(+), 35 deletions(-) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index a755f1ed955c..a117ce953b82 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -61,6 +61,7 @@ static bool recovery_set_cb; #define SOCCP_SLEEP_US 100 #define SOCCP_TIMEOUT_US 10000 #define SOCCP_STATE_MASK 0x600 +#define SPARE_REG_SOCCP_D0 0x1 #define SOCCP_D0 0x2 #define SOCCP_D1 0x4 #define SOCCP_D3 0x8 @@ -164,7 +165,8 @@ struct qcom_adsp { unsigned int wake_bit; unsigned int sleep_bit; int current_users; - void *config_addr; + void *tcsr_addr; + void *spare_reg_addr; bool check_status; }; @@ -818,35 +820,33 @@ static int adsp_start(struct rproc *rproc) * rproc_config_check() - Check back the config register * @state: new state of the rproc * - * Call this function after there has been a request to change of - * state of rproc. 
This function takes in the new state to which the - * rproc has transitioned, and poll the WFI status register to check - * if the state request change has been accepted successfully by the - * rproc. The poll is timed out after 10 milliseconds. + * Polled read on a register till with a 5ms timeout and 100-200Us interval. + * Returns immediately if the expected value is read back from the addr. * - * Return: 0 if the WFI status register reflects the requested state. + * state: new state of the rproc + * + * addr: Address to poll on + * + * return: 0 if the expected value is read back from the address + * -ETIMEDOUT is the value was not read in 5ms */ -static int rproc_config_check(struct qcom_adsp *adsp, u32 state) +static int rproc_config_check(struct qcom_adsp *adsp, u32 state, void *addr) { unsigned int retry_num = 50; u32 val; do { usleep_range(SOCCP_SLEEP_US, SOCCP_SLEEP_US + 100); - /* Making sure the mem mapped io is read correctly*/ - dsb(sy); - val = readl(adsp->config_addr); - if ((state == SOCCP_D0) && (val == SOCCP_D1)) - return 0; - } while (val != state && --retry_num); + val = readl(addr); + } while (!(val && state) && --retry_num); - return (val == state) ? 0 : -ETIMEDOUT; + return (val & state) ? 
0 : -ETIMEDOUT; } -static int rproc_config_check_atomic(struct qcom_adsp *adsp, u32 state) +static int rproc_config_check_atomic(struct qcom_adsp *adsp, u32 state, void *addr) { u32 val; - return readx_poll_timeout_atomic(readl, adsp->config_addr, val, + return readx_poll_timeout_atomic(readl, addr, val, val == state, SOCCP_SLEEP_US, SOCCP_TIMEOUT_US); } @@ -862,31 +862,49 @@ static int rproc_find_status_register(struct qcom_adsp *adsp) { struct device_node *tcsr; struct device_node *np = adsp->dev->of_node; - u32 offset; + struct resource res; + u32 offset, addr; int ret; void *tcsr_base; - tcsr = of_parse_phandle(np, "soccp-config", 0); + tcsr = of_parse_phandle(np, "soccp-tcsr", 0); if (!tcsr) { dev_err(adsp->dev, "Unable to find the soccp config register\n"); return -EINVAL; } - tcsr_base = of_iomap(tcsr, 0); + ret = of_address_to_resource(tcsr, 0, &res); of_node_put(tcsr); + if (ret) { + dev_err(adsp->dev, "Unable to find the tcsr base addr\n"); + return ret; + } + + tcsr_base = ioremap_wc(res.start, resource_size(&res)); if (!tcsr_base) { dev_err(adsp->dev, "Unable to find the tcsr base addr\n"); return -ENOMEM; } - ret = of_property_read_u32_index(np, "soccp-config", 1, &offset); + ret = of_property_read_u32_index(np, "soccp-tcsr", 1, &offset); if (ret < 0) { - dev_err(adsp->dev, "Unable to find the tcsr offset addr\n"); + dev_err(adsp->dev, "Unable to find the tcsr config offset addr\n"); iounmap(tcsr_base); return ret; } + adsp->tcsr_addr = tcsr_base + offset; - adsp->config_addr = tcsr_base + offset; + ret = of_property_read_u32(np, "soccp-spare", &addr); + if (!addr) { + dev_err(adsp->dev, "Unable to find the running config register\n"); + return -EINVAL; + } + + adsp->spare_reg_addr = ioremap_wc(addr, 4); + if (!adsp->spare_reg_addr) { + dev_err(adsp->dev, "Unable to find the tcsr base addr\n"); + return -ENOMEM; + } return 0; } @@ -970,7 +988,14 @@ int rproc_set_state(struct rproc *rproc, bool state) goto soccp_out; } - ret = rproc_config_check(adsp, 
SOCCP_D0); + ret = rproc_config_check(adsp, SOCCP_D0 | SOCCP_D1, adsp->tcsr_addr); + if (ret) { + dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update tcsr val=%d\n", + current->comm, readl(adsp->tcsr_addr)); + goto soccp_out; + } + + ret = rproc_config_check(adsp, SPARE_REG_SOCCP_D0, adsp->spare_reg_addr); if (ret) { ret = qcom_smem_state_update_bits(adsp->wake_state, SOCCP_STATE_MASK, @@ -978,9 +1003,8 @@ int rproc_set_state(struct rproc *rproc, bool state) if (ret) dev_err(adsp->dev, "failed to clear smem bits after a failed D0 request\n"); - dsb(sy); - dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update tcsr val=%d\n", - current->comm, readl(adsp->config_addr)); + dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update spare reg val=%d\n", + current->comm, readl(adsp->spare_reg_addr)); goto soccp_out; } @@ -999,7 +1023,7 @@ int rproc_set_state(struct rproc *rproc, bool state) goto soccp_out; } - ret = rproc_config_check(adsp, SOCCP_D3); + ret = rproc_config_check(adsp, SOCCP_D3, adsp->tcsr_addr); if (ret) { ret = qcom_smem_state_update_bits(adsp->sleep_state, SOCCP_STATE_MASK, @@ -1007,9 +1031,8 @@ int rproc_set_state(struct rproc *rproc, bool state) if (ret) dev_err(adsp->dev, "failed to clear smem bits after a failed D3 request\n"); - dsb(sy); dev_err(adsp->dev, "%s requested D0->D3 failed: TCSR value:%d\n", - current->comm, readl(adsp->config_addr)); + current->comm, readl(adsp->tcsr_addr)); goto soccp_out; } disable_regulators(adsp); @@ -1054,7 +1077,11 @@ static int rproc_panic_handler(struct notifier_block *this, dev_err(adsp->dev, "failed to update smem bits for D3 to D0\n"); goto done; } - ret = rproc_config_check_atomic(adsp, SOCCP_D0); + ret = rproc_config_check_atomic(adsp, SOCCP_D0, adsp->tcsr_addr); + if (ret) + dev_err(adsp->dev, "failed to change to D0\n"); + + ret = rproc_config_check_atomic(adsp, SPARE_REG_SOCCP_D0, adsp->spare_reg_addr); if (ret) dev_err(adsp->dev, "failed to change to D0\n"); done: @@ -1067,14 
+1094,13 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5) int ret; if (adsp->check_status) { - ret = rproc_config_check(adsp, SOCCP_D3); - dsb(sy); + ret = rproc_config_check(adsp, SOCCP_D3, adsp->tcsr_addr); if (ret) dev_err(adsp->dev, "state not changed in handover TCSR val = %d\n", - readl(adsp->config_addr)); + readl(adsp->tcsr_addr)); else dev_info(adsp->dev, "state changed in handover for soccp! TCSR val = %d\n", - readl(adsp->config_addr)); + readl(adsp->tcsr_addr)); } disable_regulators(adsp); clk_disable_unprepare(adsp->aggre2_clk); From f7ab05e15e2ff884df7089a51a497b5209268725 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Tue, 6 Aug 2024 10:40:25 +0530 Subject: [PATCH 083/117] pwm: qcom: Add reset support functionality Add pwm reset support so that for each frame, period and duty_cycle can be changed dynamically. While at it, also update the pdm_pwm_free API with PWM disable functionality. Change-Id: I64ecd4a8cec948d56cb89e7a5ae4b30e70cb9f3e Signed-off-by: Kalpak Kawadkar --- drivers/pwm/pwm-qcom.c | 77 +++++++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 23 deletions(-) diff --git a/drivers/pwm/pwm-qcom.c b/drivers/pwm/pwm-qcom.c index 06f7c57eda5b..dc8ac779361b 100644 --- a/drivers/pwm/pwm-qcom.c +++ b/drivers/pwm/pwm-qcom.c @@ -29,8 +29,11 @@ #define PWM_CYC_CFG 0xC #define PWM_UPDATE 0x10 #define PWM_PERIOD_CNT 0x14 +#define PWM_RESET 0x18 -#define PWM_FRAME_POLARITY_BIT 0 +#define PWM_FRAME_POLARITY_BIT BIT(0) +#define PWM_FRAME_ROLLOVER_CNT_BIT BIT(4) +#define PWM_FRAME_RESET_BIT BIT(0) enum { ENABLE_STATUS0, @@ -42,6 +45,8 @@ enum { struct pdm_pwm_priv_data { unsigned int max_channels; const u16 *status_reg_offsets; + bool pwm_reset_support; + bool pwm_cnt_rollover; }; /* @@ -54,6 +59,7 @@ struct pdm_pwm_priv_data { * @current_freq: Current frequency of frame. * @freq_set: This bool flag is responsible for setting period once per frame. * @mutex: mutex lock per frame. 
+ * @cnt_rollover_en: This bool flag is used to set rollover bit per frame. */ struct pdm_pwm_frames { u32 frame_id; @@ -66,6 +72,7 @@ struct pdm_pwm_frames { bool freq_set; struct mutex frame_lock; /* PWM per frame lock */ struct pdm_pwm_chip *pwm_chip; + bool cnt_rollover_en; }; /* @@ -100,8 +107,11 @@ static int __pdm_pwm_calc_pwm_frequency(struct pdm_pwm_chip *chip, unsigned long cyc_cfg, freq; int ret; - /* PWM client could set the period only once, due to HW limitation. */ - if (chip->frames[hw_idx].freq_set) + /* + * PWM client can set the period only once if the HW version does + * not support reset functionality. + */ + if (chip->frames[hw_idx].freq_set && !chip->priv_data->pwm_reset_support) return 0; freq = PERIOD_TO_HZ(period_ns); @@ -167,18 +177,34 @@ static int pdm_pwm_config(struct pdm_pwm_chip *chip, u32 hw_idx, mutex_lock(&chip->frames[hw_idx].frame_lock); + /* + * Set the counter rollover enable bit, so that counter doesn't get stuck + * in period change configuration. 
+ */ + if (chip->priv_data->pwm_cnt_rollover && !chip->frames[hw_idx].cnt_rollover_en) { + regmap_update_bits(chip->regmap, chip->frames[hw_idx].reg_offset + PWM_CTL0, + PWM_FRAME_ROLLOVER_CNT_BIT, PWM_FRAME_ROLLOVER_CNT_BIT); + chip->frames[hw_idx].cnt_rollover_en = true; + } + ret = __pdm_pwm_calc_pwm_frequency(chip, current_period, hw_idx); if (ret) goto out; if (chip->frames[hw_idx].current_period_ns != period_ns) { - pr_err("Period cannot be updated, calculating dutycycle on old period\n"); - current_period = chip->frames[hw_idx].current_period_ns; + if (chip->priv_data->pwm_reset_support) + regmap_update_bits(chip->regmap, + chip->frames[hw_idx].reg_offset + PWM_RESET, + PWM_FRAME_RESET_BIT, PWM_FRAME_RESET_BIT); + else { + pr_err("Period cannot be updated, calculating dutycycle on old period\n"); + current_period = chip->frames[hw_idx].current_period_ns; + } } if (chip->frames[hw_idx].polarity != polarity) { regmap_update_bits(chip->regmap, chip->frames[hw_idx].reg_offset - + PWM_CTL0, BIT(PWM_FRAME_POLARITY_BIT), polarity); + + PWM_CTL0, PWM_FRAME_POLARITY_BIT, polarity); chip->frames[hw_idx].polarity = polarity; } @@ -220,21 +246,6 @@ static int pdm_pwm_config(struct pdm_pwm_chip *chip, u32 hw_idx, return ret; } -static void pdm_pwm_free(struct pwm_chip *pwm_chip, struct pwm_device *pwm) -{ - struct pdm_pwm_chip *chip = container_of(pwm_chip, - struct pdm_pwm_chip, pwm_chip); - u32 hw_idx = pwm->hwpwm; - - mutex_lock(&chip->lock); - - chip->frames[hw_idx].freq_set = false; - chip->frames[hw_idx].current_period_ns = 0; - chip->frames[hw_idx].current_duty_ns = 0; - - mutex_unlock(&chip->lock); -} - static int pdm_pwm_enable(struct pdm_pwm_chip *chip, struct pwm_device *pwm) { u32 ret, val; @@ -305,7 +316,7 @@ static int pdm_pwm_apply(struct pwm_chip *pwm_chip, struct pwm_device *pwm, pwm_get_state(pwm, &curr_state); - if (state->period < curr_state.period) + if (state->period < curr_state.period && !chip->priv_data->pwm_reset_support) return -EINVAL; if 
(state->period != curr_state.period || @@ -331,6 +342,24 @@ static int pdm_pwm_apply(struct pwm_chip *pwm_chip, struct pwm_device *pwm, return 0; } +static void pdm_pwm_free(struct pwm_chip *pwm_chip, struct pwm_device *pwm) +{ + struct pdm_pwm_chip *chip = container_of(pwm_chip, + struct pdm_pwm_chip, pwm_chip); + u32 hw_idx = pwm->hwpwm; + + mutex_lock(&chip->lock); + + chip->frames[hw_idx].freq_set = false; + chip->frames[hw_idx].current_period_ns = 0; + chip->frames[hw_idx].current_duty_ns = 0; + chip->frames[hw_idx].cnt_rollover_en = false; + + mutex_unlock(&chip->lock); + + pdm_pwm_disable(chip, pwm); +} + static const struct pwm_ops pdm_pwm_ops = { .apply = pdm_pwm_apply, .free = pdm_pwm_free, @@ -465,7 +494,7 @@ static int get_polarity(struct seq_file *m, void *unused) u32 temp; regmap_read(chip->regmap, frame->reg_offset + PWM_CTL0, &temp); - if (BIT(PWM_FRAME_POLARITY_BIT) & temp) + if (PWM_FRAME_POLARITY_BIT & temp) seq_puts(m, "PWM_POLARITY_INVERSED\n"); else seq_puts(m, "PWM_POLARITY_NORMAL\n"); @@ -672,6 +701,8 @@ static struct pdm_pwm_priv_data pdm_pwm_v2_reg_offsets = { [ENABLE_STATUS0] = 0xc, [ENABLE_STATUS1] = 0x10, }, + .pwm_reset_support = true, + .pwm_cnt_rollover = true, }; static const struct of_device_id pdm_pwm_of_match[] = { From 9dd92cf1a245cecfb47bf660bb61f4fe08ea20d2 Mon Sep 17 00:00:00 2001 From: Kalpak Kawadkar Date: Wed, 31 Jul 2024 16:25:36 +0530 Subject: [PATCH 084/117] arm64: defconfig: Add pwm support for NIOBE Add pwm support for NIOBE platform. 
Change-Id: I375958e1e1c5e341c31d997dda7c22a229c41742 Signed-off-by: Kalpak Kawadkar --- arch/arm64/configs/vendor/niobe_GKI.config | 1 + niobe.bzl | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/niobe_GKI.config b/arch/arm64/configs/vendor/niobe_GKI.config index beb4e58196f3..2c90f9e1ca33 100644 --- a/arch/arm64/configs/vendor/niobe_GKI.config +++ b/arch/arm64/configs/vendor/niobe_GKI.config @@ -120,6 +120,7 @@ CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y CONFIG_POWER_RESET_QCOM_PON=m CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m +CONFIG_PWM_QCOM=m CONFIG_PWM_QTI_LPG=m CONFIG_QCOM_AOSS_QMP=m CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y diff --git a/niobe.bzl b/niobe.bzl index f5627c18672f..24b83109d415 100644 --- a/niobe.bzl +++ b/niobe.bzl @@ -99,6 +99,7 @@ def define_niobe(): "drivers/power/reset/reboot-mode.ko", "drivers/power/supply/qti_battery_charger.ko", "drivers/powercap/qti_epm_hardware.ko", + "drivers/pwm/pwm-qcom.ko", "drivers/pwm/pwm-qti-lpg.ko", "drivers/regulator/ap72200-regulator.ko", "drivers/regulator/debug-regulator.ko", From 659777c86124c3948f6ebe16274c136977cf5262 Mon Sep 17 00:00:00 2001 From: Faisal Hassan Date: Wed, 21 Aug 2024 16:54:25 +0530 Subject: [PATCH 085/117] usb: phy: Resolve NOC error during host mode PM suspend In the host mode suspend scenario, the dwc3 core executes dwc3_core_exit, which suspends the USB PHYs and turn off the clocks. Later, during the dwc3-msm PM suspend, it invokes notify_disconnect to the PHYs. As part of the SS PHY disconnect, it attempts to power down, leading to a NOC error. To address this, a check has been added to enable the clock during the power-down process. 
Change-Id: I6040c431dea4a693a7226dc3006c099eb43bce43 Signed-off-by: Faisal Hassan --- drivers/usb/phy/phy-msm-ssusb-qmp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 9299800afc5e..1b138a5476a3 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -850,12 +850,19 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy, { struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, phy); + bool clk_enabled = phy->clk_enabled; atomic_notifier_call_chain(&uphy->notifier, 0, uphy); if (phy->phy.flags & PHY_HOST_MODE) { + if (!clk_enabled) + msm_ssphy_qmp_enable_clks(phy, true); + writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + + if (!clk_enabled) + msm_ssphy_qmp_enable_clks(phy, false); } dev_dbg(uphy->dev, "QMP phy disconnect notification\n"); From 17e5be797ad9ad33752b260f958d3d3876fef3b6 Mon Sep 17 00:00:00 2001 From: Navya Vemula Date: Tue, 23 Jul 2024 12:11:53 +0530 Subject: [PATCH 086/117] defconfig: Enable pinctrl config for Seraph SoC Add pinctrl config to support pin control in Seraph SoC. Add seraph pinctrl module to list of first stage module list. Add dependent QCOM_SCM config support and SCM module for seraph SoC. 
Change-Id: I9703ff9b04eb6646008f52226b600e8e90470cb7 Signed-off-by: Navya Vemula --- arch/arm64/configs/vendor/seraph_GKI.config | 3 +++ modules.list.msm.seraph | 3 +++ seraph.bzl | 3 +++ 3 files changed, 9 insertions(+) diff --git a/arch/arm64/configs/vendor/seraph_GKI.config b/arch/arm64/configs/vendor/seraph_GKI.config index 96ca36750dce..8aac24360086 100644 --- a/arch/arm64/configs/vendor/seraph_GKI.config +++ b/arch/arm64/configs/vendor/seraph_GKI.config @@ -2,3 +2,6 @@ CONFIG_ARCH_QCOM=y CONFIG_ARCH_SERAPH=y CONFIG_LOCALVERSION="-gki" # CONFIG_MODULE_SIG_ALL is not set +CONFIG_PINCTRL_MSM=m +CONFIG_PINCTRL_SERAPH=m +CONFIG_QCOM_SCM=m diff --git a/modules.list.msm.seraph b/modules.list.msm.seraph index 5f22fd052ba6..9da9b46358e7 100644 --- a/modules.list.msm.seraph +++ b/modules.list.msm.seraph @@ -1,2 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only # Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +qcom-scm.ko +pinctrl-msm.ko +pinctrl-seraph.ko diff --git a/seraph.bzl b/seraph.bzl index 5f1e2ec93d8f..f946b1cfafaa 100644 --- a/seraph.bzl +++ b/seraph.bzl @@ -8,6 +8,9 @@ def define_seraph(): _seraph_in_tree_modules = [ # keep sorted # TODO: Need to add GKI modules + "drivers/firmware/qcom-scm.ko", + "drivers/pinctrl/qcom/pinctrl-msm.ko", + "drivers/pinctrl/qcom/pinctrl-seraph.ko", ] _seraph_consolidate_in_tree_modules = _seraph_in_tree_modules + [ From e6ac656a861f26728f7b30bbe208413f9168579d Mon Sep 17 00:00:00 2001 From: Navya Vemula Date: Tue, 23 Jul 2024 15:44:40 +0530 Subject: [PATCH 087/117] arm64: defconfig: Enable socinfo and smem drivers for Seraph SoC Enable socinfo and dependent drivers for Seraph SoC. Add socinfo and dependency modules in first stage list. 
Change-Id: I80937e6ecf97e13c5e980434e52f4acd6b45c3d3 Signed-off-by: Navya Vemula --- arch/arm64/configs/vendor/seraph_GKI.config | 3 +++ modules.list.msm.seraph | 3 +++ seraph.bzl | 3 +++ 3 files changed, 9 insertions(+) diff --git a/arch/arm64/configs/vendor/seraph_GKI.config b/arch/arm64/configs/vendor/seraph_GKI.config index 8aac24360086..16f4a334c1b8 100644 --- a/arch/arm64/configs/vendor/seraph_GKI.config +++ b/arch/arm64/configs/vendor/seraph_GKI.config @@ -1,7 +1,10 @@ CONFIG_ARCH_QCOM=y CONFIG_ARCH_SERAPH=y +CONFIG_HWSPINLOCK_QCOM=m CONFIG_LOCALVERSION="-gki" # CONFIG_MODULE_SIG_ALL is not set CONFIG_PINCTRL_MSM=m CONFIG_PINCTRL_SERAPH=m CONFIG_QCOM_SCM=m +CONFIG_QCOM_SMEM=m +CONFIG_QCOM_SOCINFO=m diff --git a/modules.list.msm.seraph b/modules.list.msm.seraph index 9da9b46358e7..9acba4fd4cfc 100644 --- a/modules.list.msm.seraph +++ b/modules.list.msm.seraph @@ -3,3 +3,6 @@ qcom-scm.ko pinctrl-msm.ko pinctrl-seraph.ko +qcom_hwspinlock.ko +smem.ko +socinfo.ko diff --git a/seraph.bzl b/seraph.bzl index f946b1cfafaa..bf93f003608c 100644 --- a/seraph.bzl +++ b/seraph.bzl @@ -9,8 +9,11 @@ def define_seraph(): # keep sorted # TODO: Need to add GKI modules "drivers/firmware/qcom-scm.ko", + "drivers/hwspinlock/qcom_hwspinlock.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/pinctrl/qcom/pinctrl-seraph.ko", + "drivers/soc/qcom/smem.ko", + "drivers/soc/qcom/socinfo.ko", ] _seraph_consolidate_in_tree_modules = _seraph_in_tree_modules + [ From 26b1ad0d6ee470a394ad71422d9e3a4254344311 Mon Sep 17 00:00:00 2001 From: Priyanka G Pai Date: Thu, 8 Aug 2024 19:32:21 +0530 Subject: [PATCH 088/117] bzl: Add msm_npu driver for gen3auto Add msm_npu driver module to the list. 
Change-Id: Iba9d6a78b3ca0afcbb8a022b84986b8b943f82c7 Signed-off-by: Priyanka G Pai --- gen3auto.bzl | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3auto.bzl b/gen3auto.bzl index 0bb1312369e0..f2918cd9b27a 100644 --- a/gen3auto.bzl +++ b/gen3auto.bzl @@ -87,6 +87,7 @@ def define_gen3auto(): "drivers/irqchip/qcom-pdc.ko", "drivers/mailbox/msm_qmp.ko", "drivers/mailbox/qcom-apcs-ipc-mailbox.ko", + "drivers/media/platform/msm/npu/msm_npu.ko", "drivers/mfd/qcom-spmi-pmic.ko", "drivers/misc/qseecom_proxy.ko", "drivers/mmc/host/cqhci.ko", From 3cfdbcb79902ed37d3b6bfc749b172c8dd235bde Mon Sep 17 00:00:00 2001 From: Priyanka G Pai Date: Thu, 8 Aug 2024 19:33:37 +0530 Subject: [PATCH 089/117] defconfig: gen3auto: Enable MSM NPU Enable MSM NPU module. Change-Id: I4feaa61b484c72c3eca1a6bc2c44ae1865b74775 Signed-off-by: Priyanka G Pai --- arch/arm64/configs/vendor/gen3auto_GKI.config | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/configs/vendor/gen3auto_GKI.config b/arch/arm64/configs/vendor/gen3auto_GKI.config index b30238d41427..3004203ecac6 100644 --- a/arch/arm64/configs/vendor/gen3auto_GKI.config +++ b/arch/arm64/configs/vendor/gen3auto_GKI.config @@ -113,6 +113,7 @@ CONFIG_MSM_CORE_HANG_DETECT=m CONFIG_MSM_GPI_DMA=m # CONFIG_MSM_GPI_DMA_DEBUG is not set CONFIG_MSM_HSUSB_PHY=m +CONFIG_MSM_NPU=m CONFIG_MSM_PERFORMANCE=m CONFIG_MSM_QMP=m CONFIG_NL80211_TESTMODE=y From 7ceec973634f577153ae74b939ff5722244883db Mon Sep 17 00:00:00 2001 From: lixiang Date: Wed, 14 Aug 2024 18:04:32 +0800 Subject: [PATCH 090/117] soc: qcom: hab: Replace imp whse with rbtree based Replace the linked list-based import warehouse with an RB tree-based one to improve the performance of import and unimport operations when there is an excessive amount of exp_desc nodes. 
Change-Id: I2685d28060902b69994325d05c9475ad70e27737 Signed-off-by: lixiang --- drivers/soc/qcom/hab/hab.c | 9 ++- drivers/soc/qcom/hab/hab.h | 8 ++- drivers/soc/qcom/hab/hab_linux.c | 58 ++++++++++++++++ drivers/soc/qcom/hab/hab_mimex.c | 114 +++++++++++++++---------------- drivers/soc/qcom/hab/hab_msg.c | 25 ++++--- drivers/soc/qcom/hab/hab_os.h | 11 +++ drivers/soc/qcom/hab/hab_stat.c | 45 ++++++------ 7 files changed, 177 insertions(+), 93 deletions(-) diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c index afd342f880c7..88e0b275e482 100644 --- a/drivers/soc/qcom/hab/hab.c +++ b/drivers/soc/qcom/hab/hab.c @@ -80,7 +80,7 @@ struct uhab_context *hab_ctx_alloc(int kernel) ctx->closing = 0; INIT_LIST_HEAD(&ctx->vchannels); INIT_LIST_HEAD(&ctx->exp_whse); - INIT_LIST_HEAD(&ctx->imp_whse); + hab_rb_init(&ctx->imp_whse); INIT_LIST_HEAD(&ctx->exp_rxq); init_waitqueue_head(&ctx->exp_wq); @@ -167,8 +167,11 @@ void hab_ctx_free_fn(struct uhab_context *ctx) write_unlock(&ctx->exp_lock); spin_lock_bh(&ctx->imp_lock); - list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) { - list_del(&exp->node); + for (exp_super = hab_rb_min(&ctx->imp_whse, struct export_desc_super, node); + exp_super != NULL; + exp_super = hab_rb_min(&ctx->imp_whse, struct export_desc_super, node)) { + exp = &exp_super->exp; + hab_rb_remove(&ctx->imp_whse, exp_super); ctx->import_total--; pr_debug("leaked imp %d vcid %X for ctx is collected total %d\n", exp->export_id, exp->vcid_local, diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h index 5b0370fd531b..34d58804dee0 100644 --- a/drivers/soc/qcom/hab/hab.h +++ b/drivers/soc/qcom/hab/hab.h @@ -318,7 +318,7 @@ struct uhab_context { struct list_head exp_rxq; spinlock_t expq_lock; - struct list_head imp_whse; + HAB_RB_ROOT imp_whse; spinlock_t imp_lock; uint32_t import_total; @@ -511,6 +511,8 @@ struct export_desc_super { enum export_state exp_state; uint32_t remote_imported; + HAB_RB_ENTRY node; + /* * exp must 
be the last member * because it is a variable length struct with pfns as payload @@ -767,4 +769,8 @@ int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest, int hab_stat_buffer_print(char *dest, int dest_size, const char *fmt, ...); int hab_create_cdev_node(int mmid_grp_index); + +struct export_desc_super *hab_rb_exp_insert(struct rb_root *root, struct export_desc_super *exp_s); +struct export_desc_super *hab_rb_exp_find(struct rb_root *root, struct export_desc_super *key); + #endif /* __HAB_H */ diff --git a/drivers/soc/qcom/hab/hab_linux.c b/drivers/soc/qcom/hab/hab_linux.c index f82f0fae31d7..ea1fa8fcff79 100644 --- a/drivers/soc/qcom/hab/hab_linux.c +++ b/drivers/soc/qcom/hab/hab_linux.c @@ -423,6 +423,64 @@ static void reclaim_cleanup(struct work_struct *reclaim_work) } } +void hab_rb_init(struct rb_root *root) +{ + *root = RB_ROOT; +} + +struct export_desc_super *hab_rb_exp_find(struct rb_root *root, struct export_desc_super *key) +{ + struct rb_node *node = root->rb_node; + struct export_desc_super *exp_super; + + while (node) { + exp_super = rb_entry(node, struct export_desc_super, node); + if (key->exp.export_id < exp_super->exp.export_id) + node = node->rb_left; + else if (key->exp.export_id > exp_super->exp.export_id) + node = node->rb_right; + else { + if (key->exp.pchan < exp_super->exp.pchan) + node = node->rb_left; + else if (key->exp.pchan > exp_super->exp.pchan) + node = node->rb_right; + else + return exp_super; + } + } + + return NULL; +} + +struct export_desc_super *hab_rb_exp_insert(struct rb_root *root, struct export_desc_super *exp_s) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + while (*new) { + struct export_desc_super *this = rb_entry(*new, struct export_desc_super, node); + + parent = *new; + if (exp_s->exp.export_id < this->exp.export_id) + new = &((*new)->rb_left); + else if (exp_s->exp.export_id > this->exp.export_id) + new = &((*new)->rb_right); + else { + if (exp_s->exp.pchan < 
this->exp.pchan) + new = &((*new)->rb_left); + else if (exp_s->exp.pchan > this->exp.pchan) + new = &((*new)->rb_right); + else + /* should not found the target key before insert */ + return this; + } + } + + rb_link_node(&exp_s->node, parent, new); + rb_insert_color(&exp_s->node, root); + + return NULL; +} + /* create one more char device for /dev/hab */ #define CDEV_NUM_MAX (MM_ID_MAX / 100 + 1) diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c index 01194954a828..773b650cbfe8 100644 --- a/drivers/soc/qcom/hab/hab_mimex.c +++ b/drivers/soc/qcom/hab/hab_mimex.c @@ -470,8 +470,8 @@ int hab_mem_import(struct uhab_context *ctx, int kernel) { int ret = 0, found = 0; - struct export_desc *exp = NULL; - struct export_desc_super *exp_super = NULL; + struct export_desc *export = NULL; + struct export_desc_super *exp_super = NULL, key = {0}; struct virtual_channel *vchan = NULL; struct hab_header header = HAB_HEADER_INITIALIZER; struct hab_import_ack expected_ack = {0}; @@ -528,59 +528,56 @@ int hab_mem_import(struct uhab_context *ctx, } } + key.exp.export_id = param->exportid; + key.exp.pchan = vchan->pchan; spin_lock_bh(&ctx->imp_lock); - list_for_each_entry(exp, &ctx->imp_whse, node) { - if ((exp->export_id == param->exportid) && - (exp->pchan == vchan->pchan)) { - exp_super = container_of(exp, struct export_desc_super, exp); - - /* not allowed to import one exp desc more than once */ - if (exp_super->import_state == EXP_DESC_IMPORTED - || exp_super->import_state == EXP_DESC_IMPORTING) { - pr_err("vc %x not allowed to import expid %u more than once\n", - vchan->id, exp->export_id); - spin_unlock_bh(&ctx->imp_lock); - ret = -EINVAL; - goto err_imp; - } - - /* - * set the flag to avoid another thread getting the exp desc again - * and must be before unlock, otherwise it is no use. 
- */ - exp_super->import_state = EXP_DESC_IMPORTING; - found = 1; - break; + exp_super = hab_rb_exp_find(&ctx->imp_whse, &key); + if (exp_super) { + /* not allowed to import one exp desc more than once */ + if (exp_super->import_state == EXP_DESC_IMPORTED + || exp_super->import_state == EXP_DESC_IMPORTING) { + export = &exp_super->exp; + pr_err("vc %x not allowed to import one expid %u more than once\n", + vchan->id, export->export_id); + spin_unlock_bh(&ctx->imp_lock); + ret = -EINVAL; + goto err_imp; } - } - spin_unlock_bh(&ctx->imp_lock); - - if (!found) { - pr_err("vc %x fail to get export descriptor from export id %d\n", - vchan->id, param->exportid); + /* + * set the flag to avoid another thread getting the exp desc again + * and must be before unlock, otherwise it is no use. + */ + exp_super->import_state = EXP_DESC_IMPORTING; + found = 1; + } else { + spin_unlock_bh(&ctx->imp_lock); + pr_err("Fail to get export descriptor from export id %d vcid %x\n", + param->exportid, vchan->id); ret = -ENODEV; goto err_imp; } + spin_unlock_bh(&ctx->imp_lock); - if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) { + export = &exp_super->exp; + if ((export->payload_count << PAGE_SHIFT) != param->sizebytes) { pr_err("vc %x input size %d don't match buffer size %d\n", - vchan->id, param->sizebytes, exp->payload_count << PAGE_SHIFT); + vchan->id, param->sizebytes, export->payload_count << PAGE_SHIFT); ret = -EINVAL; exp_super->import_state = EXP_DESC_INIT; goto err_imp; } - ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel); + ret = habmem_imp_hyp_map(ctx->import_ctx, param, export, kernel); if (ret) { pr_err("Import fail on vc %x ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n", - vchan->id, ret, exp->payload_count, - exp->domid_local, *((uint32_t *)exp->payload)); + vchan->id, ret, export->payload_count, + export->domid_local, *((uint32_t *)export->payload)); exp_super->import_state = EXP_DESC_INIT; goto err_imp; } - exp->import_index = param->index; - exp->kva = 
kernel ? (void *)param->kva : NULL; + export->import_index = param->index; + export->kva = kernel ? (void *)param->kva : NULL; exp_super->import_state = EXP_DESC_IMPORTED; err_imp: @@ -590,10 +587,10 @@ int hab_mem_import(struct uhab_context *ctx, (found == 1) && (ret != 0)) { /* dma_buf create failure, rollback required */ - hab_send_unimport_msg(vchan, exp->export_id); + hab_send_unimport_msg(vchan, export->export_id); spin_lock_bh(&ctx->imp_lock); - list_del(&exp->node); + hab_rb_remove(&ctx->imp_whse, exp_super); ctx->import_total--; spin_unlock_bh(&ctx->imp_lock); @@ -610,8 +607,8 @@ int hab_mem_unimport(struct uhab_context *ctx, int kernel) { int ret = 0, found = 0; - struct export_desc *exp = NULL, *exp_tmp; - struct export_desc_super *exp_super = NULL; + struct export_desc *exp = NULL; + struct export_desc_super *exp_super = NULL, key = {0}; struct virtual_channel *vchan; if (!ctx || !param) @@ -624,30 +621,27 @@ int hab_mem_unimport(struct uhab_context *ctx, return -ENODEV; } + key.exp.export_id = param->exportid; + key.exp.pchan = vchan->pchan; spin_lock_bh(&ctx->imp_lock); - list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) { - - /* same pchan is expected here */ - if (exp->export_id == param->exportid && - exp->pchan == vchan->pchan) { - exp_super = container_of(exp, struct export_desc_super, exp); - - /* only successfully imported export desc could be found and released */ - if (exp_super->import_state == EXP_DESC_IMPORTED) { - list_del(&exp->node); - ctx->import_total--; - found = 1; - } else - pr_err("vc %x exp id:%u status:%d is found, invalid to unimport\n", - vchan->id, exp->export_id, exp_super->import_state); - break; - } + exp_super = hab_rb_exp_find(&ctx->imp_whse, &key); + if (exp_super) { + /* only successfully imported export desc could be found and released */ + if (exp_super->import_state == EXP_DESC_IMPORTED) { + hab_rb_remove(&ctx->imp_whse, exp_super); + ctx->import_total--; + found = 1; + } else + pr_err("vc %x exp id:%u 
status:%d is found, invalid to unimport\n", + vchan->id, exp_super->exp.export_id, exp_super->import_state); } spin_unlock_bh(&ctx->imp_lock); - if (!found) + if (!found) { ret = -EINVAL; - else { + pr_err("exp id %u unavailable on vc %x\n", param->exportid, vchan->id); + } else { + exp = &exp_super->exp; ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel); if (ret) { pr_err("unmap fail id:%d pcnt:%d vcid:%d\n", diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c index 9f47b142a1f8..5df04bc0f90a 100644 --- a/drivers/soc/qcom/hab/hab_msg.c +++ b/drivers/soc/qcom/hab/hab_msg.c @@ -242,17 +242,23 @@ static void hab_msg_queue(struct virtual_channel *vchan, } static int hab_export_enqueue(struct virtual_channel *vchan, - struct export_desc *exp) + struct export_desc *export) { struct uhab_context *ctx = vchan->ctx; + struct export_desc_super *exp_super = container_of(export, struct export_desc_super, exp); int irqs_disabled = irqs_disabled(); + struct export_desc_super *ret; hab_spin_lock(&ctx->imp_lock, irqs_disabled); - list_add_tail(&exp->node, &ctx->imp_whse); - ctx->import_total++; + ret = hab_rb_exp_insert(&ctx->imp_whse, exp_super); + if (ret != NULL) + pr_err("expid %u already exists on vc %x, size %d\n", + export->export_id, vchan->id, PAGE_SIZE * export->payload_count); + else + ctx->import_total++; hab_spin_unlock(&ctx->imp_lock, irqs_disabled); - return 0; + return (ret == NULL) ? 0 : -EINVAL; } /* @@ -544,19 +550,22 @@ static int hab_receive_export_desc(struct physical_channel *pchan, ack_recvd->ack.export_id = exp_desc->export_id; ack_recvd->ack.vcid_local = exp_desc->vcid_local; ack_recvd->ack.vcid_remote = exp_desc->vcid_remote; - ack_recvd->ack.imp_whse_added = 1; } - hab_export_enqueue(vchan, exp_desc); + ret = hab_export_enqueue(vchan, exp_desc); if (pchan->mem_proto == 1) { + ack_recvd->ack.imp_whse_added = ret ? 
0 : 1; hab_spin_lock(&vchan->ctx->impq_lock, irqs_disabled); list_add_tail(&ack_recvd->node, &vchan->ctx->imp_rxq); hab_spin_unlock(&vchan->ctx->impq_lock, irqs_disabled); } else - hab_send_export_ack(vchan, pchan, exp_desc); + (void)hab_send_export_ack(vchan, pchan, exp_desc); - return 0; + if (ret) + kfree(exp_desc_super); + + return ret; err_imp: if (pchan->mem_proto == 1) { diff --git a/drivers/soc/qcom/hab/hab_os.h b/drivers/soc/qcom/hab/hab_os.h index 586d35c03ec7..faa1bf65c7de 100644 --- a/drivers/soc/qcom/hab/hab_os.h +++ b/drivers/soc/qcom/hab/hab_os.h @@ -40,6 +40,17 @@ #include #include #include + +void hab_rb_init(struct rb_root *root); + +#define hab_rb_remove(root, pos) rb_erase(&(pos)->node, root) +#define hab_rb_min(root, type, node) rb_entry_safe(rb_first(root), type, node) +#define hab_rb_max(root, type, node) rb_entry_safe(rb_last(root), type, node) +#define hab_rb_for_each_entry(pos, n, head, member) \ + rbtree_postorder_for_each_entry_safe(pos, n, head, member) +#define HAB_RB_ENTRY struct rb_node +#define HAB_RB_ROOT struct rb_root + #if defined(CONFIG_MSM_VHOST_HAB) || defined(CONFIG_MSM_VIRTIO_HAB) #include static inline unsigned long long msm_timer_get_sclk_ticks(void) diff --git a/drivers/soc/qcom/hab/hab_stat.c b/drivers/soc/qcom/hab/hab_stat.c index b2c15837aeb5..ccbb946d282d 100644 --- a/drivers/soc/qcom/hab/hab_stat.c +++ b/drivers/soc/qcom/hab/hab_stat.c @@ -121,13 +121,14 @@ static int print_ctx_total_expimp(struct uhab_context *ctx, struct compressed_pfns *pfn_table = NULL; int exp_total = 0, imp_total = 0; int exp_cnt = 0, imp_cnt = 0; - struct export_desc *exp = NULL; + struct export_desc *export = NULL; + struct export_desc_super *exp_super, *exp_super_tmp; int exim_size = 0; int ret = 0; read_lock(&ctx->exp_lock); - list_for_each_entry(exp, &ctx->exp_whse, node) { - pfn_table = (struct compressed_pfns *)exp->payload; + list_for_each_entry(export, &ctx->exp_whse, node) { + pfn_table = (struct compressed_pfns *)export->payload; 
exim_size = get_pft_tbl_total_size(pfn_table); exp_total += exim_size; exp_cnt++; @@ -135,9 +136,10 @@ static int print_ctx_total_expimp(struct uhab_context *ctx, read_unlock(&ctx->exp_lock); spin_lock_bh(&ctx->imp_lock); - list_for_each_entry(exp, &ctx->imp_whse, node) { - if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) { - pfn_table = (struct compressed_pfns *)exp->payload; + hab_rb_for_each_entry(exp_super, exp_super_tmp, &ctx->imp_whse, node) { + export = &exp_super->exp; + if (habmm_imp_hyp_map_check(ctx->import_ctx, export)) { + pfn_table = (struct compressed_pfns *)export->payload; exim_size = get_pft_tbl_total_size(pfn_table); imp_total += exim_size; imp_cnt++; @@ -146,7 +148,7 @@ static int print_ctx_total_expimp(struct uhab_context *ctx, spin_unlock_bh(&ctx->imp_lock); if (exp_cnt || exp_total || imp_cnt || imp_total) - hab_stat_buffer_print(buf, size, + ret = hab_stat_buffer_print(buf, size, "ctx %d exp %d size %d imp %d size %d\n", ctx->owner, exp_cnt, exp_total, imp_cnt, imp_total); @@ -154,26 +156,27 @@ static int print_ctx_total_expimp(struct uhab_context *ctx, return 0; read_lock(&ctx->exp_lock); - hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: "); - list_for_each_entry(exp, &ctx->exp_whse, node) { - pfn_table = (struct compressed_pfns *)exp->payload; + ret = hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: "); + list_for_each_entry(export, &ctx->exp_whse, node) { + pfn_table = (struct compressed_pfns *)export->payload; exim_size = get_pft_tbl_total_size(pfn_table); - hab_stat_buffer_print(buf, size, - "[%d:%x:%d] ", exp->export_id, - exp->vcid_local, exim_size); + ret = hab_stat_buffer_print(buf, size, + "[%d:%x:%d] ", export->export_id, + export->vcid_local, exim_size); } - hab_stat_buffer_print(buf, size, "\n"); + ret = hab_stat_buffer_print(buf, size, "\n"); read_unlock(&ctx->exp_lock); spin_lock_bh(&ctx->imp_lock); - hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: "); - list_for_each_entry(exp, 
&ctx->imp_whse, node) { - if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) { - pfn_table = (struct compressed_pfns *)exp->payload; + ret = hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: "); + hab_rb_for_each_entry(exp_super, exp_super_tmp, &ctx->imp_whse, node) { + export = &exp_super->exp; + if (habmm_imp_hyp_map_check(ctx->import_ctx, export)) { + pfn_table = (struct compressed_pfns *)export->payload; exim_size = get_pft_tbl_total_size(pfn_table); - hab_stat_buffer_print(buf, size, - "[%d:%x:%d] ", exp->export_id, - exp->vcid_local, exim_size); + ret = hab_stat_buffer_print(buf, size, + "[%d:%x:%d] ", export->export_id, + export->vcid_local, exim_size); } } ret = hab_stat_buffer_print(buf, size, "\n"); From a3589d370c06035b128e7879f4d54d63f1f6c615 Mon Sep 17 00:00:00 2001 From: Pranav Mahesh Phansalkar Date: Wed, 7 Aug 2024 13:52:53 +0530 Subject: [PATCH 091/117] rpmsg: native: Increase iterations count in glink ISR Currently, if APPS sends more than 10 requests to RPM, glink hard interrupt service function is unable to process more than 10 acknowledgements. Increase the loop iterations to 15 to process up to 15 acknowledgements in the hard interrupt context. Change-Id: Ief7385f21d5853275a2b90438181c93a01c76f78 Signed-off-by: Pranav Mahesh Phansalkar --- drivers/rpmsg/qcom_glink_native.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 3b869e0599e0..17e16c099cdd 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1630,7 +1630,7 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data) struct qcom_glink *glink = data; int ret; - ret = qcom_glink_native_rx(glink, 10); + ret = qcom_glink_native_rx(glink, 15); return (ret) ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; } From c7b140cf91887f8dd86577f3d4970eb20ba0c496 Mon Sep 17 00:00:00 2001 From: jizho Date: Mon, 26 Aug 2024 17:26:46 +0800 Subject: [PATCH 092/117] drivers: emac_mdio_fe: Add module dependency Add emac_mdio_fe module dependency. Change-Id: Ib1de750f65184841aedda934dece7d5eaa815652 Signed-off-by: jizho --- drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c index 0e51412250ad..722deb13aaeb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c +++ b/drivers/net/ethernet/stmicro/stmmac/emac_mdio_fe.c @@ -525,5 +525,6 @@ static void __exit emac_mdio_fe_exit(void) module_init(emac_mdio_fe_init); module_exit(emac_mdio_fe_exit); +MODULE_SOFTDEP("post: stmmac"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("EMAC Virt MDIO FE Driver"); From b9b1609599823dce24c4f58179016170a85c0bf2 Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Mon, 26 Aug 2024 17:17:21 +0530 Subject: [PATCH 093/117] modules.list: pineapple: Add qfprom module to first stage First stage module like tsens depends on qfprom module. Enable it in first stage DLKMs list. 
Change-Id: Id38b3a66f174c77b80dd6f020d1eca706955bb7e Signed-off-by: Manaf Meethalavalappu Pallikunhi --- modules.list.msm.pineapple | 1 + 1 file changed, 1 insertion(+) diff --git a/modules.list.msm.pineapple b/modules.list.msm.pineapple index d42a8dc2448a..6afc01622005 100644 --- a/modules.list.msm.pineapple +++ b/modules.list.msm.pineapple @@ -7,6 +7,7 @@ qcom_scmi_client.ko cmd-db.ko qcom_rpmh.ko qcom-pdc.ko +nvmem_qfprom.ko thermal_minidump.ko qcom_tsens.ko qcom_iommu_util.ko From 2325103d699d383f6750be9e8d6b2b96f43dee4f Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Mon, 26 Aug 2024 17:25:48 +0530 Subject: [PATCH 094/117] thermal: qcom: tsens: Fix function prototype mismatch There is function prototype mismatch in tsens init function. Enable __init keyword only for static driver case. Fix issues in clean up also in init function. Change-Id: Id7eaf4c65e78c1864c8b377ed0137c45cce256ad Signed-off-by: Manaf Meethalavalappu Pallikunhi --- drivers/thermal/qcom/tsens.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 4cad26c8d159..c8f9a75cc2fa 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -837,7 +837,6 @@ static const struct regmap_config tsens_srot_config = { static int init_cold_interrupt(struct tsens_priv *priv, struct platform_device *op, u32 ver_minor) { - struct device *dev = priv->dev; int ret = 0; @@ -850,16 +849,17 @@ static int init_cold_interrupt(struct tsens_priv *priv, priv->fields[COLD_STATUS]); if (IS_ERR(priv->rf[COLD_STATUS])) { ret = PTR_ERR(priv->rf[COLD_STATUS]); - goto err_put_device; } } -err_put_device: - put_device(&op->dev); return ret; } +#if IS_MODULE(CONFIG_QCOM_TSENS) +int init_common(struct tsens_priv *priv) +#else int __init init_common(struct tsens_priv *priv) +#endif { void __iomem *tm_base, *srot_base; struct device *dev = priv->dev; @@ -1032,7 +1032,7 @@ int __init init_common(struct 
tsens_priv *priv) regmap_field_write(priv->rf[CC_MON_MASK], 1); } - ret = init_cold_interrupt(priv, op, ver_minor); + init_cold_interrupt(priv, op, ver_minor); spin_lock_init(&priv->ul_lock); From e00fb1d01498634b88f98f18b45091dad95460ec Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Tue, 30 Jul 2024 02:19:42 -0700 Subject: [PATCH 095/117] q2spi-msm-geni: Set sma write pending during multi CRs During ranging sessions back to back doorbells from SOC racing with UWB session request and multi CRs reported and sma_wr_pending not set if the doorbell has independent doorbell. After that if we get another independent doorbell we could see the failure for processing independent doorbell becz sma write was pending part of previous multi CR. Change-Id: I6b8cfa86f80038935877360896f383084fbb04c1 Signed-off-by: Chandana Kishori Chiluveru --- drivers/spi/q2spi-msm-geni.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index ab1a5ba52060..dc8b1613cd3d 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -3867,6 +3867,11 @@ void q2spi_find_pkt_by_flow_id(struct q2spi_geni *q2spi, struct q2spi_cr_packet if (q2spi_pkt) { Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n", __func__, q2spi_pkt, flow_id); + if (!atomic_read(&q2spi->sma_wr_pending)) { + atomic_set(&q2spi->sma_wr_pending, 1); + Q2SPI_DEBUG(q2spi, "%s sma_wr_pending set for prev DB\n", __func__); + } + /* wakeup HRF flow which is waiting for this CR doorbell */ complete_all(&q2spi_pkt->wait_for_db); return; From 3f7ba8359f874655628cb2f28ba1035c7753c0cc Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Fri, 10 May 2024 01:00:26 +0530 Subject: [PATCH 096/117] thermal: qcom: Add support to update tsens trip based on nvmem data Add support to detect higher thermal profile parts and update thermal zone trips dynamically based on nvmem cell data for tsens. 
Change-Id: I792c4f2736d10d68b45cc9b64c0ec08d185cf007 Signed-off-by: Manaf Meethalavalappu Pallikunhi --- drivers/thermal/qcom/tsens.c | 69 ++++++++++++++++++++++++++++++++++++ drivers/thermal/qcom/tsens.h | 7 +++- 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index c8f9a75cc2fa..b4a05c1d6c4b 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -1256,11 +1256,77 @@ int tsens_v2_tsens_resume(struct tsens_priv *priv) return 0; } +static void tsens_thermal_zone_trip_update(struct thermal_zone_device *tz, + int trip_id) +{ + u32 trip_delta = 0; + + if (!of_thermal_is_trip_valid(tz, trip_id) || !tz->trips) + return; + + if (tz->trips[trip_id].type == THERMAL_TRIP_CRITICAL) + return; + + if (tz->trips[trip_id].type == THERMAL_TRIP_HOT) + trip_delta = TSENS_ELEVATE_HOT_DELTA; + else if (strnstr(tz->type, "cpu", sizeof(tz->type))) + trip_delta = TSENS_ELEVATE_CPU_DELTA; + else + trip_delta = TSENS_ELEVATE_DELTA; + + mutex_lock(&tz->lock); + tz->trips[trip_id].temperature += trip_delta; + mutex_unlock(&tz->lock); + + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); +} + +static int tsens_nvmem_trip_update(struct thermal_zone_device *tz) +{ + int i, num_trips = 0; + + if (strnstr(tz->type, "mdmss", sizeof(tz->type))) + return 0; + + num_trips = of_thermal_get_ntrips(tz); + /* First trip is for userspace, update all other trips. 
*/ + for (i = 1; i < num_trips; i++) + tsens_thermal_zone_trip_update(tz, i); + + return 0; +} + +static bool tsens_is_nvmem_trip_update_needed(struct tsens_priv *priv) +{ + int ret; + u32 itemp = 0; + + if (!of_property_read_bool(priv->dev->of_node, "nvmem-cells")) + return false; + + ret = nvmem_cell_read_variable_le_u32(priv->dev, + "tsens_itemp", &itemp); + if (ret) { + dev_err(priv->dev, + "%s: Not able to read tsens_chipinfo nvmem, ret:%d\n", + __func__, ret); + return false; + } + + TSENS_DBG_2(priv, "itemp fuse:0x%x", itemp); + if (itemp) + return true; + + return false; +} + static int tsens_register(struct tsens_priv *priv) { int i, temp, ret; struct thermal_zone_device *tzd; + priv->need_trip_update = tsens_is_nvmem_trip_update_needed(priv); + for (i = 0; i < priv->num_sensors; i++) { priv->sensor[i].priv = priv; tzd = devm_thermal_of_zone_register(priv->dev, priv->sensor[i].hw_id, @@ -1287,6 +1353,9 @@ static int tsens_register(struct tsens_priv *priv) if (devm_thermal_add_hwmon_sysfs(tzd)) dev_warn(priv->dev, "Failed to add hwmon sysfs attributes\n"); + /* update tsens trip based on fuse register */ + if (priv->need_trip_update) + ret = tsens_nvmem_trip_update(tzd); qti_update_tz_ops(tzd, true); } diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index 724529b39c34..e98691ad0ec0 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -19,6 +19,10 @@ #define THRESHOLD_MIN_ADC_CODE 0x0 #define COLD_SENSOR_HW_ID 128 +#define TSENS_ELEVATE_DELTA 10000 +#define TSENS_ELEVATE_CPU_DELTA 5000 +#define TSENS_ELEVATE_HOT_DELTA 3000 + #include #include #include @@ -615,7 +619,8 @@ struct tsens_priv { int crit_irq; int cold_irq; - bool tm_disable_on_suspend; + bool need_trip_update; + bool tm_disable_on_suspend; struct dentry *debug_root; struct dentry *debug; From 23a1aabf8fa3c7993364a18ccfb455fdc54d91ca Mon Sep 17 00:00:00 2001 From: Shivnandan Kumar Date: Tue, 6 Aug 2024 11:07:38 +0530 Subject: [PATCH 097/117] 
drivers: dcvs: bwmon: synchronize_irq before hibernation During the hibernation preparation phase, the bwmon driver disables the bwmon hardware, including its IRQ. However, there can be instances where a bwmon IRQ remains pending at the GIC level even after the hardware and IRQ are disabled. Upon hibernation exit, the GIC resumes first and restores all pending IRQs, including bwmon if it was pending before hibernation entry. This can lead to an IRQ storm. To fix this, call synchronize_irq before hibernation entry to ensure that there are no pending bwmon IRQs before hibernation entry. Change-Id: Ib64510cb93296f37ab5fecd8944ce33a3c95e17b Signed-off-by: Shivnandan Kumar --- drivers/soc/qcom/dcvs/bwmon.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/dcvs/bwmon.c b/drivers/soc/qcom/dcvs/bwmon.c index 014573b4a063..c9d7a5ba149c 100644 --- a/drivers/soc/qcom/dcvs/bwmon.c +++ b/drivers/soc/qcom/dcvs/bwmon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "qcom-bwmon: " fmt @@ -1702,6 +1702,7 @@ void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type) bwmon_monitor_stop(hw); mon_irq_disable(m, type); + synchronize_irq(m->irq); free_irq(m->irq, m); mon_disable(m, type); mon_clear(m, true, type); From 269307d1c195c374f2638a846ab974920caabe68 Mon Sep 17 00:00:00 2001 From: Deyan Wang Date: Mon, 26 Aug 2024 15:31:04 +0530 Subject: [PATCH 098/117] soc: qcom: hab: Add 3 new mmids in virtio-hab Add 3 new mmids for VNW, EXT and GPCE. 
Change-Id: I79801c87b22313cfbce0c83887a946e04e7e7915 Signed-off-by: Deyan Wang --- drivers/soc/qcom/hab/hab_virtio.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/soc/qcom/hab/hab_virtio.c b/drivers/soc/qcom/hab/hab_virtio.c index ced6f622d9e3..e81023212471 100644 --- a/drivers/soc/qcom/hab/hab_virtio.c +++ b/drivers/soc/qcom/hab/hab_virtio.c @@ -21,6 +21,9 @@ #define HAB_VIRTIO_DEVICE_ID_DISPLAY 93 #define HAB_VIRTIO_DEVICE_ID_GRAPHICS 94 #define HAB_VIRTIO_DEVICE_ID_VIDEO 95 +#define HAB_VIRTIO_DEVICE_ID_VNW 96 +#define HAB_VIRTIO_DEVICE_ID_EXT 97 +#define HAB_VIRTIO_DEVICE_ID_GPCE 98 /* all probed virtio_hab stored in this list */ static struct list_head vhab_list = LIST_HEAD_INIT(vhab_list); @@ -39,6 +42,9 @@ static struct virtio_device_tbl { { MM_DISP_1, HAB_VIRTIO_DEVICE_ID_DISPLAY, NULL }, { MM_GFX, HAB_VIRTIO_DEVICE_ID_GRAPHICS, NULL }, { MM_VID, HAB_VIRTIO_DEVICE_ID_VIDEO, NULL }, + { MM_VNW_1, HAB_VIRTIO_DEVICE_ID_VNW, NULL }, + { MM_EXT_1, HAB_VIRTIO_DEVICE_ID_EXT, NULL }, + { MM_GPCE_1, HAB_VIRTIO_DEVICE_ID_GPCE, NULL }, }; enum pool_type_t { @@ -743,6 +749,18 @@ static int virthab_probe(struct virtio_device *vdev) mmid_start = MM_VID; mmid_range = MM_VID_END - MM_VID_START - 1; virthab_store_vdev(MM_VID, vdev); + } else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_VNW) { + mmid_start = MM_VNW_1; + mmid_range = MM_VNW_END - MM_VNW_START - 1; + virthab_store_vdev(MM_VNW_1, vdev); + } else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_EXT) { + mmid_start = MM_EXT_1; + mmid_range = MM_EXT_END - MM_EXT_START - 1; + virthab_store_vdev(MM_EXT_1, vdev); + } else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_GPCE) { + mmid_start = MM_GPCE_1; + mmid_range = MM_GPCE_END - MM_GPCE_START - 1; + virthab_store_vdev(MM_GPCE_1, vdev); } else { pr_err("unknown virtio device is detected %d\n", vdev->id.device); @@ -878,6 +896,9 @@ static struct virtio_device_id id_table[] = { { HAB_VIRTIO_DEVICE_ID_DISPLAY, VIRTIO_DEV_ANY_ID }, /* virtio 
display */ { HAB_VIRTIO_DEVICE_ID_GRAPHICS, VIRTIO_DEV_ANY_ID }, /* virtio graphics */ { HAB_VIRTIO_DEVICE_ID_VIDEO, VIRTIO_DEV_ANY_ID }, /* virtio video */ + { HAB_VIRTIO_DEVICE_ID_VNW, VIRTIO_DEV_ANY_ID }, /* virtio vnw */ + { HAB_VIRTIO_DEVICE_ID_EXT, VIRTIO_DEV_ANY_ID }, /* virtio external */ + { HAB_VIRTIO_DEVICE_ID_GPCE, VIRTIO_DEV_ANY_ID }, /* virtio gpce */ { 0 }, }; From cb2f2e127f75f834117c337c33acb5cb06e10357 Mon Sep 17 00:00:00 2001 From: Kamati Srinivas Date: Tue, 27 Aug 2024 16:24:26 +0530 Subject: [PATCH 099/117] remoteproc: qcom: pas: Fix rproc_config_check Introduce delay only if read value is mis-matched with requested state. Change-Id: I5c35706bcfe00151b09e90f20b8e14e9a8842643 Signed-off-by: Kamati Srinivas --- drivers/remoteproc/qcom_q6v5_pas.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index a117ce953b82..b8150aa5e930 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -838,7 +838,7 @@ static int rproc_config_check(struct qcom_adsp *adsp, u32 state, void *addr) do { usleep_range(SOCCP_SLEEP_US, SOCCP_SLEEP_US + 100); val = readl(addr); - } while (!(val && state) && --retry_num); + } while (!(val & state) && --retry_num); return (val & state) ? 0 : -ETIMEDOUT; } From fef9e81ee158327cd31c6ede6133c918ed7b027d Mon Sep 17 00:00:00 2001 From: Srinath Pandey Date: Mon, 19 Aug 2024 01:32:22 +0530 Subject: [PATCH 100/117] net: ethernet: stmmac: Add 2.5G Phy Support Add 2.5G support for SA8775 and enable CL45 read write through indirect read/write APIs. 
Change-Id: Ia71501f27429ff775a1b39a6754922047d30a44f Signed-off-by: Srinath Pandey --- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 7 +++++++ drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 4 ++-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 ++ drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 9 ++++++--- 4 files changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 7fd3fe6a83d6..695dfa5d1e88 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2832,5 +2832,12 @@ module_init(qcom_ethqos_init_module) module_exit(qcom_ethqos_exit_module) +#if IS_ENABLED(CONFIG_AQUANTIA_PHY) +MODULE_SOFTDEP("post: aquantia"); +#endif +#if IS_ENABLED(CONFIG_MARVELL_PHY) +MODULE_SOFTDEP("post: marvell"); +#endif + MODULE_DESCRIPTION("Qualcomm ETHQOS driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d99fa028c646..2206390f7626 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -380,8 +380,8 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr, dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24; dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; - dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17; - + // dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17; + dma_cap->sphen = 0; dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14; switch (dma_cap->addr64) { case 0: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 58123275b30e..988a492754ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ 
-7479,6 +7479,8 @@ int stmmac_dvr_probe(struct device *device, ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED ndev->vlan_features |= ndev->hw_features; + priv->dma_cap.vlhash = 0; + priv->dma_cap.vlins = 0; /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; priv->dma_cap.vlhash = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index abf663371448..5d2d6d5654ba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -506,9 +506,12 @@ int stmmac_mdio_register(struct net_device *ndev) } else { err = new_bus->read(new_bus, phyaddr, MII_BMSR); if (err == -EBUSY || !err || err == 0xffff) { - dev_warn(dev, "Invalid PHY address read from dtsi: %d\n", - phyaddr); - new_bus->phy_mask = mdio_bus_data->phy_mask; + err = of_property_read_u32(np, "emac-cl45-phy-addr", &phyaddr); + new_bus->phy_mask = ~(1 << phyaddr); + skip_phy_detect = 1; + new_bus->read = &virtio_mdio_read_c45_indirect; + new_bus->write = &virtio_mdio_write_c45_indirect; + new_bus->probe_capabilities = MDIOBUS_C22_C45; } else { new_bus->phy_mask = ~(1 << phyaddr); skip_phy_detect = 1; From f1e0fdbcd55cf0437d4f94b54069b5965ec8f6d8 Mon Sep 17 00:00:00 2001 From: Urmila Pundalikrao Lakade Date: Fri, 23 Aug 2024 12:14:13 +0530 Subject: [PATCH 101/117] clk: qcom: gcc-mdm9607: Fix cmd_rcgr offset for blsp1_uart6_apps_clk_src Fix cmd_rcgr offset for blsp1_uart6_apps_clk_src on mdm9607 platform. 
Change-Id: Iddba14caa2f59cba6d2723cbed98410930f3a8c2 Signed-off-by: Urmila Pundalikrao Lakade --- drivers/clk/qcom/gcc-mdm9607.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c index d2424e322f09..e4f57b03d0dc 100644 --- a/drivers/clk/qcom/gcc-mdm9607.c +++ b/drivers/clk/qcom/gcc-mdm9607.c @@ -632,7 +632,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = { }; static struct clk_rcg2 blsp1_uart6_apps_clk_src = { - .cmd_rcgr = 0x6044, + .cmd_rcgr = 0x7044, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, From 811947e6b5b75ca7f35596c49884a4002ca663a0 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Fri, 19 Jul 2024 08:06:36 -0700 Subject: [PATCH 102/117] q2spi-msm-geni: connect GSI doorbell after start channel operation Synchronous channel command 48 needed to connect doorbell signal after performing start channel operation to serve the doorbell signal from gsi. Also ensure doorbell buffers are mapped after doorbell connect cmd. Without this GSI FW corruptions seen during q2spi sleep wakeup sequence. Change-Id: Idf7a420e29eac3767d66492872d54c20bb657371 Signed-off-by: Chandana Kishori Chiluveru --- drivers/dma/qcom/msm_gpi.c | 33 +++++++++++++++++++++++++++++---- drivers/spi/q2spi-gsi.c | 6 ++++++ drivers/spi/q2spi-msm-geni.c | 31 ++++++++++++------------------- include/linux/msm_gpi.h | 8 ++++++++ 4 files changed, 55 insertions(+), 23 deletions(-) diff --git a/drivers/dma/qcom/msm_gpi.c b/drivers/dma/qcom/msm_gpi.c index a728e20655a8..18d69ed0f969 100644 --- a/drivers/dma/qcom/msm_gpi.c +++ b/drivers/dma/qcom/msm_gpi.c @@ -1772,11 +1772,40 @@ int gpi_terminate_channel(struct gpii_chan *gpii_chan) return ret; } +/* + * geni_gsi_connect_doorbell() - function to connect gsi doorbell + * @chan: gsi channel handle + * + * This function uses asynchronous channel command 48 to connect + * io_6 input from GSI interrupt input. 
+ * + * Return: Returns success or failure + */ +int geni_gsi_connect_doorbell(struct dma_chan *chan) +{ + struct gpii_chan *gpii_chan = to_gpii_chan(chan); + struct gpii *gpii = gpii_chan->gpii; + int ret = 0; + + GPII_VERB(gpii, gpii_chan->chid, "Enter\n"); + ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ENABLE_HID); + if (ret) { + GPII_ERR(gpii, gpii_chan->chid, "Error enable Chan:%d HID interrupt\n", ret); + gpi_dump_debug_reg(gpii); + } + + return ret; +} +EXPORT_SYMBOL_GPL(geni_gsi_connect_doorbell); + /* * geni_gsi_disconnect_doorbell_stop_ch() - function to disconnect gsi doorbell and stop channel * @chan: gsi channel handle * @stop_ch: stop channel if set to true * + * This function uses asynchronous channel command 49 to dis-connect + * io_6 input from GSI interrupt input. + * * Return: Returns success or failure */ int geni_gsi_disconnect_doorbell_stop_ch(struct dma_chan *chan, bool stop_ch) @@ -1786,10 +1815,6 @@ int geni_gsi_disconnect_doorbell_stop_ch(struct dma_chan *chan, bool stop_ch) int ret = 0; bool error = false; - /* - * Use asynchronous channel command 49 (see section 3.10.7) to dis-connect - * io_6 input from GSI interrupt input. 
- */ GPII_VERB(gpii, gpii_chan->chid, "Enter\n"); ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_DISABLE_HID); if (ret) { diff --git a/drivers/spi/q2spi-gsi.c b/drivers/spi/q2spi-gsi.c index ee4a9b225b2c..7f7e4a1630b1 100644 --- a/drivers/spi/q2spi-gsi.c +++ b/drivers/spi/q2spi-gsi.c @@ -708,6 +708,7 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void * case MSM_GPI_QUP_CR_HEADER: /* Update last access time of a device for autosuspend */ pm_runtime_mark_last_busy(q2spi->dev); + q2spi->gsi->qup_gsi_err = false; q2spi_cr_hdr_event = &cb->q2spi_cr_header_event; num_crs = q2spi_cr_hdr_event->byte0_len; if (q2spi_cr_hdr_event->code == Q2SPI_CR_HEADER_LEN_ZERO || @@ -744,6 +745,11 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void * if (cb->cb_event == MSM_GPI_QUP_ERROR) q2spi->gsi->qup_gsi_global_err = true; + if (cb->cb_event == MSM_GPI_QUP_FW_ERROR) { + q2spi_geni_se_dump_regs(q2spi); + gpi_dump_for_geni(q2spi->gsi->tx_c); + } + if (q2spi->gsi->qup_gsi_err) Q2SPI_DEBUG(q2spi, "%s set qup_gsi_err\n", __func__); } diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index dc8b1613cd3d..adaceda5c111 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -1977,15 +1977,16 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re return ret; } else if (ret == -ETIMEDOUT) { /* Upon transfer failure's retry here */ - Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d retrying cur_q2spi_pkt:%p\n", - __func__, ret, i + 1, cur_q2spi_pkt); + Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d q2spi_pkt:%p db_pending:%d\n", + __func__, ret, i + 1, cur_q2spi_pkt, + atomic_read(&q2spi->doorbell_pending)); if (q2spi->gsi->qup_gsi_global_err) { Q2SPI_DEBUG(q2spi, "%s GSI global error, No retry\n", __func__); ret = -EIO; goto transfer_exit; } - if (i == 0) { + if (i == 0 && !atomic_read(&q2spi->doorbell_pending)) { ret = q2spi_wakeup_hw_from_sleep(q2spi); if (ret) { 
Q2SPI_DEBUG(q2spi, "%s Err q2spi_wakeup_hw_from_sleep\n", @@ -2212,7 +2213,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t return -EINVAL; } q2spi = filp->private_data; - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DEBUG(q2spi, "In %s Enter PID=%d\n", __func__, current->pid); mutex_lock(&q2spi->port_lock); ret = q2spi_transfer_check(q2spi, &q2spi_req, buf, len); @@ -2784,9 +2785,6 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret); atomic_set(&q2spi->sma_wr_pending, 0); - atomic_set(&q2spi->doorbell_pending, 0); - q2spi_geni_se_dump_regs(q2spi); - gpi_dump_for_geni(q2spi->gsi->tx_c); del_timer_sync(&q2spi->slave_sleep_timer); goto unmap_buf; } @@ -2797,9 +2795,6 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) Q2SPI_DEBUG(q2spi, "%s PID:%d Err completion timeout: %d\n", __func__, current->pid, ret); atomic_set(&q2spi->sma_wr_pending, 0); - atomic_set(&q2spi->doorbell_pending, 0); - q2spi_geni_se_dump_regs(q2spi); - gpi_dump_for_geni(q2spi->gsi->tx_c); del_timer_sync(&q2spi->slave_sleep_timer); goto unmap_buf; } @@ -3102,14 +3097,11 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) q2spi_pkt->var5_pkt->flow_id); } } - if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending)) { - atomic_inc(&q2spi->retry); - Q2SPI_DEBUG(q2spi, "%s doorbell pending retry\n", __func__); - complete_all(&q2spi_pkt->bulk_wait); - q2spi_unmap_var_bufs(q2spi, q2spi_pkt); - ret = -EAGAIN; - goto send_msg_exit; - } + + if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending)) + Q2SPI_DEBUG(q2spi, "%s cm_flow_pkt:%d doorbell_pending:%d\n", + __func__, cm_flow_pkt, atomic_read(&q2spi->doorbell_pending)); + ret = q2spi_gsi_submit(q2spi_pkt); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret); @@ -4619,7 +4611,7 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni 
*q2spi) return ret; } geni_gsi_ch_start(q2spi->gsi->tx_c); - + geni_gsi_connect_doorbell(q2spi->gsi->tx_c); ret = q2spi_map_doorbell_rx_buf(q2spi); return ret; } @@ -4769,6 +4761,7 @@ static int q2spi_geni_runtime_resume(struct device *dev) Q2SPI_DEBUG(q2spi, "%s Failed to set IRQ wake\n", __func__); geni_gsi_ch_start(q2spi->gsi->tx_c); + geni_gsi_connect_doorbell(q2spi->gsi->tx_c); /* Clear is_suspend to map doorbell buffers */ atomic_set(&q2spi->is_suspend, 0); diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h index dac16f3d4157..1f64a2ed1e8f 100644 --- a/include/linux/msm_gpi.h +++ b/include/linux/msm_gpi.h @@ -470,6 +470,14 @@ int gsi_common_tx_tre_optimization(struct gsi_common *gsi, u32 num_xfers, u32 nu */ int geni_gsi_ch_start(struct dma_chan *chan); +/** + * geni_gsi_connect_doorbell() - function to connect gsi doorbell + * @chan: dma channel handle + * + * Return: Returns success or failure + */ +int geni_gsi_connect_doorbell(struct dma_chan *chan); + /** * geni_gsi_disconnect_doorbell_stop_ch() - function to disconnect gsi doorbell and stop channel * @chan: dma channel handle From 2875185c00ca856be842c0ed182e91f6c7549012 Mon Sep 17 00:00:00 2001 From: Anil Veshala Veshala Date: Mon, 5 Aug 2024 04:21:13 -0700 Subject: [PATCH 103/117] q2spi-msm-geni: skip terminate sequence during start sequence failed When SOC in sleep state, q2spi first transfer will be failed with start sequence timed out status. As part of timeout host performs terminate sequence, once terminate sequence is done, GSI-FW can serve the doorbell any point of time, which is leading to race conditions. To solve this skipped terminate sequence during start sequence failure case. Also modified SW sequence as per recommendation by HPG. 
Change-Id: I8ccbb93d45b2fe6da67bc36086691666e34cb0db Signed-off-by: Chandana Kishori Chiluveru Signed-off-by: Anil Veshala Veshala --- drivers/spi/q2spi-gsi.c | 44 +++++++++++++++++++++++++++++++++--- drivers/spi/q2spi-msm-geni.c | 14 +++++++----- drivers/spi/q2spi-msm.h | 14 ++++++++++++ 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/drivers/spi/q2spi-gsi.c b/drivers/spi/q2spi-gsi.c index 7f7e4a1630b1..1e233c97d07c 100644 --- a/drivers/spi/q2spi-gsi.c +++ b/drivers/spi/q2spi-gsi.c @@ -106,6 +106,40 @@ static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb co q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event); } +/* + * q2spi_check_m_irq_err_status() - this function checks m_irq error status and + * also if start sequence error seen it will set is_start_seq_fail flag as true. + * + * @q2spi: q2spi master device handle + * @cb_status: irq status fields + * + * Return: None + */ +static void q2spi_check_m_irq_err_status(struct q2spi_geni *q2spi, u32 cb_status) +{ + /* bit 5 to 12 represents gp irq status */ + u32 status = (cb_status & M_GP_IRQ_MASK) >> M_GP_IRQ_ERR_START_BIT; + + if (status & Q2SPI_PWR_ON_NACK) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_PWR_ON_NACK\n", __func__); + if (status & Q2SPI_HDR_FAIL) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_HDR_FAIL\n", __func__); + if (status & Q2SPI_HCR_FAIL) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_HCR_FAIL\n", __func__); + if (status & Q2SPI_CHECKSUM_FAIL) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_CHEKSUM_FAIL\n", __func__); + if (status & Q2SPI_START_SEQ_TIMEOUT) { + q2spi->is_start_seq_fail = true; + Q2SPI_DEBUG(q2spi, "%s Q2SPI_START_SEQ_TIMEOUT\n", __func__); + } + if (status & Q2SPI_STOP_SEQ_TIMEOUT) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_STOP_SEQ_TIMEOUT\n", __func__); + if (status & Q2SPI_WAIT_PHASE_TIMEOUT) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_WAIT_PHASE_TIMEOUT\n", __func__); + if (status & Q2SPI_CLIENT_EN_NOT_DETECTED) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_CLIENT_EN_NOT_DETECTED\n", __func__); +} + static void 
q2spi_gsi_tx_callback(void *cb) { struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL; @@ -131,6 +165,9 @@ static void q2spi_gsi_tx_callback(void *cb) if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) { Q2SPI_DEBUG(q2spi, "%s Unexpected GSI CB completion code CB status:0x%x\n", __func__, cb_param->status); + q2spi->gsi->qup_gsi_err = true; + q2spi_check_m_irq_err_status(q2spi, cb_param->status); + complete_all(&q2spi->tx_cb); return; } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) { Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); @@ -470,7 +507,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s PID:%d Tx[%d] timeout\n", __func__, current->pid, i); ret = -ETIMEDOUT; goto err_gsi_geni_transfer; - } else { + } else if (!q2spi->gsi->qup_gsi_err) { Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__); } } @@ -482,7 +519,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s PID:%d Rx[%d] timeout\n", __func__, current->pid, i); ret = -ETIMEDOUT; goto err_gsi_geni_transfer; - } else { + } else if (!q2spi->gsi->qup_gsi_err) { Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__); } } @@ -492,7 +529,8 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s Err QUP Gsi Error\n", __func__); q2spi->gsi->qup_gsi_err = false; q2spi->setup_config0 = false; - gpi_q2spi_terminate_all(q2spi->gsi->tx_c); + if (!q2spi->is_start_seq_fail) + gpi_q2spi_terminate_all(q2spi->gsi->tx_c); } return ret; } diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index adaceda5c111..c7c8eb2cbeef 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -1986,7 +1986,9 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re goto transfer_exit; } - if (i == 0 && !atomic_read(&q2spi->doorbell_pending)) { + if (i == 0 && !atomic_read(&q2spi->doorbell_pending) && + q2spi->is_start_seq_fail) { + q2spi->is_start_seq_fail = 
false; ret = q2spi_wakeup_hw_from_sleep(q2spi); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_wakeup_hw_from_sleep\n", @@ -2270,6 +2272,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t pm_runtime_set_suspended(q2spi->dev); goto err; } + q2spi->is_start_seq_fail = false; Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); q2spi_wait_for_doorbell_setup_ready(q2spi); @@ -4570,8 +4573,6 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell only\n", __func__); atomic_set(&q2spi->slave_in_sleep, 0); - geni_gsi_disconnect_doorbell_stop_ch(q2spi->gsi->tx_c, true); - q2spi_unmap_doorbell_rx_buf(q2spi); ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_default); if (ret) { @@ -4610,9 +4611,10 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi) __func__, ret); return ret; } - geni_gsi_ch_start(q2spi->gsi->tx_c); - geni_gsi_connect_doorbell(q2spi->gsi->tx_c); - ret = q2spi_map_doorbell_rx_buf(q2spi); + + /* add necessary delay to wake up the soc */ + usleep_range(5000, 6000); + gpi_q2spi_terminate_all(q2spi->gsi->tx_c); return ret; } diff --git a/drivers/spi/q2spi-msm.h b/drivers/spi/q2spi-msm.h index 2745e7297073..e25863cb1169 100644 --- a/drivers/spi/q2spi-msm.h +++ b/drivers/spi/q2spi-msm.h @@ -133,6 +133,18 @@ #define SE_SPI_RX_TRANS_LEN 0x270 #define TRANS_LEN_MSK GENMASK(23, 0) +/* GENI General Purpose Interrupt Status */ +#define M_GP_IRQ_ERR_START_BIT 5 +#define M_GP_IRQ_MASK GENMASK(12, 5) +#define Q2SPI_PWR_ON_NACK BIT(0) +#define Q2SPI_HDR_FAIL BIT(1) +#define Q2SPI_HCR_FAIL BIT(2) +#define Q2SPI_CHECKSUM_FAIL BIT(3) +#define Q2SPI_START_SEQ_TIMEOUT BIT(4) +#define Q2SPI_STOP_SEQ_TIMEOUT BIT(5) +#define Q2SPI_WAIT_PHASE_TIMEOUT BIT(6) +#define Q2SPI_CLIENT_EN_NOT_DETECTED BIT(7) + /* HRF FLOW Info */ #define HRF_ENTRY_OPCODE 3 #define HRF_ENTRY_TYPE 3 @@ -518,6 +530,7 @@ struct 
q2spi_dma_transfer { * @q2spi_sleep_cmd_enable: reflects start sending the sleep command to slave * @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header * @slave_sleep_lock: lock to wait for 3msec after sleep packet before initiating next transfer. + * @is_start_seq_fail: start sequence fail due to slave not responding */ struct q2spi_geni { struct device *wrapper_dev; @@ -626,6 +639,7 @@ struct q2spi_geni { bool q2spi_cr_hdr_err; /* lock to protect sleep cmd to slave and next transfer */ struct mutex slave_sleep_lock; + bool is_start_seq_fail; }; /** From e4b373f66ac7170e11090e83838f21e2e54d7428 Mon Sep 17 00:00:00 2001 From: Visweswara Tanuku Date: Thu, 15 Aug 2024 07:38:06 -0700 Subject: [PATCH 104/117] q2spi-msm-geni: Prevent double free of q2spi_req data_buff pointer When allocation of tid fails, since q2spi_add_req_to_tx_queue accepts q2spi_req as value its leading to double free of data_buff in q2spi_transfer. Pass q2spi_req by reference to q2spi_add_req_to_tx_queue to avoid double free of pointer. 
Change-Id: Ic2d984ce4a54da33016dad805ec74b9f4bc53f37 Signed-off-by: Visweswara Tanuku --- drivers/spi/q2spi-msm-geni.c | 40 ++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index c7c8eb2cbeef..5687c40eb5e3 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -1255,11 +1255,12 @@ q2spi_get_dw_offset(struct q2spi_geni *q2spi, enum cmd_type c_type, unsigned int return offset; } -int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, +int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr, struct q2spi_packet **q2spi_pkt_ptr, int vtype) { struct q2spi_packet *q2spi_pkt; struct q2spi_host_variant1_pkt *q2spi_hc_var1; + struct q2spi_request q2spi_req = *q2spi_req_ptr; int ret; unsigned int dw_offset = 0; @@ -1288,7 +1289,7 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, sizeof(q2spi_hc_var1->data_buf) : q2spi_req.data_len; memcpy(q2spi_hc_var1->data_buf, q2spi_req.data_buff, q2spi_req.data_len); q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__); - q2spi_req.data_buff = NULL; + q2spi_req_ptr->data_buff = NULL; } q2spi_hc_var1->flow = MC_FLOW; q2spi_hc_var1->interrupt = CLIENT_INTERRUPT; @@ -1324,10 +1325,11 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, return q2spi_hc_var1->flow_id; } -int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, +int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr, struct q2spi_packet *q2spi_pkt) { struct q2spi_host_variant4_5_pkt *q2spi_hc_var5; + struct q2spi_request q2spi_req = *q2spi_req_ptr; int ret = 0, flow_id; if (!q2spi) { @@ -1386,7 +1388,7 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, q2spi_dump_ipc(q2spi, "sma format var5 data_buf", (char *)q2spi_hc_var5->data_buf, q2spi_req.data_len); 
q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__); - q2spi_req.data_buff = NULL; + q2spi_req_ptr->data_buff = NULL; } if (q2spi_req.flow_id < Q2SPI_END_TID_ID) q2spi_hc_var5->flow = MC_FLOW; @@ -1528,7 +1530,7 @@ int q2spi_hrf_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, Q2SPI_DEBUG(q2spi, "%s hrf_req cmd:%d flow_id:%d data_buff:%p\n", __func__, q2spi_hrf_req->cmd, q2spi_hrf_req->flow_id, q2spi_hrf_req->data_buff); - ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA); + ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA); Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", __func__, q2spi_hrf_req, q2spi_pkt); if (ret < 0) { @@ -1562,7 +1564,7 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, Q2SPI_DEBUG(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n", __func__, q2spi_req.addr, q2spi_req.proto_ind, q2spi_req.data_len); - ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF); + ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF); Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", __func__, q2spi_hrf_req, q2spi_pkt); if (ret < 0) { @@ -1571,7 +1573,7 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, } q2spi_pkt->flow_id = ret; - ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt); + ret = q2spi_sma_format(q2spi, &q2spi_req, q2spi_pkt); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n", __func__, ret); q2spi_unmap_var_bufs(q2spi, q2spi_pkt); @@ -1658,6 +1660,7 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet * /* * q2spi_add_req_to_tx_queue - Add q2spi packets to tx_queue_list * @q2spi: pointer to q2spi_geni + * @q2spi_req_ptr: pointer to q2spi_request * @q2spi_pkt_ptr: ponter to q2spi_packet * * This function frames the Q2SPI host request based on request type @@ -1665,10 +1668,11 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, 
struct q2spi_packet * * * Return: 0 on success. Error code on failure. */ -int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, +int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr, struct q2spi_packet **q2spi_pkt_ptr) { struct q2spi_packet *q2spi_pkt = NULL; + struct q2spi_request q2spi_req = *q2spi_req_ptr; int ret = -EINVAL; if (q2spi->port_release) { @@ -1679,7 +1683,7 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2s q2spi_tx_queue_status(q2spi); q2spi_print_req_cmd(q2spi, q2spi_req); if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == LOCAL_REG_WRITE) { - ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA); + ret = q2spi_frame_lra(q2spi, q2spi_req_ptr, &q2spi_pkt, VARIANT_1_LRA); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); @@ -1690,7 +1694,7 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2s q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__); if (!q2spi_pkt) return -ENOMEM; - ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt); + ret = q2spi_sma_format(q2spi, q2spi_req_ptr, q2spi_pkt); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n", __func__, ret); @@ -2029,7 +2033,7 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re q2spi_req.data_buff = data_buf; } mutex_lock(&q2spi->queue_lock); - flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt); + flow_id = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &cur_q2spi_pkt); mutex_unlock(&q2spi->queue_lock); if (flow_id < 0) { q2spi_kfree(q2spi, data_buf, __LINE__); @@ -2074,7 +2078,7 @@ void q2spi_transfer_abort(struct q2spi_geni *q2spi) abort_request.cmd = ABORT; abort_request.sync = 1; mutex_lock(&q2spi->queue_lock); - ret = q2spi_add_req_to_tx_queue(q2spi, abort_request, &cur_q2spi_abort_pkt); + ret = q2spi_add_req_to_tx_queue(q2spi, 
&abort_request, &cur_q2spi_abort_pkt); mutex_unlock(&q2spi->queue_lock); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_add_req_to_tx_queue ret:%d\n", __func__, ret); @@ -2107,7 +2111,7 @@ void q2spi_transfer_soft_reset(struct q2spi_geni *q2spi) soft_reset_request.cmd = SOFT_RESET; soft_reset_request.sync = 1; mutex_lock(&q2spi->queue_lock); - ret = q2spi_add_req_to_tx_queue(q2spi, soft_reset_request, &cur_q2spi_sr_pkt); + ret = q2spi_add_req_to_tx_queue(q2spi, &soft_reset_request, &cur_q2spi_sr_pkt); mutex_unlock(&q2spi->queue_lock); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_add_req_to_tx_queue ret:%d\n", __func__, ret); @@ -2278,7 +2282,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t q2spi_wait_for_doorbell_setup_ready(q2spi); mutex_lock(&q2spi->queue_lock); reinit_completion(&q2spi->sma_wr_comp); - flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt); + flow_id = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &cur_q2spi_pkt); mutex_unlock(&q2spi->queue_lock); if (flow_id < 0) { if (q2spi_req.data_buff) @@ -3537,7 +3541,7 @@ int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset) q2spi_req.data_len = 4; /* In bytes */ Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p &q2spi_pkt=%p\n", __func__, q2spi_pkt, &q2spi_pkt); - ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA); + ret = q2spi_frame_lra(q2spi, &q2spi_req, &q2spi_pkt, VARIANT_1_LRA); Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d\n", __func__, q2spi_pkt, ret); if (ret < 0) { Q2SPI_DEBUG(q2spi, "Err q2spi_frame_lra failed ret:%d\n", ret); @@ -3597,7 +3601,7 @@ static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned lo q2spi_req.addr = reg_offset; q2spi_req.data_len = 4; q2spi_req.data_buff = &data; - ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA); + ret = q2spi_frame_lra(q2spi, &q2spi_req, &q2spi_pkt, VARIANT_1_LRA); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, 
ret); return ret; @@ -3794,7 +3798,7 @@ int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet * q2spi_req.sync = 0; while (retries--) { mutex_lock(&q2spi->queue_lock); - ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, q2spi_pkt); + ret = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, q2spi_pkt); mutex_unlock(&q2spi->queue_lock); if (ret == -ENOMEM) { Q2SPI_DEBUG(q2spi, "%s Err ret:%d\n", __func__, ret); @@ -4649,7 +4653,7 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) q2spi_req.sync = 1; mutex_lock(&q2spi->queue_lock); - ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &q2spi_pkt); + ret = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &q2spi_pkt); mutex_unlock(&q2spi->queue_lock); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err failed ret:%d\n", __func__, ret); From 33bf11a9b4ea5b2b55b392040ffa57eb74f4ea40 Mon Sep 17 00:00:00 2001 From: Visweswara Tanuku Date: Thu, 15 Aug 2024 06:00:14 -0700 Subject: [PATCH 105/117] q2spi-msm-geni: Add 2msec delay for slave to complete sleep state Add 2msec delay for slave to complete sleep state after it received a sleep packet from host. If slave is going to sleep, any data packet from host to slave while slave's sleep process is ongoing will result in unwanted failures. Slave expects minimum 1msec to complete its sleep process. Hence added delay of 2msecs to let slave complete its sleep process. 
Change-Id: I9d444ce420580f3b7c8eb582fdaa28e3b0a10bda Signed-off-by: Visweswara Tanuku --- drivers/spi/q2spi-msm-geni.c | 18 +++--------------- drivers/spi/q2spi-msm.h | 4 ---- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index 5687c40eb5e3..973e5c8d685c 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -2207,7 +2207,6 @@ static int q2spi_transfer_check(struct q2spi_geni *q2spi, struct q2spi_request * */ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t len, loff_t *f_pos) { - int retries = Q2SPI_SLAVE_SLEEP_WAIT_TIME; struct q2spi_geni *q2spi; struct q2spi_request q2spi_req; struct q2spi_packet *cur_q2spi_pkt; @@ -2226,14 +2225,6 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t if (ret) goto err; - while (retries--) { - /* add 2msec delay for slave to process the sleep packet */ - if (mutex_is_locked(&q2spi->slave_sleep_lock)) - usleep_range(100, 150); - else - break; - } - if (q2spi_req.cmd == HRF_WRITE) { q2spi_req.addr = Q2SPI_HRF_PUSH_ADDRESS; q2spi_req.sync = 1; @@ -3124,6 +3115,9 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) atomic_set(&q2spi->sma_rd_pending, 0); } + /* add 2msec delay for slave to complete sleep process after it received a sleep packet */ + if (q2spi_pkt->is_client_sleep_pkt) + usleep_range(2000, 3000); send_msg_exit: mutex_unlock(&q2spi->send_msgs_lock); if (atomic_read(&q2spi->sma_rd_pending)) @@ -4402,7 +4396,6 @@ static int q2spi_geni_probe(struct platform_device *pdev) INIT_LIST_HEAD(&q2spi->tx_queue_list); mutex_init(&q2spi->gsi_lock); mutex_init(&q2spi->port_lock); - mutex_init(&q2spi->slave_sleep_lock); spin_lock_init(&q2spi->txn_lock); mutex_init(&q2spi->queue_lock); mutex_init(&q2spi->send_msgs_lock); @@ -4661,14 +4654,12 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) } Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, 
q2spi_pkt, q2spi_pkt->xfer->tid); q2spi_pkt->is_client_sleep_pkt = true; - mutex_lock(&q2spi->slave_sleep_lock); ret = __q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0); if (ret) { Q2SPI_DEBUG(q2spi, "%s __q2spi_transfer q2spi_pkt:%p ret%d\n", __func__, q2spi_pkt, ret); if (q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__); - mutex_unlock(&q2spi->slave_sleep_lock); return -ENOENT; } } @@ -4677,9 +4668,6 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt); q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__); atomic_set(&q2spi->slave_in_sleep, 1); - /* add 2msec delay for slave to process the sleep packet */ - usleep_range(2000, 3000); - mutex_unlock(&q2spi->slave_sleep_lock); Q2SPI_DEBUG(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid, atomic_read(&q2spi->slave_in_sleep)); err: diff --git a/drivers/spi/q2spi-msm.h b/drivers/spi/q2spi-msm.h index e25863cb1169..83cf08b298da 100644 --- a/drivers/spi/q2spi-msm.h +++ b/drivers/spi/q2spi-msm.h @@ -196,7 +196,6 @@ #define Q2SPI_MAX_DEV 2 #define Q2SPI_DEV_NAME_MAX_LEN 64 -#define Q2SPI_SLAVE_SLEEP_WAIT_TIME (20) #define Q2SPI_RESP_BUF_RETRIES (100) #define Q2SPI_INFO(q2spi_ptr, x...) do { \ @@ -529,7 +528,6 @@ struct q2spi_dma_transfer { * @q2spi_cr_txn_err: reflects Q2SPI_CR_TRANSACTION_ERROR in CR body * @q2spi_sleep_cmd_enable: reflects start sending the sleep command to slave * @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header - * @slave_sleep_lock: lock to wait for 3msec after sleep packet before initiating next transfer. 
* @is_start_seq_fail: start sequence fail due to slave not responding */ struct q2spi_geni { @@ -637,8 +635,6 @@ struct q2spi_geni { bool q2spi_cr_txn_err; bool q2spi_sleep_cmd_enable; bool q2spi_cr_hdr_err; - /* lock to protect sleep cmd to slave and next transfer */ - struct mutex slave_sleep_lock; bool is_start_seq_fail; }; From 2c81d5636846391d2ad565259c074631953c5a99 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Tue, 20 Aug 2024 04:55:13 -0700 Subject: [PATCH 106/117] q2spi-msm-geni: Perform GSI terminate sequence for sleep packet Currently GSI terminate sequence not executed when sleep packet fails with start sequence timeout and leading to GSI failures. Perform q2spi gsi terminate sequence for sleep packet failures. Change-Id: Ic9135e5eed9e8dbb616d796bc13ba5dfb0be9e66 Signed-off-by: Chandana Kishori Chiluveru --- drivers/spi/q2spi-msm-geni.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index 973e5c8d685c..a5cb66cd941c 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -4630,34 +4630,42 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s: PID=%d q2spi_sleep_cmd_enable:%d\n", __func__, current->pid, q2spi->q2spi_sleep_cmd_enable); + if (!q2spi->q2spi_sleep_cmd_enable) return 0; - if (atomic_read(&q2spi->slave_in_sleep)) { - Q2SPI_DEBUG(q2spi, "%s: Client in sleep\n", __func__); - return 0; - } if (mutex_is_locked(&q2spi->port_lock) || q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s: port_lock acquired or release is in progress\n", __func__); return 0; } + mutex_lock(&q2spi->queue_lock); + if (atomic_read(&q2spi->slave_in_sleep)) { + Q2SPI_DEBUG(q2spi, "%s: Client in sleep\n", __func__); + mutex_unlock(&q2spi->queue_lock); + return 0; + } + atomic_set(&q2spi->slave_in_sleep, 1); + q2spi_req.cmd = Q2SPI_HRF_SLEEP_CMD; q2spi_req.sync = 1; - mutex_lock(&q2spi->queue_lock); ret = 
q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &q2spi_pkt); mutex_unlock(&q2spi->queue_lock); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err failed ret:%d\n", __func__, ret); - goto err; + atomic_set(&q2spi->slave_in_sleep, 0); + return ret; } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid); q2spi_pkt->is_client_sleep_pkt = true; ret = __q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0); if (ret) { - Q2SPI_DEBUG(q2spi, "%s __q2spi_transfer q2spi_pkt:%p ret%d\n", - __func__, q2spi_pkt, ret); + atomic_set(&q2spi->slave_in_sleep, 0); + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p ret: %d\n", __func__, q2spi_pkt, ret); + if (ret == -ETIMEDOUT) + gpi_q2spi_terminate_all(q2spi->gsi->tx_c); if (q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__); return -ENOENT; @@ -4667,10 +4675,8 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid); q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt); q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__); - atomic_set(&q2spi->slave_in_sleep, 1); Q2SPI_DEBUG(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid, atomic_read(&q2spi->slave_in_sleep)); -err: return ret; } From b70cdcf3aad630af2d4e57cd67ed30d273ec3dee Mon Sep 17 00:00:00 2001 From: Srinath Pandey Date: Wed, 28 Aug 2024 01:27:29 +0530 Subject: [PATCH 107/117] defconfig: autogvm: Enable Aquantia PHY Enable CONFIG_AQUANTIA_PHY to support Aquantia phy in LA Guest. 
Change-Id: I0823019f287974187b917193215bc0f389527920 Signed-off-by: Srinath Pandey --- arch/arm64/configs/vendor/autogvm_GKI.config | 1 + autogvm.bzl | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/autogvm_GKI.config b/arch/arm64/configs/vendor/autogvm_GKI.config index 1ba391e219c6..e650791682ff 100644 --- a/arch/arm64/configs/vendor/autogvm_GKI.config +++ b/arch/arm64/configs/vendor/autogvm_GKI.config @@ -1,3 +1,4 @@ +CONFIG_AQUANTIA_PHY=m CONFIG_ARCH_QCOM=y CONFIG_ARM_PARAVIRT_SMMU_V3=m CONFIG_ARM_SMMU=m diff --git a/autogvm.bzl b/autogvm.bzl index 57f6aa337ae1..fab19d77e27c 100644 --- a/autogvm.bzl +++ b/autogvm.bzl @@ -54,6 +54,7 @@ def define_autogvm(): "drivers/net/mdio/mdio-mux.ko", "drivers/net/net_failover.ko", "drivers/net/pcs/pcs_xpcs.ko", + "drivers/net/phy/aquantia.ko", "drivers/net/phy/marvell.ko", "drivers/net/virtio_net.ko", "drivers/pci/controller/pci-msm-drv.ko", From 37b027fb58fc1debbbd1f29a5dfa9e18dda26007 Mon Sep 17 00:00:00 2001 From: Uttkarsh Aggarwal Date: Wed, 19 Jun 2024 11:17:35 +0530 Subject: [PATCH 108/117] usb: dwc3: dwc3-msm-core: configured dp/dm irqs - Ensure proper configuration of USB wakeup interrupts for DP_HS_PHY_IRQ and DM_HS_PHY_IRQ. - Apply level high trigger when in host mode without a connected device. - Use edge rising trigger otherwise. Change-Id: I5962baa53c5170c61bca7be389d38bf63894caea Signed-off-by: Uttkarsh Aggarwal --- drivers/usb/dwc3/dwc3-msm-core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/dwc3-msm-core.c b/drivers/usb/dwc3/dwc3-msm-core.c index bb0aafb804f4..26db1e20ee4b 100644 --- a/drivers/usb/dwc3/dwc3-msm-core.c +++ b/drivers/usb/dwc3/dwc3-msm-core.c @@ -3942,12 +3942,14 @@ static void configure_usb_wakeup_interrupts(struct dwc3_msm *mdwc, bool enable) */ configure_usb_wakeup_interrupt(mdwc, &mdwc->wakeup_irq[DP_HS_PHY_IRQ], - mdwc->in_host_mode ? + mdwc->in_host_mode && !(mdwc->use_pwr_event_for_wakeup + & PWR_EVENT_HS_WAKEUP) ? 
(IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH) : IRQ_TYPE_EDGE_RISING, true); configure_usb_wakeup_interrupt(mdwc, &mdwc->wakeup_irq[DM_HS_PHY_IRQ], - mdwc->in_host_mode ? + mdwc->in_host_mode && !(mdwc->use_pwr_event_for_wakeup + & PWR_EVENT_HS_WAKEUP) ? (IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH) : IRQ_TYPE_EDGE_RISING, true); } From b3bbfe7c4386b5044fef3e8016bd3f57c31b5f36 Mon Sep 17 00:00:00 2001 From: Srinath Pandey Date: Fri, 23 Aug 2024 18:31:25 +0530 Subject: [PATCH 109/117] net: stmmac: Re initialize Rx buffers reinit rx buffers in resume. Add support for driver remove\shutdown operation. Change-Id: I3df26647627817a574990c223382d8e5d4da4c03 Signed-off-by: Srinath Pandey --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 11 +++ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 84 +++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 7fd3fe6a83d6..6311f9a51b5a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2460,6 +2460,16 @@ static int qcom_ethqos_remove(struct platform_device *pdev) return ret; } +static void qcom_ethqos_shutdown_main(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (!dev) + return; + + qcom_ethqos_remove(pdev); +} + static int qcom_ethqos_suspend(struct device *dev) { struct qcom_ethqos *ethqos; @@ -2775,6 +2785,7 @@ static const struct dev_pm_ops qcom_ethqos_pm_ops = { static struct platform_driver qcom_ethqos_driver = { .probe = qcom_ethqos_probe, .remove = qcom_ethqos_remove, + .shutdown = qcom_ethqos_shutdown_main, .driver = { .name = DRV_NAME, .pm = &qcom_ethqos_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 58123275b30e..e1153465046c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1821,6 +1821,88 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q return xsk_get_pool_from_qid(priv->dev, queue); } +static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int i; + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (buf->page) { + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } + + if (priv->sph && buf->sec_page) { + page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); + buf->sec_page = NULL; + } + } + } + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + struct dma_desc *p; + + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + if (!buf->page) { + buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->page) + goto err_reinit_rx_buffers; + + buf->addr = page_pool_get_dma_addr(buf->page); + if (!buf->addr) { + pr_err("buf->addr is NULL\n"); + goto err_reinit_rx_buffers; + } + } + + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->sec_page) + goto err_reinit_rx_buffers; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + if (!buf->sec_addr) { + pr_err("buf->sec_addr is NULL\n"); + goto err_reinit_rx_buffers; + } + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + } else { + buf->sec_page = NULL; + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); + } + + stmmac_set_desc_addr(priv, p, buf->addr); + + if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) + 
stmmac_init_desc3(priv, p); + } + } + + return; + +err_reinit_rx_buffers: + pr_err(" error in reinit_rx_buffers\n"); + do { + dma_free_rx_skbufs(priv, dma_conf, queue); + if (queue == 0) + break; + } while (queue-- > 0); +} + /** * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) * @priv: driver private structure @@ -7834,6 +7916,8 @@ int stmmac_resume(struct device *dev) stmmac_reset_queues_param(priv); + stmmac_reinit_rx_buffers(priv, &priv->dma_conf); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv, &priv->dma_conf); From 85b2f4820c2177d3a15e7abaeeba35f58337461f Mon Sep 17 00:00:00 2001 From: Wesley Cheng Date: Tue, 9 Jul 2024 18:01:09 -0700 Subject: [PATCH 110/117] usb: dwc3: dwc3-msm-core: Switch to UTMI clk during host teardown After moving the flush_work() call after the usb_phy_notify_disconnect() to address USB type C compliance issues (LFPS generated during host teardown): commit fb3c680116db ("usb: dwc3: dwc3-msm-core: Notify PHY disconnect before doing flush_work") During USB device PIPO, the notify PHY disconnect call to the QMP PHY will cause the PHY to be powered down. As part of the stop host mode routine, the DWC3 core has to be placed back into device/peripheral mode. Some parts of the device mode initialization sequence, such as the core soft reset, requires that the PIPE clk (or controller source clock) be active, otherwise the core soft reset will time out. To mitigate the side effect, temporarily switch to the UTMI as the controller clock source, so that the PIPE clock can be powered off without any consequences. Once the move to DWC3 gadget mode is complete, re-enable the PIPE clock as the controller source. (after flush_work() is complete). 
Change-Id: I59de803d737581c0037348498b8447f872adb62f Signed-off-by: Wesley Cheng --- drivers/usb/dwc3/dwc3-msm-core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/usb/dwc3/dwc3-msm-core.c b/drivers/usb/dwc3/dwc3-msm-core.c index bb0aafb804f4..d53890a5a26c 100644 --- a/drivers/usb/dwc3/dwc3-msm-core.c +++ b/drivers/usb/dwc3/dwc3-msm-core.c @@ -7012,7 +7012,12 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) /* * Performing phy disconnect before flush work to * address TypeC certification--TD 4.7.4 failure. + * In order to avoid any controller start/halt + * sequences, switch to the UTMI as the clk source + * as the notify_disconnect() callback to the QMP + * PHY will power down the PIPE clock. */ + dwc3_msm_switch_utmi(mdwc, true); if (mdwc->ss_phy->flags & PHY_HOST_MODE) { usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER); @@ -7025,6 +7030,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) if (dwc->dr_mode == USB_DR_MODE_OTG) flush_work(&dwc->drd_work); + dwc3_msm_switch_utmi(mdwc, false); mdwc->hs_phy->flags &= ~PHY_HOST_MODE; usb_unregister_notify(&mdwc->host_nb); From 2e6653987c1118c5de65aa6622949d024e4ff3b2 Mon Sep 17 00:00:00 2001 From: Prashanth K Date: Fri, 28 Apr 2023 12:37:42 +0530 Subject: [PATCH 111/117] usb: gadget: f_gsi: bail out if opts is null Currently, functions gsi_inst_clean & gsi_free_inst utilises gsi_opts without any check, however there is a possibility that the opts structure could become NULL. In such case, due to lack of if checks can result in NULL pointer dereference. 
Change-Id: I548690e2eee377b5292f258972ae7e38417f3085 Signed-off-by: Prashanth K Signed-off-by: Udipto Goswami --- drivers/usb/gadget/function/f_gsi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index b6e6823e13c9..fc30b021e53b 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -3544,6 +3544,9 @@ static struct config_item_type gsi_func_rndis_type = { static void gsi_inst_clean(struct gsi_opts *opts) { + if (!opts) + return; + if (opts->gsi->c_port.cdev.dev) { struct cdev *cdev = &opts->gsi->c_port.cdev; int minor = MINOR(cdev->dev); @@ -3626,7 +3629,7 @@ static void gsi_free_inst(struct usb_function_instance *f) enum ipa_usb_teth_prot prot_id; struct f_gsi *gsi; - if (!opts->gsi) + if (!opts || !opts->gsi) return; prot_id = opts->gsi->prot_id; From a324ec868e7b6f17250eefb1857eb90df14801ff Mon Sep 17 00:00:00 2001 From: Chintan Kothari Date: Wed, 24 Jul 2024 14:45:04 +0530 Subject: [PATCH 112/117] ARM: config: msm: Enable config for buses drivers in NEO Enabling config for HS_UART,i2c,spi,gpi drivers for NEO soc. 
Change-Id: Id177635b376d3078999e296b792389eadd5a232d Signed-off-by: Chintan Kothari --- arch/arm64/configs/vendor/neo_la_GKI.config | 4 ++++ arch/arm64/configs/vendor/neo_la_consolidate.config | 1 + neo_la.bzl | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index 990b84c13322..f9ebf890cbf7 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -24,6 +24,7 @@ CONFIG_GH_VIRT_WATCHDOG=m CONFIG_GUNYAH_DRIVERS=y # CONFIG_HVC_GUNYAH is not set CONFIG_HWSPINLOCK_QCOM=m +CONFIG_I2C_MSM_GENI=m CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m CONFIG_INTERCONNECT_QCOM_DEBUG=m @@ -36,6 +37,7 @@ CONFIG_MFD_I2C_PMIC=m # CONFIG_MODULE_SIG_ALL is not set CONFIG_MSM_BOOT_STATS=m CONFIG_MSM_CORE_HANG_DETECT=m +CONFIG_MSM_GPI_DMA=m CONFIG_MSM_PERFORMANCE=m CONFIG_MSM_SYSSTATS=m CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000 @@ -95,6 +97,8 @@ CONFIG_REGULATOR_QCOM_PM8008=m CONFIG_REGULATOR_QTI_FIXED_VOLTAGE=m CONFIG_REGULATOR_RPMH=m CONFIG_SCHED_WALT=m +CONFIG_SERIAL_MSM_GENI=m +CONFIG_SPI_MSM_GENI=m CONFIG_SXR_CAMCC_NEO=m CONFIG_SXR_DEBUGCC_NEO=m CONFIG_SXR_DISPCC_NEO=m diff --git a/arch/arm64/configs/vendor/neo_la_consolidate.config b/arch/arm64/configs/vendor/neo_la_consolidate.config index 83455e7a85ec..8c151a331548 100644 --- a/arch/arm64/configs/vendor/neo_la_consolidate.config +++ b/arch/arm64/configs/vendor/neo_la_consolidate.config @@ -9,6 +9,7 @@ CONFIG_LOCALVERSION="-consolidate" CONFIG_LOCKDEP=y CONFIG_LOCKUP_DETECTOR=y CONFIG_LOCK_STAT=y +CONFIG_MSM_GPI_DMA_DEBUG=y CONFIG_PAGE_POISONING=y CONFIG_PM_DEBUG=y CONFIG_PM_SLEEP_DEBUG=y diff --git a/neo_la.bzl b/neo_la.bzl index 87e7f3852e7a..0818b39e65a2 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -22,10 +22,12 @@ def define_neo_la(): "drivers/cpufreq/qcom-cpufreq-hw-debug.ko", "drivers/cpuidle/governors/qcom_lpm.ko", "drivers/dma-buf/heaps/qcom_dma_heaps.ko", + 
"drivers/dma/qcom/msm_gpi.ko", "drivers/edac/kryo_arm64_edac.ko", "drivers/edac/qcom_edac.ko", "drivers/firmware/qcom-scm.ko", "drivers/hwspinlock/qcom_hwspinlock.ko", + "drivers/i2c/busses/i2c-msm-geni.ko", "drivers/interconnect/qcom/icc-bcm-voter.ko", "drivers/interconnect/qcom/icc-debug.ko", "drivers/interconnect/qcom/icc-rpmh.ko", @@ -69,6 +71,8 @@ def define_neo_la(): "drivers/soc/qcom/secure_buffer.ko", "drivers/soc/qcom/smem.ko", "drivers/soc/qcom/socinfo.ko", + "drivers/spi/spi-msm-geni.ko", + "drivers/tty/serial/msm_geni_serial.ko", "drivers/virt/gunyah/gh_msgq.ko", "drivers/virt/gunyah/gh_rm_drv.ko", "drivers/virt/gunyah/gh_virt_wdt.ko", From 573d0730c518215b97517f836cd46737ad5f0efa Mon Sep 17 00:00:00 2001 From: Shubham Chouhan Date: Wed, 24 Jul 2024 15:35:01 +0530 Subject: [PATCH 113/117] defconfig: Add support for usb related configs in Neo Add support for usb related defconfigs on neo. Change-Id: I3b6270e4f2f543af77af2d6357aa7011074023b3 Signed-off-by: Shubham Chouhan --- arch/arm64/configs/vendor/neo_la_GKI.config | 9 +++++++++ modules.list.msm.neo-la | 1 - neo_la.bzl | 7 +++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index f9ebf890cbf7..9e4f99dbef14 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -24,6 +24,7 @@ CONFIG_GH_VIRT_WATCHDOG=m CONFIG_GUNYAH_DRIVERS=y # CONFIG_HVC_GUNYAH is not set CONFIG_HWSPINLOCK_QCOM=m +CONFIG_I2C_EUSB2_REPEATER=m CONFIG_I2C_MSM_GENI=m CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m @@ -106,4 +107,12 @@ CONFIG_SXR_GCC_NEO=m CONFIG_SXR_GPUCC_NEO=m CONFIG_SXR_TCSRCC_NEO=m CONFIG_SXR_VIDEOCC_NEO=m +CONFIG_USB_CONFIGFS_F_DIAG=m +CONFIG_USB_CONFIGFS_F_QDSS=m +CONFIG_USB_DWC3_MSM=m +CONFIG_USB_F_DIAG=m +CONFIG_USB_F_QDSS=m +CONFIG_USB_MSM_EUSB2_PHY=m +CONFIG_USB_MSM_SSPHY_QMP=m +CONFIG_USB_REPEATER=m CONFIG_VIRT_DRIVERS=y diff --git 
a/modules.list.msm.neo-la b/modules.list.msm.neo-la index 4967e90af3c2..6c4a0904e476 100644 --- a/modules.list.msm.neo-la +++ b/modules.list.msm.neo-la @@ -55,7 +55,6 @@ pinctrl-neo.ko phy-generic.ko phy-qcom-emu.ko qcom_dma_heaps.ko -dwc3-msm.ko sdhci-msm-scaling.ko cqhci.ko qcom-vadc-common.ko diff --git a/neo_la.bzl b/neo_la.bzl index 0818b39e65a2..fe0b9d22cf73 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -73,6 +73,13 @@ def define_neo_la(): "drivers/soc/qcom/socinfo.ko", "drivers/spi/spi-msm-geni.ko", "drivers/tty/serial/msm_geni_serial.ko", + "drivers/usb/dwc3/dwc3-msm.ko", + "drivers/usb/gadget/function/usb_f_diag.ko", + "drivers/usb/gadget/function/usb_f_qdss.ko", + "drivers/usb/phy/phy-msm-snps-eusb2.ko", + "drivers/usb/phy/phy-msm-ssusb-qmp.ko", + "drivers/usb/repeater/repeater.ko", + "drivers/usb/repeater/repeater-i2c-eusb2.ko", "drivers/virt/gunyah/gh_msgq.ko", "drivers/virt/gunyah/gh_rm_drv.ko", "drivers/virt/gunyah/gh_virt_wdt.ko", From cc7ea4f7012d9547d01f6722e64e99b38a07806d Mon Sep 17 00:00:00 2001 From: Pranav Mahesh Phansalkar Date: Mon, 29 Jan 2024 14:15:52 +0530 Subject: [PATCH 114/117] net: qrtr: Add condition to check data length while logging For non QMI packets having data length less than eight bytes, skb_copy_bits fails to copy packet data to log buffer. So, Add condition to check data length. If the data length is less than eight bytes, send actual data length to skb_copy_bits. Change-Id: I2181016f224952d214a8f39fb06b47ace01dc51a Signed-off-by: Pranav Mahesh Phansalkar --- net/qrtr/af_qrtr.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index a3e3a0b9b9e4..487ab6c360f6 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -3,6 +3,7 @@ * Copyright (c) 2015, Sony Mobile Communications Inc. * Copyright (c) 2013, The Linux Foundation. All rights reserved. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include @@ -246,7 +247,8 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr, type = le32_to_cpu(hdr->type); if (type == QRTR_TYPE_DATA) { - skb_copy_bits(skb, QRTR_HDR_MAX_SIZE, &pl_buf, sizeof(pl_buf)); + skb_copy_bits(skb, sizeof(*hdr), &pl_buf, hdr->size > sizeof(pl_buf) ? + sizeof(pl_buf) : hdr->size); QRTR_INFO(node->ilc, "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n", hdr->size, hdr->confirm_rx, @@ -294,7 +296,8 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb) cb = (struct qrtr_cb *)skb->cb; if (cb->type == QRTR_TYPE_DATA) { - skb_copy_bits(skb, 0, &pl_buf, sizeof(pl_buf)); + skb_copy_bits(skb, 0, &pl_buf, skb->len > sizeof(pl_buf) ? + sizeof(pl_buf) : skb->len); QRTR_INFO(node->ilc, "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n", skb->len, cb->confirm_rx, cb->src_node, cb->src_port, From d2744b2a67bb6ec1f52d1a00510904471e38e46e Mon Sep 17 00:00:00 2001 From: Suraj Jaiswal Date: Mon, 2 Sep 2024 12:43:26 +0530 Subject: [PATCH 115/117] net: stmmac: Fix LPM issue on gen3 Fix LPM issue on gen3. 
Change-Id: Ib7f4953e2cd25db3ace8a5912490ad528b6a0059 Signed-off-by: Suraj Jaiswal --- .../stmicro/stmmac/dwmac-qcom-ethqos.c | 44 +++++++++++-------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 695dfa5d1e88..3c2a0be204f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -2589,32 +2589,38 @@ static int qcom_ethqos_enable_clks(struct qcom_ethqos *ethqos, struct device *de goto error_rgmii_get; } } - ethqos->sgmiref_clk = devm_clk_get(dev, "sgmi_ref"); - if (IS_ERR(ethqos->sgmiref_clk)) { - dev_warn(dev, "Failed sgmi_ref\n"); - ret = PTR_ERR(ethqos->sgmiref_clk); - goto error_sgmi_ref; - } else { - ret = clk_prepare_enable(ethqos->sgmiref_clk); - if (ret) + if (priv->plat->interface == PHY_INTERFACE_MODE_SGMII || + priv->plat->interface == PHY_INTERFACE_MODE_USXGMII) { + ethqos->sgmiref_clk = devm_clk_get(dev, "sgmi_ref"); + if (IS_ERR(ethqos->sgmiref_clk)) { + dev_warn(dev, "Failed sgmi_ref\n"); + ret = PTR_ERR(ethqos->sgmiref_clk); goto error_sgmi_ref; - } - ethqos->phyaux_clk = devm_clk_get(dev, "phyaux"); - if (IS_ERR(ethqos->phyaux_clk)) { - dev_warn(dev, "Failed phyaux\n"); - ret = PTR_ERR(ethqos->phyaux_clk); - goto error_phyaux_ref; - } else { - ret = clk_prepare_enable(ethqos->phyaux_clk); - if (ret) + } else { + ret = clk_prepare_enable(ethqos->sgmiref_clk); + if (ret) + goto error_sgmi_ref; + } + ethqos->phyaux_clk = devm_clk_get(dev, "phyaux"); + if (IS_ERR(ethqos->phyaux_clk)) { + dev_warn(dev, "Failed phyaux\n"); + ret = PTR_ERR(ethqos->phyaux_clk); goto error_phyaux_ref; + } else { + ret = clk_prepare_enable(ethqos->phyaux_clk); + if (ret) + goto error_phyaux_ref; + } } return 0; + if (priv->plat->interface == PHY_INTERFACE_MODE_SGMII || + priv->plat->interface == PHY_INTERFACE_MODE_USXGMII) { error_phyaux_ref: - 
clk_disable_unprepare(ethqos->sgmiref_clk); + clk_disable_unprepare(ethqos->sgmiref_clk); error_sgmi_ref: - clk_disable_unprepare(ethqos->rgmii_clk); + clk_disable_unprepare(ethqos->rgmii_clk); + } error_rgmii_get: clk_disable_unprepare(priv->plat->pclk); error_pclk_get: From 4219aa4a08dad0959afb920fa4fb5c4c53f87e26 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Tue, 27 Aug 2024 23:04:40 -0700 Subject: [PATCH 116/117] q2spi-msm-geni: Ensure Channel errors handled sequentially after START sequence fail When slave is in sleep q2spi host transfer will fail with GSI start sequence failure. In this scenario two events reported from GSI channel error and TX dma completion event 0x22. Since GSI reports these events separately if start sequence fail is not processed before channel error gpi_q2spi_terminate_all is called twice and leading to GSI failures. Ensure Channel errors in GSI recover path are handled sequentially only after START sequence fail is processed. Change-Id: Ie85528b6354241153330c403ba026c5006d5c78e Signed-off-by: Chandana Kishori Chiluveru --- drivers/spi/q2spi-gsi.c | 17 +++++++++++------ drivers/spi/q2spi-msm-geni.c | 4 ++++ drivers/spi/q2spi-msm.h | 2 ++ 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/spi/q2spi-gsi.c b/drivers/spi/q2spi-gsi.c index 1e233c97d07c..0af382331462 100644 --- a/drivers/spi/q2spi-gsi.c +++ b/drivers/spi/q2spi-gsi.c @@ -130,6 +130,7 @@ static void q2spi_check_m_irq_err_status(struct q2spi_geni *q2spi, u32 cb_status Q2SPI_DEBUG(q2spi, "%s Q2SPI_CHEKSUM_FAIL\n", __func__); if (status & Q2SPI_START_SEQ_TIMEOUT) { q2spi->is_start_seq_fail = true; + complete_all(&q2spi->wait_comp_start_fail); Q2SPI_DEBUG(q2spi, "%s Q2SPI_START_SEQ_TIMEOUT\n", __func__); } if (status & Q2SPI_STOP_SEQ_TIMEOUT) @@ -495,15 +496,15 @@ int check_gsi_transfer_completion_db_rx(struct q2spi_geni *q2spi) int check_gsi_transfer_completion(struct q2spi_geni *q2spi) { int i = 0, ret = 0; - unsigned long timeout = 0, 
xfer_timeout = 0; + unsigned long timeleft = 0, xfer_timeout = 0; xfer_timeout = XFER_TIMEOUT_OFFSET; Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__, q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot); for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) { - timeout = + timeleft = wait_for_completion_timeout(&q2spi->tx_cb, msecs_to_jiffies(xfer_timeout)); - if (!timeout) { + if (!timeleft) { Q2SPI_DEBUG(q2spi, "%s PID:%d Tx[%d] timeout\n", __func__, current->pid, i); ret = -ETIMEDOUT; goto err_gsi_geni_transfer; @@ -513,9 +514,9 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) } for (i = 0 ; i < q2spi->gsi->num_rx_eot; i++) { - timeout = + timeleft = wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout)); - if (!timeout) { + if (!timeleft) { Q2SPI_DEBUG(q2spi, "%s PID:%d Rx[%d] timeout\n", __func__, current->pid, i); ret = -ETIMEDOUT; goto err_gsi_geni_transfer; @@ -524,11 +525,15 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) } } err_gsi_geni_transfer: - if (q2spi->gsi->qup_gsi_err || !timeout) { + if (q2spi->gsi->qup_gsi_err || !timeleft) { ret = -ETIMEDOUT; Q2SPI_DEBUG(q2spi, "%s Err QUP Gsi Error\n", __func__); q2spi->gsi->qup_gsi_err = false; q2spi->setup_config0 = false; + /* Block on TX completion callback for start sequence failure */ + wait_for_completion_interruptible_timeout + (&q2spi->wait_comp_start_fail, + msecs_to_jiffies(TIMEOUT_MSECONDS)); if (!q2spi->is_start_seq_fail) gpi_q2spi_terminate_all(q2spi->gsi->tx_c); } diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index a5cb66cd941c..4b64d1c66d91 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -2267,7 +2267,9 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t pm_runtime_set_suspended(q2spi->dev); goto err; } + q2spi->is_start_seq_fail = false; + reinit_completion(&q2spi->wait_comp_start_fail); Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__, 
atomic_read(&q2spi->dev->power.usage_count)); q2spi_wait_for_doorbell_setup_ready(q2spi); @@ -2286,6 +2288,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t ret = -ENOMEM; goto err; } + Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__, flow_id); ret = q2spi_transfer_with_retries(q2spi, q2spi_req, cur_q2spi_pkt, len, flow_id, user_buf); Q2SPI_DEBUG(q2spi, "%s transfer_with_retries ret:%d\n", __func__, ret); @@ -4421,6 +4424,7 @@ static int q2spi_geni_probe(struct platform_device *pdev) atomic_set(&q2spi->sma_rd_pending, 0); init_completion(&q2spi->sma_wr_comp); init_completion(&q2spi->sma_rd_comp); + init_completion(&q2spi->wait_comp_start_fail); /* Pre allocate buffers for transfers */ ret = q2spi_pre_alloc_buffers(q2spi); diff --git a/drivers/spi/q2spi-msm.h b/drivers/spi/q2spi-msm.h index 83cf08b298da..e0b4db019da5 100644 --- a/drivers/spi/q2spi-msm.h +++ b/drivers/spi/q2spi-msm.h @@ -529,6 +529,7 @@ struct q2spi_dma_transfer { * @q2spi_sleep_cmd_enable: reflects start sending the sleep command to slave * @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header * @is_start_seq_fail: start sequence fail due to slave not responding + * @wait_comp_start_fail: completion for transfer callback during start sequence failure */ struct q2spi_geni { struct device *wrapper_dev; @@ -636,6 +637,7 @@ struct q2spi_geni { bool q2spi_sleep_cmd_enable; bool q2spi_cr_hdr_err; bool is_start_seq_fail; + struct completion wait_comp_start_fail; }; /** From 9e0b7733ce94e4ced3783356c2b1cd5cf1444f77 Mon Sep 17 00:00:00 2001 From: Kishor Krishna Bhat Date: Fri, 30 Aug 2024 11:00:02 +0530 Subject: [PATCH 117/117] defconfig: sdmsteppeauto: Enable gpi config Enable gpi config to allow the uart to access gpi. 
Change-Id: I37bb00f25f3f5caed974b06828a118739a857e29 Signed-off-by: Kishor Krishna Bhat --- arch/arm64/configs/vendor/sdmsteppeauto_GKI.config | 1 + arch/arm64/configs/vendor/sdmsteppeauto_consolidate.config | 1 + sdmsteppeauto.bzl | 1 + 3 files changed, 3 insertions(+) diff --git a/arch/arm64/configs/vendor/sdmsteppeauto_GKI.config b/arch/arm64/configs/vendor/sdmsteppeauto_GKI.config index 4b888b403ec9..b6caa38d6c8a 100644 --- a/arch/arm64/configs/vendor/sdmsteppeauto_GKI.config +++ b/arch/arm64/configs/vendor/sdmsteppeauto_GKI.config @@ -87,6 +87,7 @@ CONFIG_MMC_SDHCI_MSM=m # CONFIG_MODULE_SIG_ALL is not set CONFIG_MSM_BOOT_STATS=m CONFIG_MSM_CORE_HANG_DETECT=m +CONFIG_MSM_GPI_DMA=m CONFIG_MSM_PERFORMANCE=m CONFIG_MSM_QMP=m CONFIG_MSM_QUSB_PHY=m diff --git a/arch/arm64/configs/vendor/sdmsteppeauto_consolidate.config b/arch/arm64/configs/vendor/sdmsteppeauto_consolidate.config index 985d0c49587f..bdcfc92d092c 100644 --- a/arch/arm64/configs/vendor/sdmsteppeauto_consolidate.config +++ b/arch/arm64/configs/vendor/sdmsteppeauto_consolidate.config @@ -2,6 +2,7 @@ CONFIG_ATOMIC64_SELFTEST=m CONFIG_LKDTM=m CONFIG_LOCALVERSION="-gki-consolidate" CONFIG_LOCK_TORTURE_TEST=m +CONFIG_MSM_GPI_DMA_DEBUG=y CONFIG_RCU_TORTURE_TEST=m CONFIG_TEST_USER_COPY=m CONFIG_USB_LINK_LAYER_TEST=m diff --git a/sdmsteppeauto.bzl b/sdmsteppeauto.bzl index b7b11924b823..3ae81d0d74f4 100644 --- a/sdmsteppeauto.bzl +++ b/sdmsteppeauto.bzl @@ -25,6 +25,7 @@ def define_sdmsteppeauto(): "drivers/cpufreq/qcom-cpufreq-hw.ko", "drivers/cpuidle/governors/qcom_lpm.ko", "drivers/dma-buf/heaps/qcom_dma_heaps.ko", + "drivers/dma/qcom/msm_gpi.ko", "drivers/extcon/extcon-usb-gpio.ko", "drivers/firmware/qcom-scm.ko", "drivers/gpu/drm/bridge/analogix/anx7625.ko",