Merge "bus: mhi: host: Move MHI_MAX_MTU to external header file"

qctecmdr authored 2022-10-14 04:55:21 -07:00, committed by Gerrit - the friendly Code Review server
commit 509c8ede30
6 changed files with 137 additions and 81 deletions


@@ -299,17 +299,22 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
 }
 
 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
-			 struct image_info *image_info)
+			 struct image_info **image_info)
 {
 	int i;
-	struct mhi_buf *mhi_buf = image_info->mhi_buf;
+	struct mhi_buf *mhi_buf = (*image_info)->mhi_buf;
 
-	for (i = 0; i < image_info->entries; i++, mhi_buf++)
+	if (mhi_cntrl->img_pre_alloc)
+		return;
+
+	for (i = 0; i < (*image_info)->entries; i++, mhi_buf++)
 		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
 				  mhi_buf->buf, mhi_buf->dma_addr);
 
-	kfree(image_info->mhi_buf);
-	kfree(image_info);
+	kfree((*image_info)->mhi_buf);
+	kfree(*image_info);
+	*image_info = NULL;
 }
 
 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
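The signature change in this hunk lets the free routine clear the caller's pointer itself, so call sites can no longer be left holding a stale fbc_image/rddm_image reference. A standalone sketch of the idiom (hypothetical buf_table type, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	struct buf_table {		/* stand-in for struct image_info */
		void *bufs;
		size_t entries;
	};

	/* Free the object and NULL the caller's reference in one place */
	static void buf_table_free(struct buf_table **table)
	{
		if (!*table)
			return;
		free((*table)->bufs);
		free(*table);
		*table = NULL;		/* callers cannot reuse a stale pointer */
	}

	int main(void)
	{
		struct buf_table *t = calloc(1, sizeof(*t));

		if (!t)
			return 1;
		t->bufs = calloc(4, 16);
		buf_table_free(&t);	/* t is NULL afterwards */
		printf("t = %p\n", (void *)t);
		return 0;
	}

Passing &ptr instead of ptr is what lets the final *image_info = NULL above take effect at every call site, which is why the explicit NULL assignments disappear from the callers later in this diff.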
@@ -322,6 +327,9 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 	struct image_info *img_info;
 	struct mhi_buf *mhi_buf;
 
+	if (mhi_cntrl->img_pre_alloc)
+		return 0;
+
 	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
 	if (!img_info)
 		return -ENOMEM;
@@ -435,8 +443,20 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 
 	ret = request_firmware(&firmware, fw_name, dev);
 	if (ret) {
-		dev_err(dev, "Error loading firmware: %d\n", ret);
-		goto error_fw_load;
+		if (!mhi_cntrl->fallback_fw_image) {
+			dev_err(dev, "Error loading firmware: %d\n", ret);
+			goto error_fw_load;
+		}
+
+		ret = request_firmware(&firmware,
+				       mhi_cntrl->fallback_fw_image,
+				       dev);
+		if (ret) {
+			dev_err(dev, "Error loading fallback firmware: %d\n",
+				ret);
+			goto error_fw_load;
+		}
+		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FALLBACK_IMG);
 	}
 
 	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
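The retry added above only fires when the controller registered a fallback_fw_image name; the MHI_CB_FALLBACK_IMG callback then tells the controller driver it booted on the backup image. A condensed sketch of the same shape (hypothetical helper name, error handling trimmed):

	/* Sketch only: try the primary image, then the optional fallback,
	 * and report a fallback boot through the controller's status_cb. */
	static int load_fw_with_fallback(struct mhi_controller *mhi_cntrl,
					 const char *fw_name,
					 const struct firmware **fw)
	{
		struct device *dev = &mhi_cntrl->mhi_dev->dev;
		int ret;

		ret = request_firmware(fw, fw_name, dev);
		if (!ret)
			return 0;		/* primary image found */

		if (!mhi_cntrl->fallback_fw_image)
			return ret;		/* nothing to fall back to */

		ret = request_firmware(fw, mhi_cntrl->fallback_fw_image, dev);
		if (ret)
			return ret;		/* both images missing */

		/* e.g. let the controller driver flag a degraded boot */
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FALLBACK_IMG);
		return 0;
	}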
@@ -504,10 +524,8 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	return;
 
 error_ready_state:
-	if (mhi_cntrl->fbc_download) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-		mhi_cntrl->fbc_image = NULL;
-	}
+	if (mhi_cntrl->fbc_download)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
 
 error_fw_load:
 	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  */
@@ -563,6 +563,8 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 		return ret;
 	}
 
+	mhi_misc_init_mmio(mhi_cntrl);
+
 	return 0;
 }
@@ -687,8 +689,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 				&mhi_cntrl->mhi_chan[mhi_event->chan];
 		}
 
-		/* Priority is fixed to 1 for now */
-		mhi_event->priority = 1;
+		mhi_event->priority = event_cfg->priority;
 
 		mhi_event->db_cfg.brstmode = event_cfg->mode;
 		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
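priority now flows from the controller's event ring table instead of being pinned to 1. A hypothetical struct mhi_event_config pair showing the intent, control events serviced ahead of bulk data (every value other than .priority is illustrative):

	static struct mhi_event_config example_events[] = {
		{
			.num_elements = 32,
			.irq = 1,
			.mode = MHI_DB_BRST_DISABLE,
			.data_type = MHI_ER_CTRL,
			.priority = MHI_ER_PRIORITY_HI_NOSLEEP, /* ctrl first */
		},
		{
			.num_elements = 256,
			.irq_moderation_ms = 1,
			.irq = 2,
			.mode = MHI_DB_BRST_DISABLE,
			.data_type = MHI_ER_DATA,
			.priority = MHI_ER_PRIORITY_DEFAULT_NOSLEEP,
		},
	};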
@@ -708,6 +709,9 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 		case MHI_ER_CTRL:
 			mhi_event->process_event = mhi_process_ctrl_ev_ring;
 			break;
+		case MHI_ER_BW_SCALE:
+			mhi_event->process_event = mhi_process_misc_bw_ev_ring;
+			break;
 		default:
 			dev_err(dev, "Event Ring type not supported\n");
 			goto error_ev_cfg;
@@ -1041,8 +1045,15 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
 	unsigned int i;
 
 	mhi_deinit_free_irq(mhi_cntrl);
 
+	/* Free the memory controller wanted to preserve for BHIe images */
+	if (mhi_cntrl->img_pre_alloc) {
+		mhi_cntrl->img_pre_alloc = false;
+		if (mhi_cntrl->fbc_image)
+			mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
+		if (mhi_cntrl->rddm_image)
+			mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);
+	}
+
 	mhi_destroy_debugfs(mhi_cntrl);
 	destroy_workqueue(mhi_cntrl->hiprio_wq);
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_event);
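The guard pattern introduced here makes the regular free paths no-ops while img_pre_alloc is set, and unregister clears the flag first so the final free is real. A standalone sketch of that ordering (hypothetical types, not driver code):

	#include <stdbool.h>
	#include <stdlib.h>

	struct ctrl {
		bool img_pre_alloc;	/* keep buffers across power cycles */
		void *image;
	};

	/* Regular free paths become no-ops while the flag is set */
	static void image_free(struct ctrl *c, void **image)
	{
		if (c->img_pre_alloc)
			return;		/* preserved for the next power-up */
		free(*image);
		*image = NULL;
	}

	/* Final teardown clears the flag first, so this free is real */
	static void ctrl_unregister(struct ctrl *c)
	{
		c->img_pre_alloc = false;
		if (c->image)
			image_free(c, &c->image);
	}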
@@ -1163,15 +1174,8 @@ EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
 
 void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
 {
-	if (mhi_cntrl->fbc_image) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-		mhi_cntrl->fbc_image = NULL;
-	}
-
-	if (mhi_cntrl->rddm_image) {
-		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
-		mhi_cntrl->rddm_image = NULL;
-	}
+	if (mhi_cntrl->rddm_image)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image);
 
 	mhi_cntrl->bhi = NULL;
 	mhi_cntrl->bhie = NULL;
@@ -1359,6 +1363,7 @@ static int mhi_driver_remove(struct device *dev)
 		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
 		     ch_state[dir] == MHI_CH_STATE_STOP) &&
+		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED &&
 		    !mhi_chan->offload_ch)
 			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);


@@ -129,7 +129,6 @@ enum mhi_pm_state {
 #define CMD_EL_PER_RING 128
 #define PRIMARY_CMD_RING 0
 #define MHI_DEV_WAKE_DB 127
-#define MHI_MAX_MTU 0xffff
 #define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
 
 enum mhi_er_type {
@@ -277,7 +276,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl);
 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 			 struct image_info **image_info, size_t alloc_size);
 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
-			 struct image_info *image_info);
+			 struct image_info **image_info);
 
 /* Power management APIs */
 enum mhi_pm_state __must_check mhi_tryset_pm_state(


@@ -426,10 +426,11 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 	}
 }
 
-irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+irqreturn_t mhi_irq_handler(int irq_number, void *priv)
 {
-	struct mhi_event *mhi_event = dev;
+	struct mhi_event *mhi_event = priv;
 	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	struct mhi_event_ctxt *er_ctxt =
 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 	struct mhi_ring *ev_ring = &mhi_event->ring;
@@ -455,8 +456,20 @@ irqreturn_t mhi_irq_handler(int irq_number, void *priv)
 
 		if (mhi_dev)
 			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
-	} else {
-		tasklet_schedule(&mhi_event->task);
+
+		return IRQ_HANDLED;
 	}
 
+	switch (mhi_event->priority) {
+	case MHI_ER_PRIORITY_HI_NOSLEEP:
+		tasklet_hi_schedule(&mhi_event->task);
+		break;
+	case MHI_ER_PRIORITY_DEFAULT_NOSLEEP:
+		tasklet_schedule(&mhi_event->task);
+		break;
+	default:
+		dev_dbg(dev, "skip unknown priority event\n");
+		break;
+	}
+
 	return IRQ_HANDLED;
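tasklet_hi_schedule() queues the bottom half on HI_SOFTIRQ, which the softirq loop services before TASKLET_SOFTIRQ, so MHI_ER_PRIORITY_HI_NOSLEEP rings are processed ahead of default-priority ones. A minimal kernel-style sketch of the dispatch (hypothetical module code, using the current DECLARE_TASKLET() callback API):

	#include <linux/interrupt.h>

	static void demo_bh(struct tasklet_struct *t)
	{
		pr_info("event ring bottom half\n");
	}

	static DECLARE_TASKLET(demo_task, demo_bh);

	/* Called from hard-irq context, mirroring the switch above */
	static void schedule_by_priority(bool hi_priority)
	{
		if (hi_priority)
			tasklet_hi_schedule(&demo_task); /* HI_SOFTIRQ, runs first */
		else
			tasklet_schedule(&demo_task);    /* TASKLET_SOFTIRQ */
	}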


@@ -156,14 +156,52 @@ static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
 	mhi_cntrl->wake_put(mhi_cntrl, true);
 }
 
+/* Add event ring elements and ring er db */
+static void mhi_setup_event_rings(struct mhi_controller *mhi_cntrl, bool add_el)
+{
+	struct mhi_event *mhi_event;
+	int i;
+	bool skip_er_setup;
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev)
+			continue;
+
+		/* skip HW event ring setup in ready state */
+		if (mhi_cntrl->dev_state == MHI_STATE_READY)
+			skip_er_setup = mhi_event->hw_ring;
+		else
+			skip_er_setup = !mhi_event->hw_ring;
+
+		/* if no er element to add, ring all er dbs */
+		if (add_el && skip_er_setup)
+			continue;
+
+		if (add_el) {
+			ring->wp = ring->base + ring->len - ring->el_size;
+			*ring->ctxt_wp =
+				ring->iommu_base + ring->len - ring->el_size;
+			/* Update all cores */
+			smp_wmb();
+		}
+
+		/* Ring the event ring db */
+		spin_lock_irq(&mhi_event->lock);
+		mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+	}
+}
+
 /* Handle device ready state transition */
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 {
-	struct mhi_event *mhi_event;
 	enum mhi_pm_state cur_state;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
-	int ret, i;
+	int ret = -EINVAL;
 
 	/* Check if device entered error state */
 	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
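mhi_setup_event_rings() folds the three near-identical loops below (ready, M0 and mission-mode transitions) into one place. The write-pointer math hands every element of an event ring to the device by parking wp on the last element slot, and smp_wmb() orders the context update against the doorbell write. A standalone illustration of the pointer math, with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t base = 0x1000;		/* ring base (illustrative) */
		uint64_t el_size = 16;		/* bytes per ring element */
		uint64_t nr_el = 128;
		uint64_t len = nr_el * el_size;

		/* wp = base + len - el_size: the last valid element slot,
		 * i.e. every element is owned by the device. */
		uint64_t wp = base + len - el_size;

		printf("wp = %#llx (element %llu of %llu)\n",
		       (unsigned long long)wp,
		       (unsigned long long)((wp - base) / el_size),
		       (unsigned long long)nr_el);
		return 0;
	}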
@@ -212,25 +250,8 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 		goto error_mmio;
 	}
 
-	/* Add elements to all SW event rings */
-	mhi_event = mhi_cntrl->mhi_event;
-	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		struct mhi_ring *ring = &mhi_event->ring;
-
-		/* Skip if this is an offload or HW event */
-		if (mhi_event->offload_ev || mhi_event->hw_ring)
-			continue;
-
-		ring->wp = ring->base + ring->len - ring->el_size;
-		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
-		/* Update all cores */
-		smp_wmb();
-
-		/* Ring the event ring db */
-		spin_lock_irq(&mhi_event->lock);
-		mhi_ring_er_db(mhi_event);
-		spin_unlock_irq(&mhi_event->lock);
-	}
+	/* add SW event ring elements and ring SW event ring dbs */
+	mhi_setup_event_rings(mhi_cntrl, true);
 
 	/* Set MHI to M0 state */
 	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
@@ -267,18 +288,10 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
 
 	/* Ring all event rings and CMD ring only if we're in mission mode */
 	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
-		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
 		struct mhi_cmd *mhi_cmd =
 			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
 
-		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-			if (mhi_event->offload_ev)
-				continue;
-
-			spin_lock_irq(&mhi_event->lock);
-			mhi_ring_er_db(mhi_event);
-			spin_unlock_irq(&mhi_event->lock);
-		}
+		mhi_setup_event_rings(mhi_cntrl, false);
 
 		/* Only ring primary cmd ring if ring is not empty */
 		spin_lock_irq(&mhi_cmd->lock);
@@ -377,10 +390,9 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
 /* Handle device Mission Mode transition */
 static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 {
-	struct mhi_event *mhi_event;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
-	int i, ret;
+	int ret;
 
 	dev_dbg(dev, "Processing Mission Mode transition\n");
@@ -415,24 +427,8 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 		goto error_mission_mode;
 	}
 
-	/* Add elements to all HW event rings */
-	mhi_event = mhi_cntrl->mhi_event;
-	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
-		struct mhi_ring *ring = &mhi_event->ring;
-
-		if (mhi_event->offload_ev || !mhi_event->hw_ring)
-			continue;
-
-		ring->wp = ring->base + ring->len - ring->el_size;
-		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
-		/* Update to all cores */
-		smp_wmb();
-
-		spin_lock_irq(&mhi_event->lock);
-		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
-			mhi_ring_er_db(mhi_event);
-		spin_unlock_irq(&mhi_event->lock);
-	}
+	/* Add elements to all HW event rings and ring HW event ring dbs */
+	mhi_setup_event_rings(mhi_cntrl, true);
 
 	read_unlock_bh(&mhi_cntrl->pm_lock);
@@ -1190,6 +1186,8 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 	flush_work(&mhi_cntrl->st_worker);
 	disable_irq(mhi_cntrl->irq[0]);
 
+	if (mhi_cntrl->fbc_image)
+		mhi_free_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image);
 }
 EXPORT_SYMBOL_GPL(mhi_power_down);


@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  */
 #ifndef _MHI_H_
@@ -15,6 +15,9 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
+/* MHI client drivers to set this upper bound for tx buffer */
+#define MHI_MAX_MTU 0xffff
+
 #define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
 
 struct mhi_chan;
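Moving MHI_MAX_MTU here is the headline change of this merge: client drivers can now clamp their transfer sizes against it without including the bus-private internal.h. A hypothetical client-side helper:

	#include <linux/minmax.h>
	#include <linux/mhi.h>

	/* Hypothetical client helper: never queue a transfer larger than
	 * the MHI layer's upper bound for a tx buffer. */
	static size_t mhi_client_chunk_len(size_t remaining)
	{
		return min_t(size_t, remaining, MHI_MAX_MTU);
	}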
@@ -34,6 +37,7 @@ struct mhi_buf_info;
  * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
  * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
  * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
+ * @MHI_CB_FALLBACK_IMG: MHI device was loaded with the provided fallback image
  */
 enum mhi_callback {
 	MHI_CB_IDLE,
@@ -45,6 +49,7 @@ enum mhi_callback {
 	MHI_CB_SYS_ERROR,
 	MHI_CB_FATAL_ERROR,
 	MHI_CB_BW_REQ,
+	MHI_CB_FALLBACK_IMG,
 };
 
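Controller drivers observe the new callback through their status_cb hook. A hypothetical handler that simply logs the degraded boot:

	/* Hypothetical controller status callback: the core only signals
	 * that the backup image was used; reacting to it is policy. */
	static void example_status_cb(struct mhi_controller *mhi_cntrl,
				      enum mhi_callback cb)
	{
		switch (cb) {
		case MHI_CB_FALLBACK_IMG:
			dev_warn(mhi_cntrl->cntrl_dev,
				 "booted with fallback firmware image\n");
			break;
		default:
			break;
		}
	}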
@@ -102,10 +107,12 @@ struct image_info {
 /**
  * struct mhi_link_info - BW requirement
  * target_link_speed - Link speed as defined by TLS bits in LinkControl reg
  * target_link_width - Link width as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
  */
 struct mhi_link_info {
 	unsigned int target_link_speed;
 	unsigned int target_link_width;
+	int sequence_num;
 };
/**
@@ -183,10 +190,22 @@ enum mhi_ch_ee_mask {
 /**
  * enum mhi_er_data_type - Event ring data types
  * @MHI_ER_DATA: Only client data over this ring
  * @MHI_ER_CTRL: MHI control data and client data
+ * @MHI_ER_BW_SCALE: MHI controller bandwidth scale functionality
  */
 enum mhi_er_data_type {
 	MHI_ER_DATA,
 	MHI_ER_CTRL,
+	MHI_ER_BW_SCALE,
 };
 
+/**
+ * enum mhi_er_priority - Event ring processing priority
+ * @MHI_ER_PRIORITY_DEFAULT_NOSLEEP: processed by tasklet
+ * @MHI_ER_PRIORITY_HI_NOSLEEP: processed by hi-priority tasklet
+ */
+enum mhi_er_priority {
+	MHI_ER_PRIORITY_DEFAULT_NOSLEEP,
+	MHI_ER_PRIORITY_HI_NOSLEEP,
+};
+
 /**
@@ -242,7 +261,7 @@ struct mhi_channel_config {
  * @irq_moderation_ms: Delay irq for additional events to be aggregated
  * @irq: IRQ associated with this ring
  * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
- * @priority: Priority of this ring. Use 1 for now
+ * @priority: Processing priority of this ring
  * @mode: Doorbell mode
  * @data_type: Type of data this ring will process
  * @hardware_event: This ring is associated with hardware channels
@@ -299,6 +318,7 @@ struct mhi_controller_config {
  * @iova_start: IOMMU starting address for data (required)
  * @iova_stop: IOMMU stop address for data (required)
  * @fw_image: Firmware image name for normal booting (optional)
+ * @fallback_fw_image: Fallback firmware image name for backup boot (optional)
  * @edl_image: Firmware image name for emergency download mode (optional)
  * @rddm_size: RAM dump size that host should allocate for debugging purpose
  * @sbl_size: SBL image size downloaded through BHIe (optional)
@@ -353,6 +373,7 @@ struct mhi_controller_config {
  * @reset: Controller specific reset function (optional)
  * @buffer_len: Bounce buffer length
  * @index: Index of the MHI controller instance
+ * @img_pre_alloc: allocate rddm and fbc image buffers one time
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
  * @wake_set: Device wakeup set flag
@@ -384,6 +405,7 @@ struct mhi_controller {
 	dma_addr_t iova_start;
 	dma_addr_t iova_stop;
 	const char *fw_image;
+	const char *fallback_fw_image;
 	const char *edl_image;
 	size_t rddm_size;
 	size_t sbl_size;
@@ -447,6 +469,7 @@ struct mhi_controller {
 	size_t buffer_len;
 	int index;
+	bool img_pre_alloc;
 	bool bounce_buf;
 	bool fbc_download;
 	bool wake_set;