i2c: i2c-msm-geni: Enable multi descriptor support for shared SE

So far we bypassed multi-descriptor changes for shared-se, now
enabled multi-descriptor changes for shared SE as well.

Multi-descriptor mode uses the block event interrupt (BEI) to receive an
interrupt after transferring the number of i2c messages specified by the
maximum number of transfers. HW will transfer data corresponding to all
TREs and generate an interrupt at the end of the last transfer. With this
implementation the number of interrupts is reduced, and it is also ensured
that while the hardware is processing one set of TREs, software queues the
next set of TREs if available.

Change-Id: I983272681a7cbd99d4dab3d89e4f2b9edc8b3af7
Signed-off-by: Krishna Chaithanya Reddy G <quic_kgangapu@quicinc.com>
Signed-off-by: Prasanna S <quic_prass@quicinc.com>
This commit is contained in:
Krishna Chaithanya Reddy G 2024-05-22 14:44:28 +05:30 committed by Prasanna S
parent 659462265d
commit da0999a52a
3 changed files with 152 additions and 50 deletions

View File

@ -631,6 +631,8 @@ struct gpii {
bool unlock_tre_set;
bool dual_ee_sync_flag;
bool is_resumed;
bool is_multi_desc;
int num_msgs;
};
struct gpi_desc {
@ -997,6 +999,25 @@ void gpi_dump_for_geni(struct dma_chan *chan)
}
EXPORT_SYMBOL(gpi_dump_for_geni);
/**
 * gpi_update_multi_desc_flag() - update the multi descriptor flag and the
 * number of client messages used for multi descriptor mode handling.
 * @chan: Base address of dma channel
 * @is_multi_descriptor: Is multi descriptor flag
 * @num_msgs: Number of client messages
 *
 * Return: None
 */
void gpi_update_multi_desc_flag(struct dma_chan *chan, bool is_multi_descriptor, int num_msgs)
{
	struct gpii *gpii = to_gpii_chan(chan)->gpii;

	gpii->is_multi_desc = is_multi_descriptor;
	gpii->num_msgs = num_msgs;
}
EXPORT_SYMBOL_GPL(gpi_update_multi_desc_flag);
static void gpi_disable_interrupts(struct gpii *gpii)
{
struct {
@ -2191,6 +2212,8 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
gpi_desc = to_gpi_desc(vd);
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
if (gpii->is_multi_desc)
gpii->num_msgs--;
/*
* RP pointed by Event is to last TRE processed,
@ -2213,22 +2236,32 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
*/
chid = imed_event->chid;
if (gpii->unlock_tre_set) {
if (chid == GPI_RX_CHAN) {
if (imed_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared se, unlock tre will not be processed
* as channels go to bad state so tx desc should
* be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
if (!gpii->is_multi_desc) {
if (chid == GPI_RX_CHAN) {
if (imed_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared se, unlock tre will not be processed
* as channels go to bad state so tx desc should
* be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
return;
} else if (imed_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else {
/*
* Multi descriptor case waiting for unlock
* tre eob, so not freeeing last descriptor
*/
if (gpii->num_msgs == 0)
return;
} else if (imed_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else if (imed_event->code == MSM_GPI_TCE_EOB) {
goto gpi_free_desc;
}
@ -2297,6 +2330,8 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
gpi_desc = to_gpi_desc(vd);
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
if (gpii->is_multi_desc)
gpii->num_msgs--;
/*
* RP pointed by Event is to last TRE processed,
@ -2319,28 +2354,37 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
*/
chid = compl_event->chid;
if (gpii->unlock_tre_set) {
if (chid == GPI_RX_CHAN) {
if (compl_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared se, unlock tre will not be processed
* as channels go to bad state so tx desc should
* be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
if (!gpii->is_multi_desc) {
if (chid == GPI_RX_CHAN) {
if (compl_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared se, unlock tre will not be processed
* as channels go to bad state so tx desc should
* be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
return;
} else if (compl_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else {
/*
* Multi descriptor case waiting for unlock
* tre eob, so not freeeing last descriptor
*/
if (gpii->num_msgs == 0)
return;
} else if (compl_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else if (compl_event->code == MSM_GPI_TCE_EOB) {
if (!(gpii_chan->num_tre == 1 && gpii_chan->lock_tre_set)
&& (gpii->protocol != SE_PROTOCOL_UART))
goto gpi_free_desc;
}
tx_cb_param = vd->tx.callback_param;
if (vd->tx.callback && tx_cb_param) {
GPII_VERB(gpii, gpii_chan->chid,

View File

@ -24,6 +24,7 @@
#include <linux/msm_gpi.h>
#include <linux/ioctl.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#define SE_GENI_TEST_BUS_CTRL 0x44
@ -118,6 +119,8 @@ if (dev) \
#define NUM_TRE_MSGS_PER_INTR 64
#define IMMEDIATE_DMA_LEN 8
#define MIN_NUM_MSGS_FOR_MULTI_DESC_MODE 4
/* FTRACE Logging */
void i2c_trace_log(struct device *dev, const char *fmt, ...)
{
@ -1207,14 +1210,19 @@ static int geni_i2c_gsi_request_channel(struct geni_i2c_dev *gi2c)
static struct msm_gpi_tre *setup_lock_tre(struct geni_i2c_dev *gi2c)
{
struct msm_gpi_tre *lock_t = &gi2c->lock_t;
bool gsi_bei = false;
/* lock: chain bit set */
lock_t->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
lock_t->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
if (gi2c->gsi_tx.is_multi_descriptor)
gsi_bei = true;
/* ieob for le-vm and chain for shared se */
if (gi2c->is_shared)
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1);
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, gsi_bei, 0, 0, 1);
else if (gi2c->is_le_vm)
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 1, 0);
@ -1321,9 +1329,22 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
else
*gsi_bei = false;
/* BEI bit to be cleared for last TRE. */
if (i == (num - 1))
*gsi_bei = false;
/*
* Keep BEI = 0, for all last TREs
* Shared SE : Last is unlock TRE, hence continue to have BEI = TRUE for DMA TX TRE.
* BEI = 0, taken cared by setup_unlock_tre().
* Rest all/non shared/Multi descriptor TREs : BEI = 0 for last transfer TRE.
*/
if (i == (num - 1)) {
/* For Tx: for shared usecase unlock tre is send
* for last transfer so set bei bit for last transfer
* DMA tre
*/
if (gi2c->is_shared)
*gsi_bei = true;
else
*gsi_bei = false;
}
}
if (is_immediate_dma) {
@ -1338,7 +1359,7 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
* For Tx: unlock tre is send for last transfer
* so set chain bit for last transfer DMA tre.
*/
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, 0, 1, 0, 1);
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, *gsi_bei, 1, 0, 1);
else
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
} else {
@ -1351,7 +1372,7 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
* For Tx: unlock tre is send for last transfer
* so set chain bit for last transfer DMA tre.
*/
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1);
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, *gsi_bei, 1, 0, 1);
else
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
}
@ -1531,6 +1552,34 @@ static void geni_i2c_unlock_bus(struct geni_i2c_dev *gi2c)
gi2c->i2c_kpi, start_time, 0, 0);
}
/**
 * geni_i2c_check_for_gsi_multi_desc_mode() - check for i2c multi descriptor mode.
 * @gi2c: Geni I2C device handle
 * @msgs: Base address of i2c msgs
 * @num: Number of messages
 *
 * Decides whether this transfer set qualifies for multi descriptor mode and
 * propagates the decision (and message count) to the GPI DMA driver. Multi
 * descriptor mode is used only when there are at least
 * MIN_NUM_MSGS_FOR_MULTI_DESC_MODE messages and all of them are writes.
 *
 * Return: None
 */
static void geni_i2c_check_for_gsi_multi_desc_mode(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
						   int num)
{
	int i;

	gi2c->gsi_tx.is_multi_descriptor = false;

	if (num >= MIN_NUM_MSGS_FOR_MULTI_DESC_MODE) {
		gi2c->gsi_tx.is_multi_descriptor = true;
		/* multi descriptor mode is supported only for continuous writes */
		for (i = 0; i < num; i++) {
			if (msgs[i].flags & I2C_M_RD) {
				/* a single read disqualifies the whole set */
				gi2c->gsi_tx.is_multi_descriptor = false;
				break;
			}
		}
	}

#if IS_ENABLED(CONFIG_MSM_GPI_DMA)
	gpi_update_multi_desc_flag(gi2c->tx_c, gi2c->gsi_tx.is_multi_descriptor, num);
#endif
}
/**
* geni_i2c_gsi_read() - Perform gsi i2c read
* @gi2c: Geni I2C device handle
@ -1701,6 +1750,9 @@ static int geni_i2c_gsi_write(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
/* Send unlock tre at the end of last transfer */
sg_set_buf(&gi2c->tx_sg[index++],
unlock_t, sizeof(gi2c->unlock_t));
/* to enable call back for unlock tre */
if (gi2c->gsi_tx.is_multi_descriptor)
gsi_bei = false;
}
gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, true);
@ -1765,7 +1817,7 @@ static int geni_i2c_gsi_tx_tre_optimization(struct geni_i2c_dev *gi2c, u32 num,
* if it's last message, waiting for all pending tre's
* including last submitted tre as well.
*/
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared) {
if (gi2c->gsi_tx.is_multi_descriptor) {
for (i = 0; i < max_irq_cnt; i++) {
if (max_irq_cnt != atomic_read(&gi2c->gsi_tx.irq_cnt)) {
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
@ -1801,7 +1853,7 @@ static int geni_i2c_gsi_tx_tre_optimization(struct geni_i2c_dev *gi2c, u32 num,
/* process received tre's */
if (timeout) {
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
if (gi2c->gsi_tx.is_multi_descriptor)
gi2c_gsi_tre_process(gi2c, num);
}
@ -1877,16 +1929,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
unlock_t = setup_unlock_tre(gi2c);
}
gi2c->gsi_tx.is_multi_descriptor = false;
/* if num of msgs more than 4 checking for multi descriptor mode */
if (num >= 4) {
gi2c->gsi_tx.is_multi_descriptor = true;
/* assumes multi descriptor supports only for continuous writes */
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RD)
gi2c->gsi_tx.is_multi_descriptor = false;
}
geni_i2c_check_for_gsi_multi_desc_mode(gi2c, msgs, num);
if (!gi2c->cfg_sent)
cfg0_t = setup_cfg0_tre(gi2c);
@ -1975,8 +2018,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
* continuously without waiting, in b/w if any one of the
* tre is received processing and queuing next tre.
*/
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared &&
(i != (num - 1)) &&
if (gi2c->gsi_tx.is_multi_descriptor && (i != (num - 1)) &&
(gi2c->gsi_tx.msg_cnt < MAX_NUM_TRE_MSGS + gi2c->gsi_tx.tre_freed_cnt))
continue;
@ -2037,7 +2079,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[i], !gi2c->err);
} else if (gi2c->err) {
/* for multi descriptor unmap all submitted tre's */
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
if (gi2c->gsi_tx.is_multi_descriptor)
gi2c_gsi_tre_process(gi2c, num);
else
gi2c_gsi_tx_unmap(gi2c, i, wr_idx - 1);
@ -2049,6 +2091,11 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
}
geni_i2c_gsi_xfer_out:
/* clearing the gpi multi descriptor flag */
#if IS_ENABLED(CONFIG_MSM_GPI_DMA)
if (gi2c->gsi_tx.is_multi_descriptor)
gpi_update_multi_desc_flag(gi2c->tx_c, false, 0);
#endif
if (!ret && gi2c->err)
ret = gi2c->err;
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,

View File

@ -393,6 +393,17 @@ struct gsi_common {
*/
void gpi_dump_for_geni(struct dma_chan *chan);
/**
* gpi_update_multi_desc_flag() - update multi descriptor flag and num of msgs for
* multi descriptor mode handling.
* @chan: Base address of dma channel
* @is_multi_descriptor: is multi descriptor flag
* @num_msgs: number of client messages
*
* Return: None
*/
void gpi_update_multi_desc_flag(struct dma_chan *chan, bool is_multi_descriptor, int num_msgs);
/**
* gsi_common_tre_process() - Process received TRE's from GSI HW
* @gsi: Base address of the gsi common structure.