Merge "i2c: i2c-msm-geni: Enable multi descriptor support for shared SE"

qctecmdr 2024-06-17 00:08:22 -07:00, committed by Gerrit - the friendly Code Review server
commit 2b26a676d8
3 changed files with 381 additions and 200 deletions


@@ -631,6 +631,8 @@ struct gpii {
bool unlock_tre_set;
bool dual_ee_sync_flag;
bool is_resumed;
bool is_multi_desc;
int num_msgs;
};
struct gpi_desc {
@@ -997,6 +999,25 @@ void gpi_dump_for_geni(struct dma_chan *chan)
}
EXPORT_SYMBOL(gpi_dump_for_geni);
/**
* gpi_update_multi_desc_flag() - Update the multi descriptor flag and the number of
* messages for multi descriptor mode handling.
* @chan: DMA channel handle
* @is_multi_descriptor: Multi descriptor mode flag
* @num_msgs: Number of client messages
*
* Return: None
*/
void gpi_update_multi_desc_flag(struct dma_chan *chan, bool is_multi_descriptor, int num_msgs)
{
struct gpii_chan *gpii_chan = to_gpii_chan(chan);
struct gpii *gpii = gpii_chan->gpii;
gpii->is_multi_desc = is_multi_descriptor;
gpii->num_msgs = num_msgs;
}
EXPORT_SYMBOL_GPL(gpi_update_multi_desc_flag);
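For context, a minimal usage sketch of this new export (client_start_batch()/client_end_batch() are hypothetical names; the real call sites are in the i2c-msm-geni changes below):

/*
 * Hypothetical GPI client: mark a batch of num_msgs messages as
 * multi descriptor before submitting TREs, then clear the flag and
 * count once the batch completes.
 */
static void client_start_batch(struct dma_chan *tx_chan, int num_msgs)
{
	gpi_update_multi_desc_flag(tx_chan, true, num_msgs);
}

static void client_end_batch(struct dma_chan *tx_chan)
{
	gpi_update_multi_desc_flag(tx_chan, false, 0);
}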
static void gpi_disable_interrupts(struct gpii *gpii)
{
struct {
@@ -2191,6 +2212,8 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
gpi_desc = to_gpi_desc(vd);
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
if (gpii->is_multi_desc)
gpii->num_msgs--;
/*
* RP pointed by Event is to last TRE processed,
@@ -2213,22 +2236,32 @@
*/
chid = imed_event->chid;
if (gpii->unlock_tre_set) {
if (chid == GPI_RX_CHAN) {
if (imed_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared SE, the unlock TRE will not be processed
* as the channels go to a bad state, so the TX desc
* must be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
if (!gpii->is_multi_desc) {
if (chid == GPI_RX_CHAN) {
if (imed_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared SE, the unlock TRE will not be processed
* as the channels go to a bad state, so the TX desc
* must be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
return;
} else if (imed_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else {
/*
* Multi descriptor case: wait for the unlock
* TRE EOB, so don't free the last descriptor.
*/
if (gpii->num_msgs == 0)
return;
} else if (imed_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else if (imed_event->code == MSM_GPI_TCE_EOB) {
goto gpi_free_desc;
}
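Distilled, the multi descriptor bookkeeping added above amounts to the following (an editorial sketch of the intent, not the literal control flow; the same pattern repeats in gpi_process_xfer_compl_event() below):

/* each processed event accounts for one client message */
if (gpii->is_multi_desc)
	gpii->num_msgs--;

/*
 * When an unlock TRE is queued in multi descriptor mode, the last
 * descriptor must stay allocated until the unlock TRE's EOB event
 * arrives, so the handler returns here instead of freeing it.
 */
if (gpii->unlock_tre_set && gpii->is_multi_desc && gpii->num_msgs == 0)
	return;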
@@ -2297,6 +2330,8 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
gpi_desc = to_gpi_desc(vd);
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
if (gpii->is_multi_desc)
gpii->num_msgs--;
/*
* RP pointed by Event is to last TRE processed,
@@ -2319,28 +2354,37 @@
*/
chid = compl_event->chid;
if (gpii->unlock_tre_set) {
if (chid == GPI_RX_CHAN) {
if (compl_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared SE, the unlock TRE will not be processed
* as the channels go to a bad state, so the TX desc
* must be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
if (!gpii->is_multi_desc) {
if (chid == GPI_RX_CHAN) {
if (compl_event->code == MSM_GPI_TCE_EOT)
goto gpi_free_desc;
else if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR)
/*
* In case of an error in a read transfer on a
* shared SE, the unlock TRE will not be processed
* as the channels go to a bad state, so the TX desc
* must be freed manually.
*/
gpi_free_chan_desc(gpii_tx_chan);
else
return;
} else if (compl_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else {
/*
* Multi descriptor case: wait for the unlock
* TRE EOB, so don't free the last descriptor.
*/
if (gpii->num_msgs == 0)
return;
} else if (compl_event->code == MSM_GPI_TCE_EOT) {
return;
}
} else if (compl_event->code == MSM_GPI_TCE_EOB) {
if (!(gpii_chan->num_tre == 1 && gpii_chan->lock_tre_set)
&& (gpii->protocol != SE_PROTOCOL_UART))
goto gpi_free_desc;
}
tx_cb_param = vd->tx.callback_param;
if (vd->tx.callback && tx_cb_param) {
GPII_VERB(gpii, gpii_chan->chid,


@@ -24,6 +24,7 @@
#include <linux/msm_gpi.h>
#include <linux/ioctl.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#define SE_GENI_TEST_BUS_CTRL 0x44
@@ -118,6 +119,8 @@ if (dev) \
#define NUM_TRE_MSGS_PER_INTR 64
#define IMMEDIATE_DMA_LEN 8
#define MIN_NUM_MSGS_FOR_MULTI_DESC_MODE 4
/* FTRACE Logging */
void i2c_trace_log(struct device *dev, const char *fmt, ...)
{
@@ -139,6 +142,12 @@ enum i2c_se_mode {
GSI_ONLY,
};
enum gsi_error {
GENI_I2C_SUCCESS,
GENI_I2C_GSI_XFER_OUT,
GENI_I2C_ERR_PREP_SG,
};
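These codes let the new GSI read/write helpers below report status to geni_i2c_gsi_xfer(): GENI_I2C_SUCCESS (0) means the TREs were queued, GENI_I2C_GSI_XFER_OUT tells the caller to bail out of the transfer, and GENI_I2C_ERR_PREP_SG tells it to unwind the descriptor preparation; in both error cases the underlying errno travels in gi2c->err.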
struct dbg_buf_ctxt {
void *virt_buf;
void *map_buf;
@@ -1201,14 +1210,19 @@ static int geni_i2c_gsi_request_channel(struct geni_i2c_dev *gi2c)
static struct msm_gpi_tre *setup_lock_tre(struct geni_i2c_dev *gi2c)
{
struct msm_gpi_tre *lock_t = &gi2c->lock_t;
bool gsi_bei = false;
/* lock: chain bit set */
lock_t->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
lock_t->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
if (gi2c->gsi_tx.is_multi_descriptor)
gsi_bei = true;
/* ieob for le-vm and chain for shared se */
if (gi2c->is_shared)
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1);
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, gsi_bei, 0, 0, 1);
else if (gi2c->is_le_vm)
lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 1, 0);
@@ -1315,9 +1329,22 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
else
*gsi_bei = false;
/* BEI bit to be cleared for last TRE. */
if (i == (num - 1))
*gsi_bei = false;
/*
* Keep BEI = 0 for all last TREs.
* Shared SE: the last element is the unlock TRE, so keep BEI = true for the
* DMA TX TRE; BEI = 0 is taken care of by setup_unlock_tre().
* All other (non-shared / multi descriptor) TREs: BEI = 0 for the last
* transfer TRE.
*/
if (i == (num - 1)) {
/* For TX: in the shared use case, the unlock TRE is sent
* for the last transfer, so set the BEI bit for the last
* transfer DMA TRE.
*/
if (gi2c->is_shared)
*gsi_bei = true;
else
*gsi_bei = false;
}
}
if (is_immediate_dma) {
@@ -1332,7 +1359,7 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
* For TX: the unlock TRE is sent for the last transfer,
* so set the chain bit for the last transfer DMA TRE.
*/
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, 0, 1, 0, 1);
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, *gsi_bei, 1, 0, 1);
else
tx_t->dword[3] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
} else {
@@ -1345,7 +1372,7 @@ static struct msm_gpi_tre *setup_tx_tre(struct geni_i2c_dev *gi2c,
* For TX: the unlock TRE is sent for the last transfer,
* so set the chain bit for the last transfer DMA TRE.
*/
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1);
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, *gsi_bei, 1, 0, 1);
else
tx_t->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, *gsi_bei, 1, 0, 0);
}
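Putting the BEI rules for TX TREs together (an editorial distillation; tx_tre_bei() is a hypothetical helper and assumes the periodic interrupt batching around NUM_TRE_MSGS_PER_INTR has already been applied to intermediate TREs):

static bool tx_tre_bei(bool multi_desc, bool shared, bool last_msg)
{
	if (!multi_desc)
		return false;	/* every TRE raises an interrupt */
	if (!last_msg)
		return true;	/* batch interrupts for intermediate TREs */
	/*
	 * Shared SE: the unlock TRE (with BEI = 0) terminates the batch,
	 * so the last DMA TRE keeps BEI set.
	 */
	return shared;
}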
@@ -1525,6 +1552,242 @@ static void geni_i2c_unlock_bus(struct geni_i2c_dev *gi2c)
gi2c->i2c_kpi, start_time, 0, 0);
}
/**
* geni_i2c_check_for_gsi_multi_desc_mode() - Check for I2C multi descriptor mode.
* @gi2c: Geni I2C device handle
* @msgs: Base address of the i2c msgs
* @num: Number of messages
*
* This function checks whether the message batch qualifies for multi descriptor mode.
*
* Return: None
*/
static void geni_i2c_check_for_gsi_multi_desc_mode(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
int num)
{
u32 i = 0;
if (num >= MIN_NUM_MSGS_FOR_MULTI_DESC_MODE) {
gi2c->gsi_tx.is_multi_descriptor = true;
/* multi descriptor mode is supported only for back-to-back writes */
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RD)
gi2c->gsi_tx.is_multi_descriptor = false;
} else {
gi2c->gsi_tx.is_multi_descriptor = false;
}
#if IS_ENABLED(CONFIG_MSM_GPI_DMA)
gpi_update_multi_desc_flag(gi2c->tx_c, gi2c->gsi_tx.is_multi_descriptor, num);
#endif
}
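For illustration, a message batch that qualifies (a sketch; the slave address and buffers are hypothetical):

/* four back-to-back writes, no I2C_M_RD: multi descriptor mode */
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0, .len = 8, .buf = buf0 },
	{ .addr = 0x50, .flags = 0, .len = 8, .buf = buf1 },
	{ .addr = 0x50, .flags = 0, .len = 8, .buf = buf2 },
	{ .addr = 0x50, .flags = 0, .len = 8, .buf = buf3 },
};

geni_i2c_check_for_gsi_multi_desc_mode(gi2c, msgs, ARRAY_SIZE(msgs));
/*
 * gi2c->gsi_tx.is_multi_descriptor is now true; a read anywhere in the
 * batch, or fewer than MIN_NUM_MSGS_FOR_MULTI_DESC_MODE messages,
 * would leave it false.
 */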
/**
* geni_i2c_gsi_read() - Perform a GSI I2C read
* @gi2c: Geni I2C device handle
* @dma_buf: Pointer to DMA buffer
* @msgs: Base address of i2c msgs
* @msg_index: Message index
* @unlock_t: Unlock tre handle
* @num: Number of messages
* @segs: Number of scatter-gather segments
* @sg_index: Scatter-gather index
*
* This function performs a GSI I2C read.
*
* Return: GENI_I2C_SUCCESS (0) on success, GENI_I2C_GSI_XFER_OUT or
* GENI_I2C_ERR_PREP_SG on failure
*/
static int geni_i2c_gsi_read(struct geni_i2c_dev *gi2c, u8 **dma_buf, struct i2c_msg msgs[],
int msg_index, struct msm_gpi_tre *unlock_t, int num, int segs,
int *sg_index)
{
struct msm_gpi_tre *rx_t = NULL;
int ret = 0;
dma_cookie_t tx_cookie, rx_cookie;
int index = *sg_index;
u8 *rd_dma_buf = NULL;
reinit_completion(&gi2c->xfer);
rd_dma_buf = i2c_get_dma_safe_msg_buf(&msgs[msg_index], 1);
if (!rd_dma_buf) {
/* record the errno in gi2c->err; the caller reports gi2c->err */
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c_get_dma_safe_msg_buf failed :%d\n",
gi2c->err);
return GENI_I2C_GSI_XFER_OUT;
}
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d R\n", msg_index, gi2c->cur->len);
sg_init_table(gi2c->rx_sg, 1);
ret = geni_se_common_iommu_map_buf(gi2c->wrapper_dev,
&gi2c->rx_ph,
rd_dma_buf,
msgs[msg_index].len,
DMA_FROM_DEVICE);
if (ret) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_se_common_iommu_map_buf for rx failed :%d\n", ret);
i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[msg_index], false);
/* record the errno for the caller, as the write path does */
gi2c->err = ret;
return GENI_I2C_GSI_XFER_OUT;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[msg_index].virt_buf =
(void *)rd_dma_buf;
gi2c->dbg_buf_ptr[msg_index].map_buf =
(void *)&gi2c->rx_ph;
}
rx_t = setup_rx_tre(gi2c, msgs, msg_index, num);
sg_set_buf(gi2c->rx_sg, rx_t,
sizeof(gi2c->rx_t));
gi2c->rx_desc =
geni_i2c_prep_desc(gi2c, gi2c->rx_c, segs, false);
if (!gi2c->rx_desc) {
gi2c->err = -ENOMEM;
return GENI_I2C_ERR_PREP_SG;
}
/* Issue RX */
rx_cookie = dmaengine_submit(gi2c->rx_desc);
if (dma_submit_error(rx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n", __func__, rx_cookie);
gi2c->err = -EINVAL;
return GENI_I2C_ERR_PREP_SG;
}
dma_async_issue_pending(gi2c->rx_c);
/* submit config/go tre through tx channel */
if (gi2c->is_shared && (msg_index == (num - 1))) {
/* Send unlock tre at the end of last transfer */
sg_set_buf(&gi2c->tx_sg[index++],
unlock_t, sizeof(gi2c->unlock_t));
}
gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, true);
if (!gi2c->tx_desc) {
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_i2c_prep_desc failed\n");
return GENI_I2C_ERR_PREP_SG;
}
/* Issue TX */
tx_cookie = dmaengine_submit(gi2c->tx_desc);
if (dma_submit_error(tx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n",
__func__, tx_cookie);
gi2c->err = -EINVAL;
return GENI_I2C_ERR_PREP_SG;
}
dma_async_issue_pending(gi2c->tx_c);
*dma_buf = rd_dma_buf;
*sg_index = index;
return ret;
}
/**
* geni_i2c_gsi_write() - Perform a GSI I2C write
* @gi2c: Geni I2C device handle
* @msgs: Base address of i2c msgs
* @msg_index: Message index
* @unlock_t: Unlock tre handle
* @num: Number of messages
* @segs: Number of scatter-gather segments
* @sg_index: Scatter-gather index
* @wr_index: TX buffer slot (write) index
*
* This function performs a GSI I2C write.
*
* Return: GENI_I2C_SUCCESS (0) on success, GENI_I2C_GSI_XFER_OUT or
* GENI_I2C_ERR_PREP_SG on failure
*/
static int geni_i2c_gsi_write(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
int msg_index, struct msm_gpi_tre *unlock_t,
int num, int segs, int *sg_index, u32 *wr_index)
{
struct msm_gpi_tre *tx_t = NULL;
int ret = 0;
int index = *sg_index;
dma_cookie_t tx_cookie;
bool gsi_bei = false;
if (msgs[msg_index].len > IMMEDIATE_DMA_LEN) {
gi2c->gsi_tx.dma_buf[*wr_index] =
i2c_get_dma_safe_msg_buf(&msgs[msg_index], 1);
if (!gi2c->gsi_tx.dma_buf[*wr_index]) {
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c_get_dma_safe_msg_buf failed :%d\n", ret);
return GENI_I2C_GSI_XFER_OUT;
}
ret = geni_se_common_iommu_map_buf
(gi2c->wrapper_dev,
&gi2c->tx_ph[*wr_index],
gi2c->gsi_tx.dma_buf[*wr_index],
msgs[msg_index].len,
DMA_TO_DEVICE);
if (ret) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni iommu_map_buf for tx failed :%d\n", ret);
i2c_put_dma_safe_msg_buf
(gi2c->gsi_tx.dma_buf[*wr_index],
&msgs[msg_index],
false);
gi2c->err = ret;
return GENI_I2C_GSI_XFER_OUT;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[*wr_index].virt_buf =
(void *)gi2c->gsi_tx.dma_buf[*wr_index];
gi2c->dbg_buf_ptr[*wr_index].map_buf =
(void *)&gi2c->tx_ph[*wr_index];
}
}
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d W cnt:%d idx:%d\n",
msg_index, gi2c->cur->len, gi2c->gsi_tx.msg_cnt, *wr_index);
tx_t = setup_tx_tre(gi2c, msgs, msg_index, num, &gsi_bei, *wr_index);
sg_set_buf(&gi2c->tx_sg[index++], tx_t, sizeof(gi2c->tx_t));
if (gi2c->is_shared && (msg_index == (num - 1))) {
/* Send unlock tre at the end of last transfer */
sg_set_buf(&gi2c->tx_sg[index++],
unlock_t, sizeof(gi2c->unlock_t));
/* enable the callback for the unlock TRE */
if (gi2c->gsi_tx.is_multi_descriptor)
gsi_bei = false;
}
gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, true);
if (!gi2c->tx_desc) {
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_i2c_prep_desc failed\n");
return GENI_I2C_ERR_PREP_SG;
}
/* no callback is needed when the BEI bit is set */
if (gsi_bei) {
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"geni tx desc call back null %d\n", msg_index);
gi2c->tx_desc->callback = NULL;
gi2c->tx_desc->callback_param = NULL;
}
gi2c->gsi_tx.msg_cnt++;
*wr_index = (msg_index + 1) % MAX_NUM_TRE_MSGS;
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"tx_cnt:%d", gi2c->gsi_tx.msg_cnt);
/* Issue TX */
tx_cookie = dmaengine_submit(gi2c->tx_desc);
if (dma_submit_error(tx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n",
__func__, tx_cookie);
gi2c->err = -EINVAL;
return GENI_I2C_ERR_PREP_SG;
}
dma_async_issue_pending(gi2c->tx_c);
*sg_index = index;
return ret;
}
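One detail worth noting: the helper advances *wr_index modulo MAX_NUM_TRE_MSGS, so the per-message dma_buf/tx_ph entries form a ring of TX slots; a slot is reused only after the TRE that last occupied it has completed and been unmapped (see gi2c_gsi_tre_process()/gi2c_gsi_tx_unmap() further down).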
/**
* geni_i2c_gsi_tx_tre_optimization() - Process received TREs from the GSI HW
* @gi2c: Base address of the gi2c dev structure.
@@ -1554,7 +1817,7 @@ static int geni_i2c_gsi_tx_tre_optimization(struct geni_i2c_dev *gi2c, u32 num,
* If it's the last message, wait for all pending TREs,
* including the last submitted TRE.
*/
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared) {
if (gi2c->gsi_tx.is_multi_descriptor) {
for (i = 0; i < max_irq_cnt; i++) {
if (max_irq_cnt != atomic_read(&gi2c->gsi_tx.irq_cnt)) {
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
@@ -1590,7 +1853,7 @@ static int geni_i2c_gsi_tx_tre_optimization(struct geni_i2c_dev *gi2c, u32 num,
/* process received TREs */
if (timeout) {
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
if (gi2c->gsi_tx.is_multi_descriptor)
gi2c_gsi_tre_process(gi2c, num);
}
@@ -1638,12 +1901,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
u8 op;
int segs;
u32 index = 0, wr_idx = 0;
dma_cookie_t tx_cookie, rx_cookie;
struct msm_gpi_tre *go_t = NULL;
struct msm_gpi_tre *rx_t = NULL;
struct msm_gpi_tre *tx_t = NULL;
bool tx_chan = true;
bool gsi_bei = false;
unsigned long long start_time;
unsigned long long start_time_xfer = sched_clock();
@@ -1671,16 +1929,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
unlock_t = setup_unlock_tre(gi2c);
}
gi2c->gsi_tx.is_multi_descriptor = false;
/* if num of msgs more than 4 checking for multi descriptor mode */
if (num >= 4) {
gi2c->gsi_tx.is_multi_descriptor = true;
/* assumes multi descriptor supports only for continuous writes */
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RD)
gi2c->gsi_tx.is_multi_descriptor = false;
}
geni_i2c_check_for_gsi_multi_desc_mode(gi2c, msgs, num);
if (!gi2c->cfg_sent)
cfg0_t = setup_cfg0_tre(gi2c);
@@ -1741,163 +1990,35 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
sg_set_buf(&gi2c->tx_sg[index++], go_t, sizeof(gi2c->go_t));
if (msgs[i].flags & I2C_M_RD) {
reinit_completion(&gi2c->xfer);
rd_dma_buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
if (!rd_dma_buf) {
ret = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c_get_dma_safe_msg_buf failed :%d\n",
ret);
ret = geni_i2c_gsi_read(gi2c, &rd_dma_buf, msgs, i, unlock_t,
num, segs, &index);
if (ret == GENI_I2C_ERR_PREP_SG) {
ret = gi2c->err;
goto geni_i2c_err_prep_sg;
} else if (ret == GENI_I2C_GSI_XFER_OUT) {
ret = gi2c->err;
goto geni_i2c_gsi_xfer_out;
}
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d R\n", i, gi2c->cur->len);
sg_init_table(gi2c->rx_sg, 1);
ret = geni_se_common_iommu_map_buf(gi2c->wrapper_dev,
&gi2c->rx_ph,
rd_dma_buf,
msgs[i].len,
DMA_FROM_DEVICE);
if (ret) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_se_common_iommu_map_buf for rx failed :%d\n", ret);
i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[i], false);
goto geni_i2c_gsi_xfer_out;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[i].virt_buf =
(void *)rd_dma_buf;
gi2c->dbg_buf_ptr[i].map_buf =
(void *)&gi2c->rx_ph;
}
rx_t = setup_rx_tre(gi2c, msgs, i, num);
sg_set_buf(gi2c->rx_sg, rx_t,
sizeof(gi2c->rx_t));
gi2c->rx_desc =
geni_i2c_prep_desc(gi2c, gi2c->rx_c, segs, !tx_chan);
if (!gi2c->rx_desc) {
gi2c->err = -ENOMEM;
goto geni_i2c_err_prep_sg;
}
/* Issue RX */
rx_cookie = dmaengine_submit(gi2c->rx_desc);
if (dma_submit_error(rx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n", __func__, rx_cookie);
gi2c->err = -EINVAL;
goto geni_i2c_err_prep_sg;
}
dma_async_issue_pending(gi2c->rx_c);
/* submit config/go tre through tx channel */
if (gi2c->is_shared && (i == (num - 1))) {
/* Send unlock tre at the end of last transfer */
sg_set_buf(&gi2c->tx_sg[index++],
unlock_t, sizeof(gi2c->unlock_t));
}
gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, tx_chan);
if (!gi2c->tx_desc) {
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_i2c_prep_desc failed\n");
goto geni_i2c_err_prep_sg;
}
/* Issue TX */
tx_cookie = dmaengine_submit(gi2c->tx_desc);
if (dma_submit_error(tx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n",
__func__, tx_cookie);
gi2c->err = -EINVAL;
goto geni_i2c_err_prep_sg;
}
dma_async_issue_pending(gi2c->tx_c);
timeout = wait_for_completion_timeout(&gi2c->xfer,
gi2c->xfer_timeout);
gi2c->xfer_timeout);
} else {
if (msgs[i].len > IMMEDIATE_DMA_LEN) {
gi2c->gsi_tx.dma_buf[wr_idx] =
i2c_get_dma_safe_msg_buf(&msgs[i], 1);
if (!gi2c->gsi_tx.dma_buf[wr_idx]) {
ret = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c_get_dma_safe_msg_buf failed :%d\n", ret);
goto geni_i2c_gsi_xfer_out;
}
ret = geni_se_common_iommu_map_buf(gi2c->wrapper_dev,
&gi2c->tx_ph[wr_idx],
gi2c->gsi_tx.dma_buf[wr_idx],
msgs[i].len, DMA_TO_DEVICE);
if (ret) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni iommu_map_buf for tx failed :%d\n", ret);
i2c_put_dma_safe_msg_buf(gi2c->gsi_tx.dma_buf[wr_idx],
&msgs[i], false);
goto geni_i2c_gsi_xfer_out;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[wr_idx].virt_buf =
(void *)gi2c->gsi_tx.dma_buf[wr_idx];
gi2c->dbg_buf_ptr[wr_idx].map_buf =
(void *)&gi2c->tx_ph[wr_idx];
}
ret = geni_i2c_gsi_write(gi2c, msgs, i, unlock_t,
num, segs, &index, &wr_idx);
if (ret == GENI_I2C_GSI_XFER_OUT) {
ret = gi2c->err;
goto geni_i2c_gsi_xfer_out;
} else if (ret == GENI_I2C_ERR_PREP_SG) {
ret = gi2c->err;
goto geni_i2c_err_prep_sg;
}
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d W cnt:%d idx:%d\n",
i, gi2c->cur->len, gi2c->gsi_tx.msg_cnt, wr_idx);
tx_t = setup_tx_tre(gi2c, msgs, i, num, &gsi_bei, wr_idx);
sg_set_buf(&gi2c->tx_sg[index++], tx_t, sizeof(gi2c->tx_t));
if (gi2c->is_shared && (i == (num - 1))) {
/* Send unlock tre at the end of last transfer */
sg_set_buf(&gi2c->tx_sg[index++],
unlock_t, sizeof(gi2c->unlock_t));
}
gi2c->tx_desc = geni_i2c_prep_desc(gi2c, gi2c->tx_c, segs, tx_chan);
if (!gi2c->tx_desc) {
gi2c->err = -ENOMEM;
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"geni_i2c_prep_desc failed\n");
goto geni_i2c_err_prep_sg;
}
/* we don't need call back if bei bit is set */
if (gsi_bei) {
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"geni tx desc call back null %d\n", i);
gi2c->tx_desc->callback = NULL;
gi2c->tx_desc->callback_param = NULL;
}
gi2c->gsi_tx.msg_cnt++;
wr_idx = (i + 1) % MAX_NUM_TRE_MSGS;
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
"tx_cnt:%d", gi2c->gsi_tx.msg_cnt);
/* Issue TX */
tx_cookie = dmaengine_submit(gi2c->tx_desc);
if (dma_submit_error(tx_cookie)) {
I2C_LOG_ERR(gi2c->ipcl, true, gi2c->dev,
"%s: dmaengine_submit failed (%d)\n",
__func__, tx_cookie);
gi2c->err = -EINVAL;
goto geni_i2c_err_prep_sg;
}
dma_async_issue_pending(gi2c->tx_c);
/*
* If it's not the last message, submit up to MAX_NUM_TRE_MSGS
* TREs continuously without waiting; in between, whenever a
* TRE completion arrives, process it and queue the next TRE.
*/
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared &&
(i != (num - 1)) &&
if (gi2c->gsi_tx.is_multi_descriptor && (i != (num - 1)) &&
(gi2c->gsi_tx.msg_cnt < MAX_NUM_TRE_MSGS + gi2c->gsi_tx.tre_freed_cnt))
continue;
@@ -1958,7 +2079,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
i2c_put_dma_safe_msg_buf(rd_dma_buf, &msgs[i], !gi2c->err);
} else if (gi2c->err) {
/* for multi descriptor, unmap all submitted TREs */
if (gi2c->gsi_tx.is_multi_descriptor && !gi2c->is_shared)
if (gi2c->gsi_tx.is_multi_descriptor)
gi2c_gsi_tre_process(gi2c, num);
else
gi2c_gsi_tx_unmap(gi2c, i, wr_idx - 1);
@@ -1970,6 +2091,11 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
}
geni_i2c_gsi_xfer_out:
/* clear the GPI multi descriptor flag */
#if IS_ENABLED(CONFIG_MSM_GPI_DMA)
if (gi2c->gsi_tx.is_multi_descriptor)
gpi_update_multi_desc_flag(gi2c->tx_c, false, 0);
#endif
if (!ret && gi2c->err)
ret = gi2c->err;
I2C_LOG_DBG(gi2c->ipcl, false, gi2c->dev,
@@ -2251,7 +2377,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
}
geni_ios = geni_read_reg(gi2c->base, SE_GENI_IOS);
if ((geni_ios & 0x3) != 0x3) { //SCL:b'1, SDA:b'0
if (!gi2c->is_shared && ((geni_ios & 0x3) != 0x3)) {//SCL:b'1, SDA:b'0
I2C_LOG_ERR(gi2c->ipcl, false, gi2c->dev,
"IO lines in bad state, Power the slave\n");
/* for levm skip auto suspend timer */


@@ -393,6 +393,17 @@ struct gsi_common {
*/
void gpi_dump_for_geni(struct dma_chan *chan);
/**
* gpi_update_multi_desc_flag() - Update the multi descriptor flag and the number of
* messages for multi descriptor mode handling.
* @chan: DMA channel handle
* @is_multi_descriptor: Multi descriptor mode flag
* @num_msgs: Number of client messages
*
* Return: None
*/
void gpi_update_multi_desc_flag(struct dma_chan *chan, bool is_multi_descriptor, int num_msgs);
/**
* gsi_common_tre_process() - Process received TREs from the GSI HW
* @gsi: Base address of the gsi common structure.