Merge "q2spi-msm-geni: Add q2spi interface drivers support"

This commit is contained in:
qctecmdr 2023-09-25 23:24:54 -07:00 committed by Gerrit - the friendly Code Review server
commit da665bb8cd
13 changed files with 4578 additions and 15 deletions

View File

@ -161,6 +161,20 @@ struct __packed xfer_compl_event {
u8 chid;
};
/* Q2SPI_STATUS TCE event layout — hardware-defined, must stay __packed */
struct __packed qup_q2spi_status {
u32 ptr_l; /* ring address of the TRE this event refers to (see gpi_process_qup_q2spi_status) */
u32 ptr_h : 8; /* upper bits of the TRE pointer */
u32 resvd_0 : 8; /* reserved */
u32 value : 8; /* status value forwarded to the client as q2spi_status */
u32 resvd_1 : 8; /* reserved */
u32 length : 20; /* transfer length reported with the event */
u32 resvd_2 : 4; /* reserved */
u8 code : 8; /* completion code (e.g. MSM_GPI_TCE_EOB) */
u16 status : 16; /* event status word */
u8 type : 8; /* TCE type */
u8 ch_id : 8; /* channel id the event belongs to */
};
struct __packed immediate_data_event {
u8 data_bytes[8];
u8 length : 4;
@ -186,18 +200,13 @@ struct __packed gpi_ere {
u32 dword[4];
};
/* TCE type codes found in event-ring elements and dispatched in gpi_process_events() */
enum GPI_EV_TYPE {
XFER_COMPLETE_EV_TYPE = 0x22, /* transfer completion */
IMMEDIATE_DATA_EV_TYPE = 0x30, /* immediate data payload */
QUP_NOTIF_EV_TYPE = 0x31, /* QUP notification */
STALE_EV_TYPE = 0xFF, /* stale/already-consumed event — TODO confirm producer semantics */
};
/* overlay of all event-ring element formats; which member is valid is
 * determined by the TCE type field of the raw event
 */
union __packed gpi_event {
struct __packed xfer_compl_event xfer_compl_event;
struct __packed immediate_data_event immediate_data_event;
struct __packed qup_notif_event qup_notif_event;
struct __packed gpi_ere gpi_ere; /* raw dword view, used for error dumps */
struct __packed qup_q2spi_status q2spi_status; /* Q2SPI_STATUS TCE */
struct __packed qup_q2spi_cr_header_event q2spi_cr_header_event; /* Q2SPI CR header TCE */
};
enum gpii_irq_settings {
@ -298,6 +307,7 @@ enum se_protocol {
SE_PROTOCOL_SPI = 1,
SE_PROTOCOL_UART = 2,
SE_PROTOCOL_I2C = 3,
SE_PROTOCOL_Q2SPI = 0xE,
SE_MAX_PROTOCOL
};
@ -538,6 +548,7 @@ struct gpii_chan {
struct virt_dma_chan vc;
u32 chid;
u32 seid;
u8 init_config:1;
enum se_protocol protocol;
enum EV_PRIORITY priority; /* comes from clients DT node */
struct gpii *gpii;
@ -1700,12 +1711,118 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
tx_cb_param->length = compl_event->length;
tx_cb_param->completion_code = compl_event->code;
tx_cb_param->status = compl_event->status;
tx_cb_param->tce_type = compl_event->type;
GPII_INFO(gpii, gpii_chan->chid, "tx_cb_param:%p\n", tx_cb_param);
vd->tx.callback(tx_cb_param);
}
gpi_free_desc:
gpi_free_chan_desc(gpii_chan);
}
/* process Q2SPI_STATUS TCE notification event */
static void
gpi_process_qup_q2spi_status(struct gpii_chan *gpii_chan,
struct qup_q2spi_status *q2spi_status_event)
{
struct gpii *gpii = gpii_chan->gpii;
struct gpi_ring *ch_ring = gpii_chan->ch_ring;
/* ptr_l carries the ring address of the last TRE processed */
void *ev_rp = to_virtual(ch_ring, q2spi_status_event->ptr_l);
struct virt_dma_desc *vd;
struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
struct gpi_desc *gpi_desc;
unsigned long flags;
/* only process events on active channel */
if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
GPII_ERR(gpii, gpii_chan->chid, "skipping processing event because ch @ %s state\n",
TO_GPI_PM_STR(gpii_chan->pm_state));
gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT, __LINE__);
return;
}
spin_lock_irqsave(&gpii_chan->vc.lock, flags);
vd = vchan_next_desc(&gpii_chan->vc);
if (!vd) {
struct gpi_ere *gpi_ere;
/* no descriptor queued for this event: dump the raw event words */
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
GPII_ERR(gpii, gpii_chan->chid,
"Event without a pending descriptor!\n");
gpi_ere = (struct gpi_ere *)q2spi_status_event;
GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
gpi_ere->dword[0], gpi_ere->dword[1],
gpi_ere->dword[2], gpi_ere->dword[3]);
gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH, __LINE__);
return;
}
/* NOTE(review): gpi_desc is computed but not read afterwards */
gpi_desc = to_gpi_desc(vd);
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
/*
 * RP pointed by Event is to last TRE processed,
 * we need to update ring rp to ev_rp + 1
 */
ev_rp += ch_ring->el_size;
if (ev_rp >= (ch_ring->base + ch_ring->len))
ev_rp = ch_ring->base; /* wrap around the ring */
ch_ring->rp = ev_rp;
/* update must be visible to other cores */
smp_wmb();
/* on an EOB completion, only Q2SPI channels deliver the client callback */
if (q2spi_status_event->code == MSM_GPI_TCE_EOB) {
if (gpii->protocol != SE_PROTOCOL_Q2SPI)
goto gpi_free_desc;
}
tx_cb_param = vd->tx.callback_param;
if (vd->tx.callback && tx_cb_param) {
GPII_VERB(gpii, gpii_chan->chid,
"cb_length:%u code:0x%x type:0x%x status:0x%x q2spi_status:0x%x\n",
q2spi_status_event->length, q2spi_status_event->code,
q2spi_status_event->type, q2spi_status_event->status,
q2spi_status_event->value);
/* copy the event fields into the client-visible callback params */
tx_cb_param->length = q2spi_status_event->length;
tx_cb_param->completion_code = q2spi_status_event->code;
tx_cb_param->tce_type = q2spi_status_event->type;
tx_cb_param->status = q2spi_status_event->status;
tx_cb_param->q2spi_status = q2spi_status_event->value;
vd->tx.callback(tx_cb_param);
}
gpi_free_desc:
gpi_free_chan_desc(gpii_chan);
}
/*
 * gpi_process_xfer_q2spi_cr_header - process Q2SPI CR Header TCE event
 * @gpii_chan: channel the event arrived on
 * @q2spi_cr_header_event: CR header payload from the event ring
 *
 * Logs the CR header contents and forwards the whole event to the
 * client's registered callback as an MSM_GPI_QUP_CR_HEADER notification.
 * Fixes vs. original: corrected the typo'd debug labels
 * (hrd_1/hrd_2/hdr3/cr_byte_3h), dropped the duplicate "code" log, and
 * removed the redundant NULL-init of gpii_ptr.
 */
static void
gpi_process_xfer_q2spi_cr_header(struct gpii_chan *gpii_chan,
struct qup_q2spi_cr_header_event *q2spi_cr_header_event)
{
struct gpi_client_info *client_info = &gpii_chan->client_info;
struct gpii *gpii_ptr = gpii_chan->gpii;
struct msm_gpi_cb msm_gpi_cb;
GPII_VERB(gpii_ptr, gpii_chan->chid,
"code:0x%x type:0x%x hdr_0:0x%x hdr_1:0x%x hdr_2:0x%x hdr_3:0x%x\n",
q2spi_cr_header_event->code, q2spi_cr_header_event->type,
q2spi_cr_header_event->cr_hdr_0, q2spi_cr_header_event->cr_hdr_1,
q2spi_cr_header_event->cr_hdr_2, q2spi_cr_header_event->cr_hdr_3);
GPII_VERB(gpii_ptr, gpii_chan->chid,
"cr_byte_0:0x%x cr_byte_1:0x%x cr_byte_2:0x%x cr_byte_3:0x%x\n",
q2spi_cr_header_event->cr_ed_byte_0, q2spi_cr_header_event->cr_ed_byte_1,
q2spi_cr_header_event->cr_ed_byte_2, q2spi_cr_header_event->cr_ed_byte_3);
GPII_VERB(gpii_ptr, gpii_chan->chid,
"cr_byte_0_len:0x%x cr_byte_0_err:0x%x type:0x%x ch_id:0x%x\n",
q2spi_cr_header_event->byte0_len, q2spi_cr_header_event->byte0_err,
q2spi_cr_header_event->type, q2spi_cr_header_event->ch_id);
/* hand the CR header to the client as a GPI callback event */
msm_gpi_cb.cb_event = MSM_GPI_QUP_CR_HEADER;
msm_gpi_cb.q2spi_cr_header_event = *q2spi_cr_header_event;
GPII_VERB(gpii_ptr, gpii_chan->chid, "sending CB event:%s\n",
TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
client_info->cb_param);
}
/* process all events */
@ -1763,6 +1880,15 @@ static void gpi_process_events(struct gpii *gpii)
gpi_process_qup_notif_event(gpii_chan,
&gpi_event->qup_notif_event);
break;
case QUP_TCE_TYPE_Q2SPI_STATUS:
gpii_chan = &gpii->gpii_chan[chid];
gpi_process_qup_q2spi_status(gpii_chan, &gpi_event->q2spi_status);
break;
case QUP_TCE_TYPE_Q2SPI_CR_HEADER:
gpii_chan = &gpii->gpii_chan[chid];
gpi_process_xfer_q2spi_cr_header(gpii_chan,
&gpi_event->q2spi_cr_header_event);
break;
default:
GPII_VERB(gpii, GPI_DBG_COMMON,
"not supported event type:0x%x\n",
@ -1951,6 +2077,7 @@ static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
gpii_chan->chid),
GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
gpii_chan->init_config,
gpii_chan->protocol,
gpii_chan->seid),
},
@ -2988,13 +3115,15 @@ static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
/* get ring size, protocol, se_id, and priority */
gpii_chan->seid = seid;
gpii_chan->protocol = args->args[2];
if (gpii_chan->protocol == SE_PROTOCOL_Q2SPI)
gpii_chan->init_config = 1;
gpii_chan->req_tres = args->args[3];
gpii_chan->priority = args->args[4] & GPI_EV_PRIORITY_BMSK;
GPI_LOG(gpi_dev,
"client req gpii:%u chid:%u #_tre:%u prio:%u proto:%u SE:%d\n",
"client req gpii:%u chid:%u #_tre:%u prio:%u proto:%u SE:%d init_config:%d\n",
gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
gpii_chan->protocol, gpii_chan->seid);
gpii_chan->protocol, gpii_chan->seid, gpii_chan->init_config);
return dma_get_slave_channel(&gpii_chan->vc.chan);
}

View File

@ -1,11 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* Register offsets from gpi-top */
#define GPI_GPII_n_CH_k_CNTXT_0_OFFS(n, k) \
(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_CNTXT_2_OFFS(n, k) \
(0x20008 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_CNTXT_4_OFFS(n, k) \
(0x20010 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_CNTXT_6_OFFS(n, k) \
(0x20018 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_RE_FETCH_READ_PTR(n, k) \
(0x20054 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
#define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
@ -46,7 +56,14 @@
/* EV Context Array */
#define GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) \
(0x21000 + (0x4000 * (n)) + (0x80 * (k)))
(0x21000 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_EV_CH_k_CNTXT_2_OFFS(n, k) \
(0x21008 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_EV_CH_k_CNTXT_4_OFFS(n, k) \
(0x21010 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_EV_CH_k_CNTXT_6_OFFS(n, k) \
(0x21018 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
@ -207,8 +224,8 @@ enum CNTXT_OFFS {
/* Scratch registeres */
#define GPI_GPII_n_CH_k_SCRATCH_0_OFFS(n, k) \
(0x20060 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_K_SCRATCH_0(pair, proto, seid) \
((pair << 16) | (proto << 4) | seid)
#define GPI_GPII_n_CH_K_SCRATCH_0(pair, int_config, proto, seid) \
(((pair) << 16) | ((int_config) << 15) | ((proto) << 4) | (seid))
#define GPI_GPII_n_CH_k_SCRATCH_1_OFFS(n, k) \
(0x20064 + (0x4000 * (n)) + (0x80 * (k)))
#define GPI_GPII_n_CH_k_SCRATCH_2_OFFS(n, k) \
@ -228,4 +245,4 @@ enum CNTXT_OFFS {
#define GPI_DEBUG_QSB_LOG_1 (0x5068)
#define GPI_DEBUG_QSB_LOG_2 (0x506C)
#define GPI_DEBUG_QSB_LOG_LAST_MISC_ID(n) (0x5070 + (0x4*n))
#define GPI_DEBUG_BUSY_REG (0x5010)

View File

@ -823,6 +823,18 @@ config SPI_QCOM_GENI
This driver can also be built as a module. If so, the module
will be called spi-geni-qcom.
config Q2SPI_MSM_GENI
tristate "Qualcomm Technologies Inc.'s GENI based Q2SPI controller"
depends on QCOM_GENI_SE
help
This driver supports GENI serial engine based Q2SPI controller in
master mode on the Qualcomm Technologies Inc.'s SoCs. If you say
yes to this option, support will be included for the built-in Q2SPI
interface on the Qualcomm Technologies Inc.'s SoCs.
This driver can also be built as a module. If so, the module
will be called q2spi-geni.
config SPI_MSM_GENI
tristate "Qualcomm Technologies Inc.'s GENI based SPI controller"
depends on QCOM_GENI_SE

View File

@ -102,6 +102,8 @@ spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_Q2SPI_MSM_GENI) += q2spi-geni.o
q2spi-geni-y := q2spi-msm-geni.o q2spi-gsi.o
obj-$(CONFIG_SPI_MSM_GENI) += spi-msm-geni.o
obj-$(CONFIG_VIRTIO_SPI) += virtio-spi.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o

632
drivers/spi/q2spi-gsi.c Normal file
View File

@ -0,0 +1,632 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include "q2spi-msm.h"
#include "q2spi-slave-reg.h"
static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param)
{
struct q2spi_packet *q2spi_pkt = cb_param->userdata;
struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
struct q2spi_dma_transfer *xfer;
u32 status = 0;
if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY)
xfer = q2spi->db_xfer;
else
xfer = q2spi->xfer;
if (!xfer || !xfer->rx_buf) {
pr_err("%s rx buf NULL!!!\n", __func__);
return;
}
Q2SPI_DEBUG(q2spi, "%s cb_param:%p cb_param->len:%p cb_param->status:%d\n",
__func__, cb_param, cb_param->length, cb_param->status);
Q2SPI_DEBUG(q2spi, "%s xfer:%p rx_buf:%p rx_dma:%p rx_len:%d m_cmd_param:%d\n",
__func__, xfer, xfer->rx_buf, xfer->rx_dma, xfer->rx_len,
q2spi_pkt->m_cmd_param);
status = cb_param->status; //check status is 0 or EOT for success
if (cb_param->length <= xfer->rx_len) {
xfer->rx_len = cb_param->length;
q2spi_dump_ipc(q2spi, q2spi->ipc, "rx_xfer_completion_event RX",
(char *)xfer->rx_buf, cb_param->length);
complete_all(&q2spi->rx_cb);
q2spi_add_req_to_rx_queue(q2spi, status, q2spi_pkt->m_cmd_param);
} else {
Q2SPI_DEBUG(q2spi, "%s Err length miss-match %d %d\n",
__func__, cb_param->length, xfer->rx_len);
}
}
/* GSI TX DMA completion: wake tx_cb waiters once the full length moved */
static void q2spi_tx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param)
{
struct q2spi_packet *pkt = cb_param->userdata;
struct q2spi_geni *q2spi = pkt->q2spi;
struct q2spi_dma_transfer *xfer = q2spi->xfer;
Q2SPI_DEBUG(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__,
xfer->tx_len, cb_param->length);
/* anything other than an exact-length completion is an error */
if (cb_param->length != xfer->tx_len) {
dev_err(q2spi->dev, "%s length miss-match\n", __func__);
return;
}
Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__);
complete_all(&q2spi->tx_cb);
}
/* Q2SPI_STATUS TCE: wake tx_cb waiters and queue the reported status */
static void q2spi_parse_q2spi_status(struct msm_gpi_dma_async_tx_cb_param *cb_param)
{
struct q2spi_packet *pkt = cb_param->userdata;
struct q2spi_geni *q2spi = pkt->q2spi;
u32 slave_status = cb_param->q2spi_status;
Q2SPI_DEBUG(q2spi, "%s status:%d complete_tx_cb\n", __func__, slave_status);
complete_all(&q2spi->tx_cb);
q2spi_add_req_to_rx_queue(q2spi, slave_status, pkt->m_cmd_param);
}
/* Doorbell CR header from the channel-level GPI callback: wake tx_cb
 * waiters, then hand the CR header event to the doorbell handler.
 */
static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb const *cb)
{
Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__);
complete_all(&q2spi->tx_cb);
q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event);
}
/*
 * q2spi_gsi_tx_callback - dmaengine callback for the GSI TX channel
 * @cb: struct msm_gpi_dma_async_tx_cb_param of the completed descriptor
 *
 * Validates the callback data, then on EOT dispatches either to the TX
 * transfer-completion handler or to the Q2SPI status parser based on the
 * TCE type reported by the GPI driver.
 * Fix vs. original: q2spi_pkt (cb_param->userdata) is NULL-checked before
 * dereference, matching q2spi_gsi_rx_callback().
 */
static void q2spi_gsi_tx_callback(void *cb)
{
struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
struct q2spi_packet *q2spi_pkt;
struct q2spi_geni *q2spi;
if (!cb_param) {
pr_err("%s Err Invalid CB\n", __func__);
return;
}
q2spi_pkt = cb_param->userdata;
if (!q2spi_pkt) {
pr_err("%s Err Invalid packet\n", __func__);
return;
}
q2spi = q2spi_pkt->q2spi;
if (!q2spi) {
pr_err("%s Err Invalid q2spi\n", __func__);
return;
}
if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
dev_err(q2spi->dev, "%s Unexpected CB status\n", __func__);
return;
}
if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) {
dev_err(q2spi->dev, "%s Unexpected GSI CB completion code\n", __func__);
return;
} else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
q2spi_tx_xfer_completion_event(cb_param);
} else if (cb_param->tce_type == QUP_TCE_TYPE_Q2SPI_STATUS) {
Q2SPI_DEBUG(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__);
q2spi_parse_q2spi_status(cb_param);
}
}
}
/* dmaengine callback for the GSI RX channel: validate args, then dispatch
 * on the completion code reported by the GPI driver.
 */
static void q2spi_gsi_rx_callback(void *cb)
{
struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
struct q2spi_packet *q2spi_pkt;
struct q2spi_geni *q2spi;
if (!cb_param) {
pr_err("%s Err Invalid CB\n", __func__);
return;
}
q2spi_pkt = cb_param->userdata;
if (!q2spi_pkt) {
pr_err("%s Err Invalid packet\n", __func__);
return;
}
q2spi = q2spi_pkt->q2spi;
if (!q2spi) {
pr_err("%s Err Invalid q2spi\n", __func__);
return;
}
if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
Q2SPI_ERROR(q2spi, "%s Err cb_status:%d\n", __func__, cb_param->status);
return;
}
switch (cb_param->completion_code) {
case MSM_GPI_TCE_UNEXP_ERR:
Q2SPI_ERROR(q2spi, "%s Err MSM_GPI_TCE_UNEXP_ERR\n", __func__);
return;
case MSM_GPI_TCE_EOT:
Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
/* CR header */
Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
q2spi_rx_xfer_completion_event(cb_param);
}
break;
default:
Q2SPI_DEBUG(q2spi, "%s: Err cb_param->completion_code = %d\n",
__func__, cb_param->completion_code);
break;
}
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
}
/* Release both GSI DMA channels (RX first, as before) and clear the
 * stale handles so a later setup starts clean.
 */
static void q2spi_geni_deallocate_chan(struct q2spi_gsi *gsi)
{
dma_release_channel(gsi->rx_c);
gsi->rx_c = NULL;
dma_release_channel(gsi->tx_c);
gsi->tx_c = NULL;
}
/**
 * q2spi_geni_gsi_setup - allocate and configure the GSI TX/RX DMA channels
 * @q2spi: q2spi controller instance
 *
 * Requests the "tx" and "rx" GPI DMA channels, registers the channel event
 * callback and applies the initial MSM_GPI_INIT slave config on both.
 * On success q2spi->gsi is populated and chan_setup is set.
 *
 * Fix vs. original: the chan_setup guard ran on the freshly zero-allocated
 * gsi (always false) and only after q2spi->gsi had already been
 * overwritten; the guard now checks the existing q2spi->gsi before
 * allocating, preventing a leak/overwrite on a second call.
 *
 * Return: 0 on success, negative errno on failure.
 */
int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
{
struct q2spi_gsi *gsi;
int ret = 0;
/* don't re-allocate over an already configured channel pair */
if (q2spi->gsi && q2spi->gsi->chan_setup) {
Q2SPI_ERROR(q2spi, "%s Err GSI channel already configured\n", __func__);
return ret;
}
gsi = q2spi_kzalloc(q2spi, sizeof(struct q2spi_gsi));
if (!gsi) {
Q2SPI_ERROR(q2spi, "%s Err GSI structure memory alloc failed\n", __func__);
return -ENOMEM;
}
q2spi->gsi = gsi;
Q2SPI_DEBUG(q2spi, "%s gsi:%p\n", __func__, gsi);
gsi->tx_c = dma_request_slave_channel(q2spi->dev, "tx");
if (IS_ERR_OR_NULL(gsi->tx_c)) {
Q2SPI_ERROR(q2spi, "%s Err Failed to get tx DMA ch %ld\n",
__func__, PTR_ERR(gsi->tx_c));
return -EIO;
}
Q2SPI_DEBUG(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c);
gsi->rx_c = dma_request_slave_channel(q2spi->dev, "rx");
if (IS_ERR_OR_NULL(gsi->rx_c)) {
Q2SPI_ERROR(q2spi, "%s Err Failed to get rx DMA ch %ld\n",
__func__, PTR_ERR(gsi->rx_c));
dma_release_channel(gsi->tx_c);
gsi->tx_c = NULL;
return -EIO;
}
Q2SPI_DEBUG(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c);
/* register the channel-level event callback and init the TX channel */
gsi->tx_ev.init.callback = q2spi_gsi_ch_ev_cb;
gsi->tx_ev.init.cb_param = q2spi;
gsi->tx_ev.cmd = MSM_GPI_INIT;
gsi->tx_c->private = &gsi->tx_ev;
ret = dmaengine_slave_config(gsi->tx_c, NULL);
if (ret) {
Q2SPI_ERROR(q2spi, "%s tx dma slave config ret :%d\n", __func__, ret);
goto dmaengine_slave_config_fail;
}
/* same init for the RX channel */
gsi->rx_ev.init.callback = q2spi_gsi_ch_ev_cb;
gsi->rx_ev.init.cb_param = q2spi;
gsi->rx_ev.cmd = MSM_GPI_INIT;
gsi->rx_c->private = &gsi->rx_ev;
ret = dmaengine_slave_config(gsi->rx_c, NULL);
if (ret) {
Q2SPI_ERROR(q2spi, "%s rx dma slave config ret :%d\n", __func__, ret);
goto dmaengine_slave_config_fail;
}
Q2SPI_DEBUG(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi);
q2spi->gsi->chan_setup = true;
return ret;
dmaengine_slave_config_fail:
q2spi_geni_deallocate_chan(gsi);
return ret;
}
/* Pick a source clock and divider that reach @speed_hz for the Q2SPI SE.
 * Writes the matched table index to @clk_idx and the divider to @clk_div,
 * then programs the SE clock to the matched source frequency.
 */
static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_idx, int *clk_div)
{
struct geni_se *se = &q2spi->se;
unsigned long src_freq;
unsigned long actual_freq;
int ret;
Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling),
clk_idx, &src_freq, false);
if (ret) {
Q2SPI_ERROR(q2spi, "%s Err Failed(%d) to find src clk for 0x%x\n",
__func__, ret, speed_hz);
return ret;
}
*clk_div = DIV_ROUND_UP(src_freq, (q2spi->oversampling * speed_hz));
if (!(*clk_div)) {
Q2SPI_ERROR(q2spi, "%s Err sclk:%lu oversampling:%d speed:%u\n",
__func__, src_freq, q2spi->oversampling, speed_hz);
return -EINVAL;
}
actual_freq = src_freq / (*clk_div);
Q2SPI_DEBUG(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n",
__func__, speed_hz, actual_freq, src_freq, *clk_idx, *clk_div);
ret = clk_set_rate(se->clk, src_freq);
if (ret) {
Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret);
return ret;
}
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
return 0;
}
/* 3.10.2.8 Q2SPI */
/* Build the Q2SPI CONFIG0 TRE from the current clock/word settings and
 * mark config0 as programmed.
 */
static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi)
{
struct msm_gpi_tre *c0_tre = &q2spi->gsi->config0_tre;
u8 word_len = MIN_WORD_LEN;
u8 cs_mode = CS_LESS_MODE;
u8 intr_pol = INTR_HIGH_POLARITY;
u8 pack = GSI_TX_PACK_EN | GSI_RX_PACK_EN;
u8 cs_clk_delay = SPI_CS_CLK_DLY;
int clk_div = 0;
int clk_idx = 0;
int ret;
int tdn = S_GP_CNT5_TDN;
int tsn = M_GP_CNT7_TSN;
int tan = M_GP_CNT4_TAN;
int ssn = S_GP_CNT7_SSN;
int cn_delay = M_GP_CNT6_CN_DELAY;
Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
ret = get_q2spi_clk_cfg(q2spi->cur_speed_hz, q2spi, &clk_idx, &clk_div);
if (ret) {
Q2SPI_ERROR(q2spi, "%s Err setting clks:%d\n", __func__, ret);
return ERR_PTR(ret);
}
Q2SPI_DEBUG(q2spi, "%s cs_mode 0x%x word %d pack %d idx %d div %d\n",
__func__, cs_mode, word_len, pack, clk_idx, clk_div);
/* config0 */
c0_tre->dword[0] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode,
intr_pol, word_len);
c0_tre->dword[1] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_delay, ssn);
c0_tre->dword[2] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, clk_idx, clk_div);
c0_tre->dword[3] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
Q2SPI_DEBUG(q2spi, "%s c0_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
__func__, c0_tre->dword[0], c0_tre->dword[1],
c0_tre->dword[2], c0_tre->dword[3]);
q2spi->setup_config0 = true;
return c0_tre;
}
/* 3.10.4.9 Q2SPI */
/* Build the GO TRE for the given Q2SPI command, chaining/link-rx flags
 * depend on the transfer direction.
 */
static struct
msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags, struct q2spi_geni *q2spi)
{
struct msm_gpi_tre *go_tre = &q2spi->gsi->go_tre;
int chain = 0;
int eot = 0;
int eob = 0;
int link_rx = 0;
if (IS_ERR_OR_NULL(go_tre))
return go_tre;
go_tre->dword[0] = MSM_GPI_Q2SPI_GO_TRE_DWORD0(flags, cs, cmd);
go_tre->dword[1] = MSM_GPI_Q2SPI_GO_TRE_DWORD1;
go_tre->dword[2] = MSM_GPI_Q2SPI_GO_TRE_DWORD2(rx_len);
switch (cmd) {
case Q2SPI_RX_ONLY:
/* GO TRE on RX: processing needed check this */
link_rx = 1;
break;
case Q2SPI_TX_ONLY:
/* GO TRE on TX: processing needed check this */
chain = 1;
break;
case Q2SPI_TX_RX:
chain = 1;
link_rx = 1;
break;
}
go_tre->dword[3] = MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob, chain);
Q2SPI_DEBUG(q2spi, "%s rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
__func__, rx_len, flags, cs, cmd, eot, eob, chain);
switch (cmd) {
case Q2SPI_RX_ONLY:
Q2SPI_DEBUG(q2spi, "%s Q2SPI_RX_ONLY\n", __func__);
break;
case Q2SPI_TX_ONLY:
Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_ONLY\n", __func__);
break;
case Q2SPI_TX_RX:
Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_RX_ONLY\n", __func__);
break;
}
Q2SPI_DEBUG(q2spi, "%s go_tre dword[0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
__func__, go_tre->dword[0], go_tre->dword[1], go_tre->dword[2],
go_tre->dword[3]);
return go_tre;
}
/*3.10.5 DMA TRE */
/* Fill a DMA-with-buffer TRE: buffer address split across dword0/1,
 * length in dword2; @is_tx lands in a dword3 flag position.
 */
static struct
msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre, dma_addr_t buf, u32 len,
struct q2spi_geni *q2spi, bool is_tx)
{
if (IS_ERR_OR_NULL(tre))
return tre;
tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
Q2SPI_DEBUG(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
__func__, tre->dword[0], tre->dword[1],
tre->dword[2], tre->dword[3]);
return tre;
}
/*
 * check_gsi_transfer_completion_rx - wait for one RX completion callback
 * @q2spi: q2spi controller instance
 *
 * Fix vs. original: wait_for_completion_timeout() returns an unsigned
 * long (0 on timeout), so "timeout <= 0" was a misleading unsigned
 * comparison; the dead goto/label pair is also removed.
 *
 * Return: 0 when the RX callback signalled rx_cb, -ETIMEDOUT otherwise.
 */
int check_gsi_transfer_completion_rx(struct q2spi_geni *q2spi)
{
int i = 0, ret = 0;
unsigned long timeout = 0, xfer_timeout = 0;
xfer_timeout = XFER_TIMEOUT_OFFSET;
timeout = wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout));
/* returns remaining jiffies, or 0 on timeout */
if (!timeout) {
Q2SPI_ERROR(q2spi, "%s Rx[%d] timeout%lu\n", __func__, i, timeout);
ret = -ETIMEDOUT;
} else {
Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
}
return ret;
}
/*
 * check_gsi_transfer_completion - wait for all queued TX/RX completions
 * @q2spi: q2spi controller instance
 *
 * Waits once per expected TX EOT and once per expected RX EOT. On any
 * timeout, or if the channel callback flagged a GSI error, the TX channel
 * is terminated and config0 will be re-programmed on the next transfer.
 * Fix vs. original: wait_for_completion_timeout() returns an unsigned
 * long (0 on timeout), so "timeout <= 0" was a misleading unsigned
 * comparison.
 *
 * Return: 0 on success, -ETIMEDOUT if any completion timed out.
 */
int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
{
int i = 0, ret = 0;
unsigned long timeout = 0, xfer_timeout = 0;
xfer_timeout = XFER_TIMEOUT_OFFSET;
Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__,
q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot);
for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) {
timeout =
wait_for_completion_timeout(&q2spi->tx_cb, msecs_to_jiffies(xfer_timeout));
/* returns remaining jiffies, or 0 on timeout */
if (!timeout) {
Q2SPI_ERROR(q2spi, "%s Tx[%d] timeout\n", __func__, i);
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
}
Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__);
}
for (i = 0 ; i < q2spi->gsi->num_rx_eot; i++) {
timeout =
wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout));
if (!timeout) {
Q2SPI_ERROR(q2spi, "%s Rx[%d] timeout\n", __func__, i);
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
}
Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
}
err_gsi_geni_transfer:
if (q2spi->gsi->qup_gsi_err) {
Q2SPI_ERROR(q2spi, "%s Err QUP Gsi Error\n", __func__);
q2spi->gsi->qup_gsi_err = false;
q2spi->setup_config0 = false;
dmaengine_terminate_all(q2spi->gsi->tx_c);
}
return ret;
}
/*
 * q2spi_setup_gsi_xfer - queue a Q2SPI transfer on the GSI channels
 * @q2spi_pkt: packet describing the transfer; Q2SPI_RX_ONLY packets use
 *             the dedicated doorbell transfer, others the regular one
 *
 * Builds the TRE chain (CONFIG0 if not yet programmed, GO, DMA TREs),
 * wraps it in scatterlists, submits descriptors on the TX — and, for
 * RX-capable commands, RX — dmaengine channels and issues them.
 *
 * Return: 0 on success, negative errno on setup/submit failure.
 */
int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
{
struct msm_gpi_tre *c0_tre = NULL;
struct msm_gpi_tre *go_tre = NULL;
struct msm_gpi_tre *tx_tre = NULL;
struct msm_gpi_tre *rx_tre = NULL;
struct scatterlist *xfer_tx_sg;
struct scatterlist *xfer_rx_sg;
u8 cs = 0;
u32 tx_rx_len = 0;
int rx_nent = 0;
int tx_nent = 0;
int go_flags = 0;
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct q2spi_geni *q2spi = q2spi_pkt->q2spi;
struct q2spi_dma_transfer *xfer;
u8 cmd;
/* doorbell reads run on the dedicated doorbell transfer */
if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY)
xfer = q2spi->db_xfer;
else
xfer = q2spi->xfer;
cmd = xfer->cmd;
Q2SPI_DEBUG(q2spi, "%s PID=%d xfer:%p\n", __func__, current->pid, xfer);
/* arm fresh completions before anything is queued */
reinit_completion(&q2spi->tx_cb);
reinit_completion(&q2spi->rx_cb);
if (q2spi_pkt->vtype == VARIANT_1_HRF)
reinit_completion(&q2spi->doorbell_up);
Q2SPI_DEBUG(q2spi, "%s cmd:%d q2spi_pkt:%p\n", __func__, cmd, q2spi_pkt);
q2spi->gsi->num_tx_eot = 0;
q2spi->gsi->num_rx_eot = 0;
q2spi->gsi->qup_gsi_err = false;
xfer_tx_sg = q2spi->gsi->tx_sg;
xfer_rx_sg = q2spi->gsi->rx_sg;
c0_tre = &q2spi->gsi->config0_tre;
go_tre = &q2spi->gsi->go_tre;
/* NOTE(review): this first increment reserves the sg slot for c0_tre,
 * which is always linked below (rebuilt only when !setup_config0)
 */
tx_nent++;
if (!q2spi->setup_config0) {
c0_tre = setup_cfg0_tre(q2spi);
if (IS_ERR_OR_NULL(c0_tre)) {
Q2SPI_DEBUG(q2spi, "%s Err setting c0_tre", __func__);
return -EINVAL;
}
}
if (cmd == Q2SPI_TX_ONLY)
tx_rx_len = xfer->tx_data_len;
else
tx_rx_len = xfer->rx_data_len;
go_flags |= Q2SPI_CMD;
go_flags |= (SINGLE_SDR_MODE << Q2SPI_MODE_SHIFT) & Q2SPI_MODE;
go_tre = setup_go_tre(cmd, cs, tx_rx_len, go_flags, q2spi);
if (IS_ERR_OR_NULL(go_tre)) {
Q2SPI_DEBUG(q2spi, "%s Err setting g0_tre", __func__);
return -EINVAL;
}
/* account for GO + DMA TREs per command type */
if (cmd == Q2SPI_TX_ONLY) {
tx_nent += 2;
} else if (cmd == Q2SPI_RX_ONLY) {
tx_nent++;
rx_nent++;
} else if (cmd == Q2SPI_TX_RX) {
tx_nent += 2;
rx_nent++;
}
Q2SPI_DEBUG(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent);
sg_init_table(xfer_tx_sg, tx_nent);
if (rx_nent)
sg_init_table(xfer_rx_sg, rx_nent);
if (c0_tre)
sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
tx_tre = &q2spi->gsi->tx_dma_tre;
tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->tx_len, q2spi, 1);
if (IS_ERR_OR_NULL(tx_tre)) {
Q2SPI_ERROR(q2spi, "%s Err setting up tx tre\n", __func__);
return -EINVAL;
}
sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
q2spi->gsi->num_tx_eot++;
q2spi->gsi->tx_desc = dmaengine_prep_slave_sg(q2spi->gsi->tx_c, q2spi->gsi->tx_sg, tx_nent,
DMA_MEM_TO_DEV, flags);
if (IS_ERR_OR_NULL(q2spi->gsi->tx_desc)) {
Q2SPI_ERROR(q2spi, "%s Err setting up tx desc\n", __func__);
return -EIO;
}
q2spi->gsi->tx_desc->callback = q2spi_gsi_tx_callback;
q2spi->gsi->tx_desc->callback_param = &q2spi->gsi->tx_cb_param;
q2spi->gsi->tx_cb_param.userdata = q2spi_pkt;
q2spi->gsi->tx_cookie = dmaengine_submit(q2spi->gsi->tx_desc);
Q2SPI_DEBUG(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param);
if (dma_submit_error(q2spi->gsi->tx_cookie)) {
Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n",
__func__, q2spi->gsi->tx_cookie);
dmaengine_terminate_all(q2spi->gsi->tx_c);
return -EINVAL;
}
/* NOTE(review): bitwise test here vs. equality tests above — relies on
 * Q2SPI_RX_ONLY's bit being set in Q2SPI_TX_RX as well; confirm the
 * command encoding is a bitmask
 */
if (cmd & Q2SPI_RX_ONLY) {
rx_tre = &q2spi->gsi->rx_dma_tre;
/* NOTE(review): is_tx is passed as 1 for the RX DMA TRE too —
 * appears to set the same dword3 flag as the TX path; confirm
 * this is intended (IEOT on RX?)
 */
rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->rx_len, q2spi, 1);
if (IS_ERR_OR_NULL(rx_tre)) {
Q2SPI_ERROR(q2spi, "%s Err setting up rx tre\n", __func__);
return -EINVAL;
}
sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
q2spi->gsi->rx_desc = dmaengine_prep_slave_sg(q2spi->gsi->rx_c, q2spi->gsi->rx_sg,
rx_nent, DMA_DEV_TO_MEM, flags);
if (IS_ERR_OR_NULL(q2spi->gsi->rx_desc)) {
Q2SPI_ERROR(q2spi, "%s rx_desc fail\n", __func__);
return -EIO;
}
q2spi->gsi->rx_desc->callback = q2spi_gsi_rx_callback;
q2spi->gsi->rx_desc->callback_param = &q2spi->gsi->rx_cb_param;
q2spi->gsi->rx_cb_param.userdata = q2spi_pkt;
q2spi->gsi->num_rx_eot++;
q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->rx_desc);
Q2SPI_DEBUG(q2spi, "%s Rx cb_param:%p\n", __func__,
q2spi->gsi->rx_desc->callback_param);
if (dma_submit_error(q2spi->gsi->rx_cookie)) {
Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n",
__func__, q2spi->gsi->rx_cookie);
dmaengine_terminate_all(q2spi->gsi->rx_c);
return -EINVAL;
}
}
/* issue RX first so the buffer is armed before TX starts the wire */
if (cmd & Q2SPI_RX_ONLY) {
Q2SPI_DEBUG(q2spi, "%s rx_c dma_async_issue_pending\n", __func__);
q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA-RX", (char *)xfer->rx_buf, tx_rx_len);
dma_async_issue_pending(q2spi->gsi->rx_c);
}
if (cmd & Q2SPI_TX_ONLY)
q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA TX", (char *)xfer->tx_buf,
Q2SPI_HEADER_LEN + tx_rx_len);
Q2SPI_DEBUG(q2spi, "%s tx_c dma_async_issue_pending\n", __func__);
dma_async_issue_pending(q2spi->gsi->tx_c);
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
return 0;
}
/* Channel-level GPI event callback, registered through tx_ev/rx_ev in
 * q2spi_geni_gsi_setup(); @ptr is the q2spi_geni instance.
 */
void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *ptr)
{
struct q2spi_geni *q2spi = ptr;
Q2SPI_DEBUG(q2spi, "%s event:%d\n", __func__, cb->cb_event);
switch (cb->cb_event) {
case MSM_GPI_QUP_NOTIFY:
case MSM_GPI_QUP_MAX_EVENT:
/* informational events: log only */
dev_err(q2spi->dev, "%s:cb_ev%d status%llu ts%llu count%llu\n",
__func__, cb->cb_event, cb->status,
cb->timestamp, cb->count);
break;
case MSM_GPI_QUP_ERROR:
case MSM_GPI_QUP_CH_ERROR:
case MSM_GPI_QUP_FW_ERROR:
case MSM_GPI_QUP_PENDING_EVENT:
case MSM_GPI_QUP_EOT_DESC_MISMATCH:
case MSM_GPI_QUP_SW_ERROR:
/* error events: flag so the completion waiter tears the xfer down */
Q2SPI_ERROR(q2spi, "%s cb_ev %d status %llu ts %llu count %llu\n",
__func__, cb->cb_event, cb->status,
cb->timestamp, cb->count);
Q2SPI_ERROR(q2spi, "%s err_routine:%u err_type:%u err.code%u\n",
__func__, cb->error_log.routine, cb->error_log.type,
cb->error_log.error_code);
q2spi->gsi->qup_gsi_err = true;
break;
case MSM_GPI_QUP_CR_HEADER:
/* doorbell from the slave: parse and forward the CR header */
Q2SPI_DEBUG(q2spi, "%s GSI doorbell event\n", __func__);
q2spi_parse_cr_header(q2spi, cb);
break;
default:
break;
}
/* firmware errors additionally get a full SE register dump */
if (cb->cb_event == MSM_GPI_QUP_FW_ERROR) {
q2spi_geni_se_dump_regs(q2spi);
Q2SPI_ERROR(q2spi, "%s dump GSI regs\n", __func__);
}
}

77
drivers/spi/q2spi-gsi.h Normal file
View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SPI_Q2SPI_GPI_H_
#define __SPI_Q2SPI_GPI_H_
/* Q2SPI Config0 TRE */
#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode, intr_pol, word_size) \
(((tsn) << 27) | ((pack) << 24) | \
((tdn) << 14) | ((cs_mode) << 6) | ((intr_pol) << 5) | (word_size))
#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_del, ssn) \
((tan) | ((cs_clk_del) << 8) | ((ssn) << 16))
#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, clk_src, clk_div) (((cn_delay) << 20) | \
((clk_src) << 16) | (clk_div))
#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) \
((0x2 << 20) | (0x2 << 16) | ((link_rx) << 11) | ((bei) << 10) | \
((ieot) << 9) | ((ieob) << 8) | (ch))
/* Q2SPI Go TRE */
#define MSM_GPI_Q2SPI_GO_TRE_DWORD0(flags, cs, cmd) (((flags) << 17) | \
((cs) << 8) | (cmd))
#define MSM_GPI_Q2SPI_GO_TRE_DWORD1 (0)
#define MSM_GPI_Q2SPI_GO_TRE_DWORD2(rx_len) (rx_len)
#define MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) ((0x2 << 20) | \
(0x0 << 16) | ((link_rx) << 11) | ((bei) << 10) | ((ieot) << 9) | \
((ieob) << 8) | (ch))
/**
* struct q2spi_gsi - structure to store gsi information for q2spi driver
*
* @tx_c: TX DMA channel
* @rx_c: RX DMA channel
* @config0_tre: stores config0 tre info
* @go_tre: stores go tre info
* @tx_dma_tre: stores DMA TX tre info
* @rx_dma_tre: stores DMA RX tre info
* @tx_ev: control structure to config gpi dma engine via dmaengine_slave_config() for tx.
* @rx_ev: control structure to config gpi dma engine via dmaengine_slave_config() for rx.
* @tx_desc: async transaction descriptor for tx
* @rx_desc: async transaction descriptor for rx
* @tx_cb_param: gpi specific callback parameters to pass between gpi client and gpi engine for TX.
* @rx_cb_param: gpi specific callback parameters to pass between gpi client and gpi engine for RX.
* @chan_setup: flag to mark channel setup completion.
* @tx_sg: sg table for TX transfers
* @rx_sg: sg table for RX transfers
* @tx_cookie: Represents dma tx cookie
* @rx_cookie: Represents dma rx cookie
* @num_tx_eot: Represents number of TX End of Transfers
* @num_rx_eot: Represents number of RX End of Transfers
* @qup_gsi_err: flag to represent gsi error if any
*/
struct q2spi_gsi {
struct dma_chan *tx_c;
struct dma_chan *rx_c;
struct msm_gpi_tre config0_tre;
struct msm_gpi_tre go_tre;
struct msm_gpi_tre tx_dma_tre;
struct msm_gpi_tre rx_dma_tre;
struct msm_gpi_ctrl tx_ev;
struct msm_gpi_ctrl rx_ev;
struct dma_async_tx_descriptor *tx_desc;
struct dma_async_tx_descriptor *rx_desc;
struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
bool chan_setup;
struct scatterlist tx_sg[3];
struct scatterlist rx_sg[3];
dma_cookie_t tx_cookie;
dma_cookie_t rx_cookie;
int num_tx_eot;
int num_rx_eot;
bool qup_gsi_err;
};
#endif /* __SPI_Q2SPI_GPI_H_ */

3031
drivers/spi/q2spi-msm-geni.c Normal file

File diff suppressed because it is too large Load Diff

556
drivers/spi/q2spi-msm.h Normal file
View File

@ -0,0 +1,556 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _SPI_Q2SPI_H_
#define _SPI_Q2SPI_H_
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/ipc_logging.h>
#include <linux/kthread.h>
#include <linux/msm_gpi.h>
#include <linux/poll.h>
//#include <linux/qcom-geni-se.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/qcom-geni-se-common.h>
#include <linux/types.h>
#include <uapi/linux/q2spi/q2spi.h>
#include "q2spi-gsi.h"
/* Buffer sizing and transfer limits */
#define DATA_WORD_LEN 4
#define SMA_BUF_SIZE 4096
#define MAX_CR_SIZE 24 /* Max CR size is 24 bytes per CR */
#define MAX_RX_CRS 4
#define RX_DMA_CR_BUF_SIZE (MAX_CR_SIZE * MAX_RX_CRS)
#define Q2SPI_MAX_BUF 2
#define XFER_TIMEOUT_OFFSET 500
#define TIMEOUT_MSECONDS 10 /* 10 milliseconds */
#define RETRIES 1
#define Q2SPI_MAX_DATA_LEN 4096
/* Host commands */
#define HC_DB_REPORT_LEN_READ 1
#define HC_DB_REPORT_BODY_READ 2
#define HC_ABORT 3
#define HC_DATA_READ 5
#define HC_DATA_WRITE 6
#define HC_SMA_READ 5
#define HC_SMA_WRITE 6
#define HC_SOFT_RESET 0xF
/* Flow direction: client->master (CM) vs master->client (MC) */
#define CM_FLOW 1
#define MC_FLOW 0
#define CLIENT_INTERRUPT 1 /* TODO: confirm value against Q2SPI spec */
#define SEGMENT_LST 1 /* TODO: confirm value against Q2SPI spec */
#define LOCAL_REG_ACCESS 0
#define SYSTEM_MEMORY_ACCESS 1
#define CLIENT_ADDRESS 1
#define NO_CLIENT_ADDRESS 0
#define HC_SOFT_RESET_FLAGS 0xF
#define HC_SOFT_RESET_CODE 0x2
/* Client Requests */
#define ADDR_LESS_WR_ACCESS 3
#define ADDR_LESS_RD_ACCESS 4
#define BULK_ACCESS_STATUS 8
#define Q2SPI_HEADER_LEN 7 /* 7-byte header, excluding the checksum used in SW */
#define DMA_Q2SPI_SIZE 2048
#define MAX_DW_LEN_1 4 /* 4 DW len */
#define MAX_DW_LEN_2 1024 /* for 1K DW len */
#define CS_LESS_MODE 0
#define INTR_HIGH_POLARITY 1
#define MAX_TX_SG (3)
#define NUM_Q2SPI_XFER (10)
/* Transaction-ID (tid) allocation range handed to the idr allocator */
#define Q2SPI_START_TID_ID (0)
#define Q2SPI_END_TID_ID (8)
/* Q2SPI specific SE GENI registers */
#define IO_MACRO_IO3_DATA_IN_SEL_MASK GENMASK(15, 14)
#define IO_MACRO_IO3_DATA_IN_SEL_SHIFT 14
#define IO_MACRO_IO3_DATA_IN_SEL 1
#define SE_SPI_TRANS_CFG 0x25c
#define CS_TOGGLE BIT(1)
#define SPI_NOT_USED_CFG1 BIT(2)
#define SE_SPI_PRE_POST_CMD_DLY 0x274
#define SPI_DELAYS_COUNTERS 0x278
#define M_GP_CNT4_TAN 0
#define M_GP_CNT4_TAN_MASK GENMASK(9, 0)
#define M_GP_CNT5_TE2D GENMASK(19, 10)
#define M_GP_CNT5_TE2D_SHIFT 10
#define M_GP_CNT6_CN GENMASK(29, 20)
#define M_GP_CNT6_CN_SHIFT 20
#define SE_GENI_CFG_REG95 0x27C
#define M_GP_CNT7 GENMASK(9, 0)
#define M_GP_CNT7_TSN 0
#define SPI_INTER_WORDS_DLY 0
//#define SPI_CS_CLK_DLY 0x50 //80 from VI SW
#define SPI_CS_CLK_DLY 0x80 //128 from ganges SW
#define SPI_PIPE_DLY_TPM 0x320 //800 from VI SW
#define SE_GENI_CFG_REG103 0x29C
#define S_GP_CNT5 GENMASK(19, 10)
#define S_GP_CNT5_SHIFT 10
#define S_GP_CNT5_TDN 0
#define SE_GENI_CFG_REG104 0x2A0
#define S_GP_CNT7 GENMASK(9, 0)
//#define S_GP_CNT7_SSN 0x50 //80 from VI SW
#define S_GP_CNT7_SSN 0x80 //128 from ganges SW
//#define M_GP_CNT6_CN_DELAY 0x3f //63 from VI SW
#define M_GP_CNT6_CN_DELAY 0x50 //trying with 80 from SW
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
#define MIN_WORD_LEN 4
#define NUMBER_OF_DATA_LINES GENMASK(1, 0)
#define PARAM_14 BIT(14)
#define SE_GENI_CGC_CTRL 0x28
#define SE_GENI_CFG_SEQ_START 0x84
#define SE_GENI_CFG_STATUS 0x88
#define SE_UART_TX_TRANS_CFG 0x25C
#define CFG_SEQ_DONE BIT(1)
#define SPI_CS_CLK_DL 0
#define SPI_PRE_POST_CMD_DLY 0
#define SE_SPI_CPHA 0x224
#define CPHA BIT(0)
#define SE_SPI_CPOL 0x230
#define CPOL BIT(2)
#define SPI_LSB_TO_MSB 0
#define SPI_MSB_TO_LSB 1
#define SE_SPI_TX_TRANS_LEN 0x26c
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK GENMASK(23, 0)
/* HRF FLOW Info */
#define HRF_ENTRY_OPCODE 3
#define HRF_ENTRY_TYPE 3
#define HRF_ENTRY_FLOW 0
#define HRF_ENTRY_PARITY 0
#define HRF_ENTRY_DATA_LEN 16 //HRF entry always has DW=3
#define LRA_SINGLE_REG_LENGTH 4
/* M_CMD OP codes for Q2SPI */
#define Q2SPI_TX_ONLY (1)
#define Q2SPI_RX_ONLY (2)
#define Q2SPI_TX_RX (7)
/* M_CMD params for Q2SPI */
#define PRE_CMD_DELAY BIT(0)
#define TIMESTAMP_BEFORE BIT(1)
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
#define Q2SPI_MODE GENMASK(11, 8)
#define Q2SPI_MODE_SHIFT 8
#define SINGLE_SDR_MODE 0
#define Q2SPI_CMD BIT(14)
#define CS_MODE CS_LESS_MODE
#define Q2SPI_INTR_POL INTR_HIGH_POLARITY
/* NOTE(review): lowercase "_size" breaks UPPER_SNAKE macro convention; renaming
 * would break existing users in the .c file, so left as-is. */
#define CR_BULK_DATA_size 1
#define CR_DMA_DATA_size 7
/* max Minor devices */
#define MAX_DEV 2
#define DEVICE_NAME_MAX_LEN 64
/* Q2SPI specific configuration */
#define QSPI_NUM_CS 2
#define QSPI_BYTES_PER_WORD 4
//#define Q2SPI_LSB_FIRST _BITUL(3) /* per-word bits-on-wire */
/*
 * Logging helpers: each macro mirrors the message to the driver's IPC log
 * context and to the q2spi trace log when a device pointer is available.
 * Q2SPI_INFO additionally emits to the kernel log via pr_info(); callers must
 * pass a literal format string as the first vararg.
 */
#define Q2SPI_INFO(q2spi_ptr, x...) do { \
	if (q2spi_ptr) { \
		ipc_log_string(q2spi_ptr->ipc, x); \
		if (q2spi_ptr->dev) \
			q2spi_trace_log(q2spi_ptr->dev, x); \
		pr_info(x); \
	} \
} while (0)
#define Q2SPI_DEBUG(q2spi_ptr, x...) do { \
	if (q2spi_ptr) { \
		GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \
		if (q2spi_ptr->dev) \
			q2spi_trace_log(q2spi_ptr->dev, x); \
	} \
} while (0)
#define Q2SPI_ERROR(q2spi_ptr, x...) do { \
	if (q2spi_ptr) { \
		GENI_SE_ERR(q2spi_ptr->ipc, true, q2spi_ptr->dev, x); \
		if (q2spi_ptr->dev) \
			q2spi_trace_log(q2spi_ptr->dev, x); \
	} \
} while (0)
#define DATA_BYTES_PER_LINE (64)
#define CHUNK_SIZE (16)
/* global storage for device Major number */
/*
 * NOTE(review): `static` definitions in a header give every including
 * translation unit its own private copy. This only works as "global storage"
 * if exactly one .c file includes this header; otherwise these should be
 * `extern` declarations with a single definition in the driver .c file.
 */
static int q2spi_cdev_major;
static int q2spi_alloc_count;
/*
 * Abort reason codes — presumably carried in the 4-bit `code` field of
 * struct q2spi_host_abort_pkt; TODO confirm against the Q2SPI spec.
 */
enum abort_code {
	TERMINATE_CMD = 0,
	ERR_DUPLICATE_ID = 1,
	ERR_NOT_VALID = 2,
	ERR_ACCESS_BLOCKED = 3,
	ERR_DWLEN = 4,
	OTHERS = 5,
};
/*
 * struct q2spi_mc_hrf_entry - MC-flow HRF entry layout (wire format).
 * The 20-bit DW length is split across three bitfields (dwlen_part1 low
 * nibble, then dwlen_part2, dwlen_part3).
 * NOTE(review): bitfield ordering within a storage unit is
 * implementation-defined in C; this layout assumes the compiler packs
 * LSB-first as GCC does on the target — do not reorder members.
 */
struct q2spi_mc_hrf_entry {
	u8 cmd:4;
	u8 flow:1;
	u8 type:2;
	u8 parity:1;
	u8 resrv_0:4;
	u8 flow_id:4;
	u8 resrv_1:4;
	u8 dwlen_part1:4;
	u8 dwlen_part2:8;
	u8 dwlen_part3:8;
	u8 arg1:8;
	u8 arg2:8;
	u8 arg3:8;
	u8 reserved[8];
};
/**
 * struct q2spi_cr_header - header of a client request (CR)
 * @cmd: client request command code
 * @flow: flow direction of cr hdr, 1: CM flow, 0: MC flow
 * @type: CR type field
 * @parity: parity bit for the header
 */
struct q2spi_cr_header {
	u8 cmd:4;
	u8 flow:1;
	u8 type:2;
	u8 parity:1;
};
/* Client bulk-access status packet (wire format, spec section 4.2.2.5) */
struct q2spi_client_bulk_access_pkt {
	u8 cmd:4;
	u8 flow:1;
	u8 rsvd:2;
	u8 parity:1;
	u8 status:4;
	u8 flow_id:4;
	u8 reserved[2];
};
/*
 * Client DMA packet (wire format). The 20-bit DW length is split across
 * dw_len_part1 (low nibble), dw_len_part2 and dw_len_part3.
 */
struct q2spi_client_dma_pkt {
	u8 seg_len:4;
	u8 flow_id:4;
	u8 interrupt:1;
	u8 seg_last:1;
	u8 channel:2;
	u8 dw_len_part1:4;
	u8 dw_len_part2:8;
	u8 dw_len_part3:8;
	u8 arg1:8;
	u8 arg2:8;
	u8 arg3:8;
};
/* Host command Variant 1 packet (wire format, spec section 4.4.3.1) */
struct q2spi_host_variant1_pkt {
	u8 cmd:4;
	u8 flow:1;
	u8 interrupt:1;
	u8 seg_last:1;
	u8 rsvd:1;
	u8 dw_len:2;
	u8 access_type:1;
	u8 address_mode:1;
	u8 flow_id:4;
	u8 reg_offset;
	u8 reserved[4];
	u8 data_buf[16];
	u8 status;
};
/*
 * Host command Variant 4/5 packet (wire format, spec section 4.4.3.3).
 * The 10-bit DW length is split across dw_len_part1/dw_len_part2.
 */
struct q2spi_host_variant4_5_pkt {
	u8 cmd:4;
	u8 flow:1;
	u8 interrupt:1;
	u8 seg_last:1;
	u8 rsvd:1;
	u8 dw_len_part1:2;
	u8 access_type:1;
	u8 address_mode:1;
	u8 flow_id:4;
	u8 dw_len_part2;
	u8 rsvd_1[4];
	u8 data_buf[4096];
	u8 status;
};
/* Host abort command packet (wire format, spec section 4.4.4); code is an enum abort_code value */
struct q2spi_host_abort_pkt {
	u8 cmd:4;
	u8 rsvd:4;
	u8 code:4;
	u8 flow_id:4;
	u8 reserved[5];
};
/* Host soft-reset command packet (wire format, spec section 4.4.6.2) */
struct q2spi_host_soft_reset_pkt {
	u8 cmd:4;
	u8 flags:4;
	u8 code:4;
	u8 flow_id:4;
	u8 reserved[5];
};
/* Client-request variant types seen in received CRs */
enum cr_var_type {
	VARIANT_T_3 = 1, /* T:3 DMA CR type */
	VARIANT_T_4 = 2, /* TODO: confirm against spec */
	VARIANT_T_5 = 3, /* TODO: confirm against spec */
};
/* Host-command packet variant types used when building q2spi_packet */
enum var_type {
	VARIANT_1 = 1,
	VARIANT_2 = 2,
	VARIANT_3 = 3,
	VARIANT_4 = 4,
	VARIANT_5 = 5,
	VARIANT_1_HRF = 6,
	VAR_ABORT = 7,
	VAR_SOFT_RESET = 8,
};
/**
 * struct q2spi_chrdev - structure for character device
 * @q2spi_dev: q2spi device number (major/minor pair)
 * @cdev: per-minor cdev instances (MAX_DEV entries)
 * @major: major number of q2spi device
 * @minor: minor number of q2spi device
 * @dev: basic device structure.
 * @dev_name: name of the device
 * @class_dev: pointer to char dev class
 * @q2spi_class: pointer to q2spi class
 */
struct q2spi_chrdev {
	dev_t q2spi_dev;
	struct cdev cdev[MAX_DEV];
	int major;
	int minor;
	struct device *dev;
	char dev_name[DEVICE_NAME_MAX_LEN];
	struct device *class_dev;
	struct class *q2spi_class;
};
/**
 * struct q2spi_dma_transfer - q2spi transfer dmadata
 * @tx_buf: TX data buffer
 * @rx_buf: RX data buffer
 * @tx_len: length of the Tx transfer
 * @rx_len: length of the rx transfer
 * @tx_data_len: length of the payload portion of the Tx transfer
 * @rx_data_len: length of the payload portion of the Rx transfer
 * @tx_dma: dma pointer for Tx transfer
 * @rx_dma: dma pointer for Rx transfer
 * @cmd: q2spi cmd type
 * @tid: Unique Transaction ID. Used for q2spi messages.
 *       (NOTE(review): appears to double as flow_id — confirm)
 * @queue: list node for queuing this transfer
 */
struct q2spi_dma_transfer {
	void *tx_buf;
	void *rx_buf;
	unsigned int tx_len;
	unsigned int rx_len;
	unsigned int tx_data_len;
	unsigned int rx_data_len;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	enum cmd_type cmd;
	int tid;
	struct list_head queue;
};
/**
 * struct q2spi_geni - structure to store Q2SPI GENI information
 * @wrapper_dev: qupv3 wrapper device pointer
 * @dev: q2spi device pointer
 * @base: pointer to ioremap()'d registers
 * @m_ahb_clk: master ahb clock for the controller
 * @s_ahb_clk: slave ahb clock for the controller
 * @se_clk: serial engine clock
 * @geni_pinctrl: pin-controller's instance
 * @geni_gpio_active: active state pin control
 * @geni_gpio_sleep: sleep state pin control
 * @chrdev: cdev structure
 * @se: stores info parsed from device tree
 * @gsi: stores GSI structure information
 * @qup_gsi_err: flag to represent gsi error if any
 * @xfer: reference to q2spi_dma_transfer structure
 * @db_xfer: reference to q2spi_dma_transfer structure for doorbell
 * @req: reference to q2spi request structure
 * @c_req: reference to q2spi client request structure
 * @setup_config0: used to mark config0 setup completion
 * @irq: IRQ of the SE
 * @tx_queue_list: list of queued TX transfers
 * @rx_queue_list: list of queued RX transfers
 * @cr_queue_list: list of received CR packets
 * @hc_cr_queue_list: list of CRs tied to host commands
 * @kworker: kthread worker servicing @send_messages
 * @send_messages: work item that pushes queued messages out
 * @gsi_lock: lock to protect gsi operations one at a time
 * @txn_lock: lock to protect transfer id allocation and free
 * @queue_lock: lock to protect HC operations one at a time
 * @cr_queue_lock: lock to protect CR operations one at a time
 * @max_speed_hz: maximum supported bus speed
 * @cur_speed_hz: currently configured bus speed
 * @oversampling: oversampling value used for clock configuration
 * @xfer_mode: requested transfer mode
 * @cur_xfer_mode: currently active transfer mode
 * @gsi_mode: GSI mode enabled
 * @q2spi_buf: scratch buffer — usage TODO confirm in driver .c
 * @cmd_done: flag marking command completion
 * @tx_cb: completion signalled from the TX GSI callback
 * @rx_cb: completion signalled from the RX GSI callback
 * @rx_avail: count of RX data available to readers
 * @tid_idr: tid id allocator
 * @readq: waitqueue for rx data.
 * @rx_buf: RX DMA buffer virtual address
 * @rx_dma: RX DMA buffer bus address
 * @hrf_flow: HRF flow in progress
 * @doorbell_up: completion signalled when doorbell processing finishes
 * @var1_buf: pool of variant-1 packet buffers
 * @var1_dma_buf: DMA addresses for @var1_buf
 * @var5_buf: pool of variant-5 packet buffers
 * @var5_dma_buf: DMA addresses for @var5_buf
 * @cr_buf: pool of CR buffers
 * @cr_dma_buf: DMA addresses for @cr_buf
 * @var1_buf_used: in-use markers for @var1_buf entries
 * @var5_buf_used: in-use markers for @var5_buf entries
 * @cr_buf_used: in-use markers for @cr_buf entries
 * @bulk_buf: pool of bulk-access buffers
 * @bulk_dma_buf: DMA addresses for @bulk_buf
 * @bulk_buf_used: in-use markers for @bulk_buf entries
 * @dma_buf: DMA address of the scratch buffer
 * @sync_wait: completion for synchronous transfers
 * @init: driver initialization complete
 * @ipc: IPC logging context
 * @q2spi_doorbell_work: work item handling doorbell events
 * @doorbell_wq: workqueue running @q2spi_doorbell_work
 * @cr_pkt: most recent CR packet
 * @doorbell_setup: doorbell RX channel has been set up
 * @q2spi_cr_hdr_event: copy of the last CR header event from GSI
 * @read_wq: waitqueue for blocking readers
 */
struct q2spi_geni {
	struct device *wrapper_dev;
	struct device *dev;
	void __iomem *base;
	struct clk *m_ahb_clk;
	struct clk *s_ahb_clk;
	struct clk *se_clk;
	struct pinctrl *geni_pinctrl;
	struct pinctrl_state *geni_gpio_active;
	struct pinctrl_state *geni_gpio_sleep;
	struct q2spi_chrdev chrdev;
	struct geni_se se;
	struct q2spi_gsi *gsi;
	bool qup_gsi_err;
	struct q2spi_dma_transfer *xfer;
	struct q2spi_dma_transfer *db_xfer;
	struct q2spi_request *req;
	struct q2spi_client_request *c_req;
	bool setup_config0;
	int irq;
	struct list_head tx_queue_list;
	struct list_head rx_queue_list;
	struct list_head cr_queue_list;
	struct list_head hc_cr_queue_list;
	struct kthread_worker *kworker;
	struct kthread_work send_messages;
	/* lock to protect gsi operations one at a time */
	struct mutex gsi_lock;
	/* lock to protect transfer id allocation and free */
	spinlock_t txn_lock;
	/* lock to protect HC operations one at a time*/
	struct mutex queue_lock;
	/* lock to protect CR of operations one at a time*/
	spinlock_t cr_queue_lock;
	u32 max_speed_hz;
	u32 cur_speed_hz;
	int oversampling;
	int xfer_mode;
	int cur_xfer_mode;
	bool gsi_mode; /* GSI Mode */
	void *q2spi_buf;
	bool cmd_done;
	struct completion tx_cb;
	struct completion rx_cb;
	atomic_t rx_avail;
	struct idr tid_idr;
	wait_queue_head_t readq;
	void *rx_buf;
	dma_addr_t rx_dma;
	bool hrf_flow;
	struct completion doorbell_up;
	void *var1_buf[Q2SPI_MAX_BUF];
	dma_addr_t var1_dma_buf[Q2SPI_MAX_BUF];
	void *var5_buf[Q2SPI_MAX_BUF];
	dma_addr_t var5_dma_buf[Q2SPI_MAX_BUF];
	void *cr_buf[Q2SPI_MAX_BUF];
	dma_addr_t cr_dma_buf[Q2SPI_MAX_BUF];
	void *var1_buf_used[Q2SPI_MAX_BUF];
	void *var5_buf_used[Q2SPI_MAX_BUF];
	void *cr_buf_used[Q2SPI_MAX_BUF];
	void *bulk_buf[Q2SPI_MAX_BUF];
	dma_addr_t bulk_dma_buf[Q2SPI_MAX_BUF];
	void *bulk_buf_used[Q2SPI_MAX_BUF];
	dma_addr_t dma_buf;
	struct completion sync_wait;
	bool init;
	void *ipc;
	struct work_struct q2spi_doorbell_work;
	struct workqueue_struct *doorbell_wq;
	struct q2spi_cr_packet *cr_pkt;
	bool doorbell_setup;
	struct qup_q2spi_cr_header_event q2spi_cr_hdr_event;
	wait_queue_head_t read_wq;
};
/**
 * struct q2spi_cr_packet - structure for Q2SPI CR packet
 *
 * @cr_hdr: CR headers, one per CR in the event (up to 4)
 * @var3_pkt: q2spi_client_dma_pkt payload (4.2.2.3 Variant 4 T=3)
 * @bulk_pkt: q2spi_client_bulk_access_pkt payload (4.2.2.5 Bulk Access Status)
 * @vtype: variant type.
 * @hrf_flow_id: flow id used for transaction.
 * @list: list for CR packets.
 * @no_of_valid_crs: number of valid CR headers in @cr_hdr
 * @type: per-CR kind bitmap, 2 bits each: 01 -> bulk, 02 -> var3 (01 10 10 01)
 * @xfer: transfer this CR packet belongs to
 */
struct q2spi_cr_packet {
	struct q2spi_cr_header *cr_hdr[4];
	struct q2spi_client_dma_pkt var3_pkt; /* 4.2.2.3 Variant 4 T=3 */
	struct q2spi_client_bulk_access_pkt bulk_pkt; /* 4.2.2.5 Bulk Access Status */
	enum cr_var_type vtype;
	u8 hrf_flow_id;
	struct list_head list;
	int no_of_valid_crs;
	u8 type; /* 01 -> bulk, 02 -> var3 (01 10 10 01) */
	struct q2spi_dma_transfer *xfer;
};
/**
 * struct q2spi_packet - structure for Q2SPI packet
 *
 * @m_cmd_param: cmd corresponding to q2spi_packet
 * @var1_pkt: pointer for HC variant1_pkt structure
 * @var4_pkt: pointer for HC_variant4_5_pkt structure
 * @var5_pkt: pointer for HC variant4_5_pkt structure
 * @abort_pkt: pointer for abort_pkt structure
 * @soft_reset_pkt: pointer for q2spi_soft_reset_pkt structure
 * @vtype: variant type.
 * @valid: packet valid or not.
 * @hrf_flow_id: flow id used for transaction.
 * @status: success or failure xfer status
 * @var1_tx_dma: variant_1 tx_dma buffer pointer
 * @var5_tx_dma: variant_5 tx_dma buffer pointer
 * @sync: sync or async mode of transfer
 * @q2spi: pointer for q2spi_geni structure
 * @list: list for hc packets.
 * @in_use: Represents if packet is under use
 * @data_length: Represents data length of the packet transfer
 */
struct q2spi_packet {
	unsigned int m_cmd_param;
	struct q2spi_host_variant1_pkt *var1_pkt; /* 4.4.3.1 Variant 1 */
	struct q2spi_host_variant4_5_pkt *var4_pkt; /*4.4.3.3 Variant 4 */
	struct q2spi_host_variant4_5_pkt *var5_pkt; /*4.4.3.3 Variant 5 */
	struct q2spi_host_abort_pkt *abort_pkt; /* 4.4.4 Abort Command */
	struct q2spi_host_soft_reset_pkt *soft_reset_pkt; /*4.4.6.2 Soft Reset Command */
	enum var_type vtype;
	bool valid;
	u8 hrf_flow_id;
	enum xfer_status status;
	dma_addr_t var1_tx_dma;
	dma_addr_t var5_tx_dma;
	bool sync;
	struct q2spi_geni *q2spi;
	struct list_head list;
	bool in_use;
	unsigned int data_length;
};
/* Doorbell / GSI event entry points, implemented in the q2spi driver .c files */
void q2spi_doorbell(struct q2spi_geni *q2spi, const struct qup_q2spi_cr_header_event *event);
void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *ptr);
/* Debug / logging helpers */
void q2spi_geni_se_dump_regs(struct q2spi_geni *q2spi);
void q2spi_dump_ipc(struct q2spi_geni *q2spi, void *ipc_ctx, char *prefix, char *str, int size);
void q2spi_add_req_to_rx_queue(struct q2spi_geni *q2spi, u32 status, u32 cmd);
void q2spi_trace_log(struct device *dev, const char *fmt, ...);
void dump_ipc(struct q2spi_geni *q2spi, void *ctx, char *prefix, char *str, int size);
/* Allocation helpers; callers own the returned memory and release it via q2spi_kfree() */
void *q2spi_kzalloc(struct q2spi_geni *q2spi, int size);
void q2spi_kfree(struct q2spi_geni *q2spi, void *ptr);
/* GSI setup and completion-wait helpers */
int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt);
int q2spi_alloc_xfer_tid(struct q2spi_geni *q2spi);
int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi);
int check_gsi_transfer_completion(struct q2spi_geni *q2spi);
int check_gsi_transfer_completion_rx(struct q2spi_geni *q2spi);
#endif /* _SPI_Q2SPI_H_ */

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef _SPI_Q2SPI_SLAVE_H_
#define _SPI_Q2SPI_SLAVE_H_
/*
 * Q2SPI client (slave) register map.
 * NOTE(review): offsets below appear to be relative to Q2SPI_SLAVE_BASE —
 * confirm against the client device's register documentation.
 */
#define Q2SPI_SLAVE_BASE 0x42808000
#define Q2SPI_OFFSET_MASK 0x4
#define Q2SPI_HW_VERSION 0x00000000
#define Q2SPI_DRIVER_VERSION 0x00000004
#define Q2SPI_OP_MODE1 0x00000010
#define Q2SPI_OP_MODE2 0x00000014
#define Q2SPI_HRF_PUSH_ADDRESS 0x00000018
#define Q2SPI_CAP0 0x00000024
#define Q2SPI_CAP1 0x00000028
#define Q2SPI_SCRATCH0 0x00000030
#define Q2SPI_SCRATCH1 0x00000034
#define Q2SPI_SCRATCH2 0x00000038
#define Q2SPI_SCRATCH3 0x0000003C
#define Q2SPI_DB_STATUS 0x00000040
#define Q2SPI_ABORT_STATUS 0x00000044
#define Q2SPI_CLIENT_STATE 0x00000048
#define Q2SPI_RUNTIME_STATUS 0x0000004C
#define Q2SPI_TDB_FREE_SPACE 0x00000050
#define Q2SPI_SLAVE_ERROR 0x00000054
#define Q2SPI_HDR_ERROR 0x00000058
#define Q2SPI_ERROR_EN 0x0000005C
/* SMA data window: n selects one of the 32-bit data words */
#define Q2SPI_SMA_DATA(n) (0x00000070 + (0x4 * (n)))
#define Q2SPI_SMA_ADDR1 0x00000080
#define Q2SPI_SMA_ADDR2 0x00000084
#define Q2SPI_SMA_CTRL 0x00000088
#define Q2SPI_PURGE_COMPLETE 0x0000008C
#define Q2SPI_HOST_CFG 0x00000090
#endif /* _SPI_Q2SPI_SLAVE_H_ */

35
drivers/spi/q2spi-trace.h Normal file
View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM q2spi_trace
#if !defined(_TRACE_Q2SPI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_Q2SPI_TRACE_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#define MAX_MSG_LEN 256
TRACE_EVENT(q2spi_log_info,
TP_PROTO(const char *name, struct va_format *vaf),
TP_ARGS(name, vaf),
TP_STRUCT__entry(__string(name, name)
__dynamic_array(char, msg, MAX_MSG_LEN)),
TP_fast_assign(__assign_str(name, name);
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), MAX_MSG_LEN,
vaf->fmt, *vaf->va) >= MAX_MSG_LEN);),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
#endif /* _TRACE_Q2SPI_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE q2spi-trace
#include <trace/define_trace.h>

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __MSM_GPI_H_
@ -12,6 +13,15 @@ struct __packed msm_gpi_tre {
u32 dword[4];
};
/* GPI event-ring event type codes reported by the GSI hardware */
enum GPI_EV_TYPE {
	XFER_COMPLETE_EV_TYPE = 0x22,
	IMMEDIATE_DATA_EV_TYPE = 0x30,
	QUP_NOTIF_EV_TYPE = 0x31,
	STALE_EV_TYPE = 0xFF,
	QUP_TCE_TYPE_Q2SPI_STATUS = 0x35, /* Q2SPI status TCE */
	QUP_TCE_TYPE_Q2SPI_CR_HEADER = 0x36, /* Q2SPI CR header TCE */
};
enum msm_gpi_tre_type {
MSM_GPI_TRE_INVALID = 0x00,
MSM_GPI_TRE_NOP = 0x01,
@ -202,6 +212,7 @@ enum msm_gpi_cb_event {
MSM_GPI_QUP_PENDING_EVENT,
MSM_GPI_QUP_EOT_DESC_MISMATCH,
MSM_GPI_QUP_SW_ERROR,
MSM_GPI_QUP_CR_HEADER,
MSM_GPI_QUP_MAX_EVENT,
};
@ -211,12 +222,32 @@ struct msm_gpi_error_log {
u32 error_code;
};
/*
 * Q2SPI CR header event payload delivered by the GPI event ring
 * (event type QUP_TCE_TYPE_Q2SPI_CR_HEADER).
 * NOTE(review): mixes u8 and u32 bitfield base types within one packed
 * layout; field packing across base-type boundaries is
 * implementation-defined in C — confirm the on-wire layout on the target
 * compiler before changing any member.
 */
struct __packed qup_q2spi_cr_header_event {
	u32 cr_hdr_0 : 8;
	u32 cr_hdr_1 : 8;
	u32 cr_hdr_2 : 8;
	u32 cr_hdr_3 : 8;
	u32 cr_ed_byte_0 : 8;
	u32 cr_ed_byte_1 : 8;
	u32 cr_ed_byte_2 : 8;
	u32 cr_ed_byte_3 : 8;
	u32 reserved0 : 24;
	u8 code : 8;
	u32 byte0_len : 4;
	u32 reserved1 : 3;
	u32 byte0_err : 1;
	u32 reserved2 : 8;
	u8 type : 8;
	u8 ch_id : 8;
};
/* Callback payload passed from the GPI engine to its client drivers */
struct msm_gpi_cb {
	enum msm_gpi_cb_event cb_event;
	u64 status;
	u64 timestamp;
	u64 count;
	struct msm_gpi_error_log error_log;
	/* valid when cb_event == MSM_GPI_QUP_CR_HEADER */
	struct __packed qup_q2spi_cr_header_event q2spi_cr_header_event;
};
struct dma_chan;
@ -261,6 +292,8 @@ struct msm_gpi_dma_async_tx_cb_param {
u32 status;
struct __packed msm_gpi_tre imed_tre;
void *userdata;
enum GPI_EV_TYPE tce_type;
u32 q2spi_status:8;
};
/* Client drivers of the GPI can call this function to dump the GPI registers

View File

@ -44,6 +44,7 @@ if (print) { \
/* In KHz */
#define DEFAULT_SE_CLK 19200
#define SPI_CORE2X_VOTE 51000
#define Q2SPI_CORE2X_VOTE 100000
#define I2C_CORE2X_VOTE 50000
#define I3C_CORE2X_VOTE 19200
#define APPS_PROC_TO_QUP_VOTE 140000
@ -56,6 +57,7 @@ if (print) { \
#define SE_DMA_TX_LEN (0xC3C)
#define SE_DMA_TX_IRQ_EN (0xC48)
#define SE_DMA_TX_LEN_IN (0xC54)
#define GENI_SE_DMA_EOT_BUF (BIT(0))
#define SE_DMA_RX_PTR_L (0xD30)
#define SE_DMA_RX_PTR_H (0xD34)

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _LINUX_QCOM_GENI_SE
@ -37,6 +37,7 @@ enum geni_se_protocol_type {
GENI_SE_I2C,
GENI_SE_I3C,
GENI_SE_SPI_SLAVE,
GENI_SE_Q2SPI = 0xE,
};
struct geni_wrapper;