msm: mhi_dev: Add snapshot of MHI device driver

Snapshot of PCIe MHI device driver as of msm-5.4
'commit <07e8c4b8d300> ("smcinvoke: Move IPC_LOG
before deleting cb_txn")'.

The following changes were made:
 - Fix the "overlapping comparisons always evaluate to true" warning.
 - Fix issues with the module licenses and conflicting
   module_init and module_exit symbols.
 - Replace (devm_)ioremap_nocache with (devm_)ioremap, in line with
   the latest upstream implementations (see the illustrative sketch
   below).
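
A minimal sketch of the ioremap change (the variable names and call
site below are illustrative only, not the actual driver code):

    /* before: devm_ioremap_nocache() was removed from the upstream kernel */
    base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));

    /* after: devm_ioremap() already provides an uncached device mapping */
    base = devm_ioremap(&pdev->dev, res->start, resource_size(res));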

Change-Id: I775a85dfe30a16de96afaef1d9f1ff0c6a032b7f
Signed-off-by: Sriharsha Allenki <sallenki@codeaurora.org>
Sriharsha Allenki 2021-05-04 11:08:48 +05:30
parent e1336d4d94
commit 1afa208156
12 changed files with 11888 additions and 0 deletions

@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
# Makefile for MHI driver
obj-y += mhi_mmio.o
obj-y += mhi.o
obj-y += mhi_ring.o
obj-y += mhi_uci.o
obj-y += mhi_sm.o
obj-y += mhi_dev_net.o

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1,853 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.*/
/*
* MHI Device Network interface
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/ipc_logging.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/list.h>
#include "mhi.h"
#define MHI_NET_DRIVER_NAME "mhi_dev_net_drv"
#define MHI_NET_DEV_NAME "mhi_swip%d"
#define MHI_NET_DEFAULT_MTU 8192
#define MHI_NET_IPC_PAGES (100)
#define MHI_MAX_RX_REQ (128)
#define MHI_MAX_TX_REQ (128)
enum mhi_dev_net_dbg_lvl {
MHI_VERBOSE = 0x1,
MHI_INFO = 0x2,
MHI_DBG = 0x3,
MHI_WARNING = 0x4,
MHI_ERROR = 0x5,
MHI_CRITICAL = 0x6,
MSG_NET_reserved = 0x80000000
};
static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_CRITICAL;
static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_VERBOSE;
static void *mhi_net_ipc_log;
enum mhi_chan_dir {
MHI_DIR_INVALID = 0x0,
MHI_DIR_OUT = 0x1,
MHI_DIR_IN = 0x2,
MHI_DIR__reserved = 0x80000000
};
struct mhi_dev_net_chan_attr {
/* SW maintained channel id */
enum mhi_client_channel chan_id;
/* maximum buffer size for this channel */
size_t max_packet_size;
/* direction of the channel, see enum mhi_chan_dir */
enum mhi_chan_dir dir;
};
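/* Channels are allocated in OUT/IN pairs, so the owning client index is chan/2 */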
#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
#define mhi_dev_net_log(_msg_lvl, _msg, ...) do { \
if (_msg_lvl >= mhi_net_msg_lvl) { \
pr_err("[%s] " _msg, __func__, ##__VA_ARGS__); \
} \
if (mhi_net_ipc_log && (_msg_lvl >= mhi_net_ipc_log_lvl)) { \
ipc_log_string(mhi_net_ipc_log, \
"[%s] " _msg, __func__, ##__VA_ARGS__); \
} \
} while (0)
struct mhi_dev_net_client {
/* write channel - always even*/
u32 out_chan;
/* read channel - always odd */
u32 in_chan;
bool eth_iface;
struct mhi_dev_client *out_handle;
struct mhi_dev_client *in_handle;
/* process pending packets */
struct workqueue_struct *pending_pckt_wq;
struct work_struct xmit_work;
/*Read data from host work queue*/
atomic_t rx_enabled;
atomic_t tx_enabled;
struct net_device *dev;
struct sk_buff_head tx_buffers;
struct list_head rx_buffers;
struct list_head wr_req_buffers;
struct mhi_dev_net_ctxt *net_ctxt;
/*To check write channel is empty or not*/
spinlock_t wrt_lock;
spinlock_t rd_lock;
struct mutex in_chan_lock;
struct mutex out_chan_lock;
};
struct mhi_dev_net_ctxt {
struct mhi_dev_net_chan_attr chan_attr[MHI_MAX_SOFTWARE_CHANNELS];
struct mhi_dev_net_client *client_handle;
struct platform_device *pdev;
void (*net_event_notifier)(struct mhi_dev_client_cb_reason *cb);
};
static struct mhi_dev_net_ctxt mhi_net_ctxt;
static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *);
static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt)
{
u32 channel = 0;
struct mhi_dev_net_chan_attr *chan_attrib = NULL;
channel = MHI_CLIENT_IP_SW_4_OUT;
chan_attrib = &mhi_ctxt->chan_attr[channel];
chan_attrib->dir = MHI_DIR_OUT;
chan_attrib->chan_id = channel;
chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE;
mhi_dev_net_log(MHI_INFO, "Write chan attributes dir %d chan_id %d\n",
chan_attrib->dir, chan_attrib->chan_id);
channel = MHI_CLIENT_IP_SW_4_IN;
chan_attrib = &mhi_ctxt->chan_attr[channel];
chan_attrib->dir = MHI_DIR_IN;
chan_attrib->chan_id = channel;
chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE;
mhi_dev_net_log(MHI_INFO, "Read chan attributes dir %d chan_id %d\n",
chan_attrib->dir, chan_attrib->chan_id);
return 0;
}
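/*
* TX worker: pairs queued skbs with pre-allocated write requests and
* pushes them to the IN (device-to-host) channel. Transmission is
* paused with netif_stop_queue() whenever mhi_dev_channel_isempty()
* reports that the channel cannot accept more data.
*/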
static void mhi_dev_net_process_queue_packets(struct work_struct *work)
{
struct mhi_dev_net_client *client = container_of(work,
struct mhi_dev_net_client, xmit_work);
unsigned long flags = 0;
int xfer_data = 0;
struct sk_buff *skb = NULL;
struct mhi_req *wreq = NULL;
if (mhi_dev_channel_isempty(client->in_handle)) {
mhi_dev_net_log(MHI_INFO, "%s stop network xmmit\n", __func__);
netif_stop_queue(client->dev);
return;
}
while (!((skb_queue_empty(&client->tx_buffers)) ||
(list_empty(&client->wr_req_buffers)))) {
spin_lock_irqsave(&client->wrt_lock, flags);
skb = skb_dequeue(&(client->tx_buffers));
if (!skb) {
mhi_dev_net_log(MHI_INFO,
"SKB is NULL from dequeue\n");
spin_unlock_irqrestore(&client->wrt_lock, flags);
return;
}
wreq = container_of(client->wr_req_buffers.next,
struct mhi_req, list);
list_del_init(&wreq->list);
wreq->client = client->in_handle;
wreq->context = skb;
wreq->buf = skb->data;
wreq->len = skb->len;
wreq->chan = client->in_chan;
wreq->mode = DMA_ASYNC;
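/*
* Request a send-completion only for the last packet queued in this
* pass, so completion events are batched instead of raised per packet.
*/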
if (skb_queue_empty(&client->tx_buffers) ||
list_empty(&client->wr_req_buffers)) {
wreq->snd_cmpl = 1;
} else
wreq->snd_cmpl = 0;
spin_unlock_irqrestore(&client->wrt_lock, flags);
xfer_data = mhi_dev_write_channel(wreq);
if (xfer_data <= 0) {
pr_err("%s(): Failed to write skb len %d\n",
__func__, skb->len);
kfree_skb(skb);
return;
}
client->dev->stats.tx_packets++;
/* Check if free buffers are available*/
if (mhi_dev_channel_isempty(client->in_handle)) {
mhi_dev_net_log(MHI_INFO,
"%s buffers are full stop xmit\n",
__func__);
netif_stop_queue(client->dev);
break;
}
} /* While TX queue is not empty */
}
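/*
* TRE-available notification from the MHI core: odd channel ids are the
* IN (device-to-host) direction, so restart transmit; even ids are OUT
* (host-to-device), so drain the channel via mhi_dev_net_client_read().
*/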
static void mhi_dev_net_event_notifier(struct mhi_dev_client_cb_reason *reason)
{
struct mhi_dev_net_client *client_handle = mhi_net_ctxt.client_handle;
if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
if (reason->ch_id % 2) {
if (netif_queue_stopped(client_handle->dev)) {
netif_wake_queue(client_handle->dev);
queue_work(client_handle->pending_pckt_wq,
&client_handle->xmit_work);
}
} else
mhi_dev_net_client_read(client_handle);
}
}
static __be16 mhi_dev_net_eth_type_trans(struct sk_buff *skb)
{
__be16 protocol = 0;
/* Determine L3 protocol */
switch (skb->data[0] & 0xf0) {
case 0x40:
protocol = htons(ETH_P_IP);
break;
case 0x60:
protocol = htons(ETH_P_IPV6);
break;
default:
/* Default is QMAP */
protocol = htons(ETH_P_MAP);
break;
}
return protocol;
}
static void mhi_dev_net_read_completion_cb(void *req)
{
struct mhi_dev_net_client *net_handle =
mhi_net_ctxt.client_handle;
struct mhi_req *mreq =
(struct mhi_req *)req;
struct sk_buff *skb = mreq->context;
unsigned long flags;
skb->len = mreq->transfer_len;
if (net_handle->eth_iface)
skb->protocol = eth_type_trans(skb, net_handle->dev);
else
skb->protocol = mhi_dev_net_eth_type_trans(skb);
skb_put(skb, mreq->transfer_len);
net_handle->dev->stats.rx_packets++;
skb->dev = net_handle->dev;
netif_rx(skb);
spin_lock_irqsave(&net_handle->rd_lock, flags);
list_add_tail(&mreq->list, &net_handle->rx_buffers);
spin_unlock_irqrestore(&net_handle->rd_lock, flags);
}
static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
{
int bytes_avail = 0;
int ret_val = 0;
u32 chan = 0;
struct mhi_dev_client *client_handle = NULL;
struct mhi_req *req;
struct sk_buff *skb;
unsigned long flags;
client_handle = mhi_handle->out_handle;
chan = mhi_handle->out_chan;
if (!atomic_read(&mhi_handle->rx_enabled))
return -EPERM;
while (1) {
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
if (list_empty(&mhi_handle->rx_buffers)) {
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
break;
}
req = container_of(mhi_handle->rx_buffers.next,
struct mhi_req, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_ATOMIC);
if (skb == NULL) {
pr_err("%s(): skb alloc failed\n", __func__);
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
list_add_tail(&req->list, &mhi_handle->rx_buffers);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
ret_val = -ENOMEM;
return ret_val;
}
req->client = client_handle;
req->chan = chan;
req->buf = skb->data;
req->len = MHI_NET_DEFAULT_MTU;
req->context = skb;
req->mode = DMA_ASYNC;
bytes_avail = mhi_dev_read_channel(req);
if (bytes_avail < 0) {
pr_err("Failed to read chan %d bytes_avail = %d\n",
chan, bytes_avail);
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
kfree_skb(skb);
list_add_tail(&req->list, &mhi_handle->rx_buffers);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
ret_val = -EIO;
return 0;
}
/* no data to send to network stack, break */
if (!bytes_avail) {
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
kfree_skb(skb);
list_add_tail(&req->list, &mhi_handle->rx_buffers);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
return 0;
}
}
/* coming out while only in case of no data or error */
return ret_val;
}
static void mhi_dev_net_write_completion_cb(void *req)
{
struct mhi_dev_net_client *client_handle = mhi_net_ctxt.client_handle;
struct mhi_req *wreq = (struct mhi_req *)req;
struct sk_buff *skb = wreq->context;
unsigned long flags;
kfree_skb(skb);
spin_lock_irqsave(&client_handle->wrt_lock, flags);
list_add_tail(&wreq->list, &client_handle->wr_req_buffers);
spin_unlock_irqrestore(&client_handle->wrt_lock, flags);
}
static int mhi_dev_net_alloc_write_reqs(struct mhi_dev_net_client *client)
{
int nreq = 0, rc = 0;
struct mhi_req *wreq;
while (nreq < MHI_MAX_TX_REQ) {
wreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC);
if (!wreq)
return -ENOMEM;
wreq->client_cb = mhi_dev_net_write_completion_cb;
list_add_tail(&wreq->list, &client->wr_req_buffers);
nreq++;
}
mhi_dev_net_log(MHI_INFO,
"mhi write reqs allocation success\n");
return rc;
}
static int mhi_dev_net_alloc_read_reqs(struct mhi_dev_net_client *client)
{
int nreq = 0, rc = 0;
struct mhi_req *mreq;
while (nreq < MHI_MAX_RX_REQ) {
mreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC);
if (!mreq)
return -ENOMEM;
mreq->len = TRB_MAX_DATA_SIZE;
mreq->client_cb = mhi_dev_net_read_completion_cb;
list_add_tail(&mreq->list, &client->rx_buffers);
nreq++;
}
mhi_dev_net_log(MHI_INFO,
"mhi read reqs allocation success\n");
return rc;
}
static int mhi_dev_net_open(struct net_device *dev)
{
struct mhi_dev_net_client *mhi_dev_net_ptr =
*(struct mhi_dev_net_client **)netdev_priv(dev);
mhi_dev_net_log(MHI_INFO,
"mhi_net_dev interface is up for IN %d OUT %d\n",
mhi_dev_net_ptr->out_chan,
mhi_dev_net_ptr->in_chan);
netif_start_queue(dev);
return 0;
}
static netdev_tx_t mhi_dev_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mhi_dev_net_client *mhi_dev_net_ptr =
*(struct mhi_dev_net_client **)netdev_priv(dev);
unsigned long flags;
if (skb->len <= 0) {
mhi_dev_net_log(MHI_ERROR,
"Invalid skb received freeing skb\n");
kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock_irqsave(&mhi_dev_net_ptr->wrt_lock, flags);
skb_queue_tail(&(mhi_dev_net_ptr->tx_buffers), skb);
spin_unlock_irqrestore(&mhi_dev_net_ptr->wrt_lock, flags);
queue_work(mhi_dev_net_ptr->pending_pckt_wq,
&mhi_dev_net_ptr->xmit_work);
return NETDEV_TX_OK;
}
static int mhi_dev_net_stop(struct net_device *dev)
{
netif_stop_queue(dev);
mhi_dev_net_log(MHI_VERBOSE, "mhi_dev_net interface is down\n");
return 0;
}
static int mhi_dev_net_change_mtu(struct net_device *dev, int new_mtu)
{
if (0 > new_mtu || MHI_NET_DEFAULT_MTU < new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static const struct net_device_ops mhi_dev_net_ops_ip = {
.ndo_open = mhi_dev_net_open,
.ndo_stop = mhi_dev_net_stop,
.ndo_start_xmit = mhi_dev_net_xmit,
.ndo_change_mtu = mhi_dev_net_change_mtu,
};
static void mhi_dev_net_rawip_setup(struct net_device *dev)
{
dev->netdev_ops = &mhi_dev_net_ops_ip;
ether_setup(dev);
mhi_dev_net_log(MHI_INFO,
"mhi_dev_net Raw IP setup\n");
/* set this after calling ether_setup */
dev->header_ops = NULL;
dev->type = ARPHRD_RAWIP;
dev->hard_header_len = 0;
dev->mtu = MHI_NET_DEFAULT_MTU;
dev->addr_len = 0;
dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
}
static void mhi_dev_net_ether_setup(struct net_device *dev)
{
dev->netdev_ops = &mhi_dev_net_ops_ip;
ether_setup(dev);
mhi_dev_net_log(MHI_INFO,
"mhi_dev_net Ethernet setup\n");
}
static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr)
{
int ret = 0;
struct mhi_dev_net_client **mhi_dev_net_ctxt = NULL;
struct net_device *netdev;
if (!mhi_dev_net_ptr)
return -EINVAL;
/* Initialize skb list head to queue the packets for mhi dev client */
skb_queue_head_init(&(mhi_dev_net_ptr->tx_buffers));
mhi_dev_net_log(MHI_INFO,
"mhi_dev_net interface registration\n");
netdev = alloc_netdev(sizeof(struct mhi_dev_net_client),
MHI_NET_DEV_NAME, NET_NAME_PREDICTABLE,
mhi_net_ctxt.client_handle->eth_iface ?
mhi_dev_net_ether_setup :
mhi_dev_net_rawip_setup);
if (!netdev) {
pr_err("Failed to allocate netdev for mhi_dev_net\n");
goto net_dev_alloc_fail;
}
if (mhi_net_ctxt.client_handle->eth_iface) {
eth_random_addr(netdev->dev_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
return -EADDRNOTAVAIL;
}
mhi_dev_net_ctxt = netdev_priv(netdev);
mhi_dev_net_ptr->dev = netdev;
*mhi_dev_net_ctxt = mhi_dev_net_ptr;
ret = register_netdev(mhi_dev_net_ptr->dev);
if (ret) {
pr_err("Failed to register mhi_dev_net device\n");
goto net_dev_reg_fail;
}
mhi_dev_net_log(MHI_INFO, "Successfully registred mhi_dev_net\n");
return 0;
net_dev_reg_fail:
free_netdev(mhi_dev_net_ptr->dev);
net_dev_alloc_fail:
mhi_dev_close_channel(mhi_dev_net_ptr->in_handle);
mhi_dev_close_channel(mhi_dev_net_ptr->out_handle);
mhi_dev_net_ptr->dev = NULL;
return -ENOMEM;
}
static int mhi_dev_net_open_chan_create_netif(struct mhi_dev_net_client *client)
{
int rc = 0;
int ret = 0;
struct list_head *cp, *q;
struct mhi_req *mreq;
mhi_dev_net_log(MHI_DBG, "opening OUT %d IN %d channels\n",
client->out_chan,
client->in_chan);
mhi_dev_net_log(MHI_DBG,
"Initializing inbound chan %d.\n",
client->in_chan);
rc = mhi_dev_open_channel(client->out_chan, &client->out_handle,
mhi_net_ctxt.net_event_notifier);
if (rc < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open chan %d, ret 0x%x\n",
client->out_chan, rc);
goto handle_not_rdy_err;
} else
atomic_set(&client->rx_enabled, 1);
rc = mhi_dev_open_channel(client->in_chan, &client->in_handle,
mhi_net_ctxt.net_event_notifier);
if (rc < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open chan %d, ret 0x%x\n",
client->in_chan, rc);
goto handle_in_err;
} else
atomic_set(&client->tx_enabled, 1);
mhi_dev_net_log(MHI_INFO, "IN %d, OUT %d channels are opened",
client->in_chan, client->out_chan);
INIT_LIST_HEAD(&client->rx_buffers);
INIT_LIST_HEAD(&client->wr_req_buffers);
/* pre allocate read request buffer */
ret = mhi_dev_net_alloc_read_reqs(client);
if (ret) {
pr_err("failed to allocate rx req buffers\n");
goto rx_req_failed;
}
ret = mhi_dev_net_alloc_write_reqs(client);
if (ret) {
pr_err("failed to allocate write req buffers\n");
goto tx_req_failed;
}
if (atomic_read(&client->tx_enabled)) {
ret = mhi_dev_net_enable_iface(client);
if (ret < 0)
mhi_dev_net_log(MHI_ERROR,
"failed to enable mhi_dev_net iface\n");
}
return ret;
tx_req_failed:
list_for_each_safe(cp, q, &client->rx_buffers) {
mreq = list_entry(cp, struct mhi_req, list);
list_del(cp);
kfree(mreq);
}
rx_req_failed:
mhi_dev_close_channel(client->in_handle);
handle_in_err:
mhi_dev_close_channel(client->out_handle);
handle_not_rdy_err:
mutex_unlock(&client->in_chan_lock);
mutex_unlock(&client->out_chan_lock);
return rc;
}
static int mhi_dev_net_close(void)
{
struct mhi_dev_net_client *client;
mhi_dev_net_log(MHI_INFO,
"mhi_dev_net module is removed\n");
client = mhi_net_ctxt.client_handle;
mhi_dev_close_channel(client->out_handle);
atomic_set(&client->tx_enabled, 0);
mhi_dev_close_channel(client->in_handle);
atomic_set(&client->rx_enabled, 0);
if (client->dev != NULL) {
netif_stop_queue(client->dev);
unregister_netdev(client->dev);
free_netdev(client->dev);
client->dev = NULL;
}
/* freeing mhi client and IPC context */
kfree(client);
kfree(mhi_net_ipc_log);
return 0;
}
static int mhi_dev_net_rgstr_client(struct mhi_dev_net_client *client, int idx)
{
client->out_chan = idx;
client->in_chan = idx + 1;
mutex_init(&client->in_chan_lock);
mutex_init(&client->out_chan_lock);
spin_lock_init(&client->wrt_lock);
spin_lock_init(&client->rd_lock);
mhi_dev_net_log(MHI_INFO, "Registering out %d, In %d channels\n",
client->out_chan, client->in_chan);
return 0;
}
static int mhi_dev_net_dergstr_client
(struct mhi_dev_net_client *client)
{
mutex_destroy(&client->in_chan_lock);
mutex_destroy(&client->out_chan_lock);
return 0;
}
static void mhi_dev_net_free_reqs(struct list_head *buff)
{
struct list_head *node, *next;
struct mhi_req *mreq;
list_for_each_safe(node, next, buff) {
mreq = list_entry(node, struct mhi_req, list);
list_del(&mreq->list);
kfree(mreq);
}
}
static void mhi_dev_net_state_cb(struct mhi_dev_client_cb_data *cb_data)
{
struct mhi_dev_net_client *mhi_client;
uint32_t info_in_ch = 0, info_out_ch = 0;
int ret;
if (!cb_data || !cb_data->user_data) {
mhi_dev_net_log(MHI_ERROR, "invalid input received\n");
return;
}
mhi_client = cb_data->user_data;
ret = mhi_ctrl_state_info(mhi_client->in_chan, &info_in_ch);
if (ret) {
mhi_dev_net_log(MHI_ERROR,
"Failed to obtain in_channel %d state\n",
mhi_client->in_chan);
return;
}
ret = mhi_ctrl_state_info(mhi_client->out_chan, &info_out_ch);
if (ret) {
mhi_dev_net_log(MHI_ERROR,
"Failed to obtain out_channel %d state\n",
mhi_client->out_chan);
return;
}
mhi_dev_net_log(MHI_MSG_VERBOSE, "in_channel :%d, state :%d\n",
mhi_client->in_chan, info_in_ch);
mhi_dev_net_log(MHI_MSG_VERBOSE, "out_channel :%d, state :%d\n",
mhi_client->out_chan, info_out_ch);
if (info_in_ch == MHI_STATE_CONNECTED &&
info_out_ch == MHI_STATE_CONNECTED) {
/**
* Open IN and OUT channels for Network client
* and create Network Interface.
*/
ret = mhi_dev_net_open_chan_create_netif(mhi_client);
if (ret) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open channels\n");
return;
}
} else if (info_in_ch == MHI_STATE_DISCONNECTED ||
info_out_ch == MHI_STATE_DISCONNECTED) {
if (mhi_client->dev != NULL) {
netif_stop_queue(mhi_client->dev);
unregister_netdev(mhi_client->dev);
mhi_dev_close_channel(mhi_client->out_handle);
atomic_set(&mhi_client->tx_enabled, 0);
mhi_dev_close_channel(mhi_client->in_handle);
atomic_set(&mhi_client->rx_enabled, 0);
mhi_dev_net_free_reqs(&mhi_client->rx_buffers);
mhi_dev_net_free_reqs(&mhi_client->wr_req_buffers);
free_netdev(mhi_client->dev);
mhi_client->dev = NULL;
}
}
}
int mhi_dev_net_interface_init(void)
{
int ret_val = 0, index = 0;
bool out_channel_started = false;
struct mhi_dev_net_client *mhi_net_client = NULL;
if (mhi_net_ctxt.client_handle) {
mhi_dev_net_log(MHI_INFO,
"MHI Netdev interface already initialized\n");
return ret_val;
}
mhi_net_client = kzalloc(sizeof(struct mhi_dev_net_client), GFP_KERNEL);
if (!mhi_net_client)
return -ENOMEM;
mhi_net_ipc_log = ipc_log_context_create(MHI_NET_IPC_PAGES,
"mhi-net", 0);
if (!mhi_net_ipc_log) {
mhi_dev_net_log(MHI_DBG,
"Failed to create IPC logging for mhi_dev_net\n");
kfree(mhi_net_client);
return -ENOMEM;
}
mhi_net_ctxt.client_handle = mhi_net_client;
if (mhi_net_ctxt.pdev)
mhi_net_ctxt.client_handle->eth_iface =
of_property_read_bool
((&mhi_net_ctxt.pdev->dev)->of_node,
"qcom,mhi-ethernet-interface");
/*Process pending packet work queue*/
mhi_net_client->pending_pckt_wq =
create_singlethread_workqueue("pending_xmit_pckt_wq");
INIT_WORK(&mhi_net_client->xmit_work,
mhi_dev_net_process_queue_packets);
mhi_dev_net_log(MHI_INFO,
"Registering for MHI transfer events from host\n");
mhi_net_ctxt.net_event_notifier = mhi_dev_net_event_notifier;
ret_val = mhi_dev_net_init_ch_attributes(&mhi_net_ctxt);
if (ret_val < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to init client attributes\n");
goto channel_init_fail;
}
mhi_dev_net_log(MHI_DBG, "Initializing client\n");
index = MHI_CLIENT_IP_SW_4_OUT;
ret_val = mhi_dev_net_rgstr_client(mhi_net_client, index);
if (ret_val) {
mhi_dev_net_log(MHI_CRITICAL,
"Failed to reg client %d ret 0\n", ret_val);
goto client_register_fail;
}
ret_val = mhi_register_state_cb(mhi_dev_net_state_cb,
mhi_net_client, MHI_CLIENT_IP_SW_4_OUT);
/* -EEXIST indicates success and channel is already open */
if (ret_val == -EEXIST)
out_channel_started = true;
else if (ret_val < 0)
goto register_state_cb_fail;
ret_val = mhi_register_state_cb(mhi_dev_net_state_cb,
mhi_net_client, MHI_CLIENT_IP_SW_4_IN);
/* -EEXIST indicates success and channel is already open */
if (ret_val == -EEXIST) {
/**
* If both in and out channels were opened by host at the
* time of registration proceed with opening channels and
* create network interface from device side.
* if the channels are not opened at the time of registration
* we will get a call back notification mhi_dev_net_state_cb()
* and proceed to open channels and create network interface
* with mhi_dev_net_open_chan_create_netif().
*/
ret_val = 0;
if (out_channel_started) {
ret_val = mhi_dev_net_open_chan_create_netif
(mhi_net_client);
if (ret_val < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open channels\n");
goto channel_open_fail;
}
}
} else if (ret_val < 0) {
goto register_state_cb_fail;
}
return ret_val;
channel_open_fail:
register_state_cb_fail:
mhi_dev_net_dergstr_client(mhi_net_client);
client_register_fail:
channel_init_fail:
destroy_workqueue(mhi_net_client->pending_pckt_wq);
kfree(mhi_net_client);
kfree(mhi_net_ipc_log);
return ret_val;
}
EXPORT_SYMBOL(mhi_dev_net_interface_init);
void __exit mhi_dev_net_exit(void)
{
mhi_dev_net_log(MHI_INFO,
"MHI Network Interface Module exited\n");
mhi_dev_net_close();
}
EXPORT_SYMBOL(mhi_dev_net_exit);
static int mhi_dev_net_probe(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
mhi_net_ctxt.pdev = pdev;
mhi_dev_net_log(MHI_INFO,
"MHI Network probe success");
}
return 0;
}
static int mhi_dev_net_remove(struct platform_device *pdev)
{
platform_set_drvdata(pdev, NULL);
return 0;
}
static const struct of_device_id mhi_dev_net_match_table[] = {
{ .compatible = "qcom,msm-mhi-dev-net" },
{}
};
static struct platform_driver mhi_dev_net_driver = {
.driver = {
.name = "qcom,msm-mhi-dev-net",
.of_match_table = mhi_dev_net_match_table,
},
.probe = mhi_dev_net_probe,
.remove = mhi_dev_net_remove,
};
static int __init mhi_dev_net_init(void)
{
return platform_driver_register(&mhi_dev_net_driver);
}
subsys_initcall(mhi_dev_net_init);
static void __exit mhi_dev_exit(void)
{
platform_driver_unregister(&mhi_dev_net_driver);
}
module_exit(mhi_dev_exit);
MODULE_DESCRIPTION("MHI net device driver");
MODULE_LICENSE("GPL v2");

@@ -0,0 +1,189 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015, 2017-2020, The Linux Foundation. All rights reserved.*/
#ifndef _MHI_HWIO_
#define _MHI_HWIO_
/* MHI register definition */
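/*
* Each register below is listed as an offset plus MASK/SHIFT pairs for
* its fields, consumed by mhi_dev_mmio_masked_read() and
* mhi_dev_mmio_masked_write(). The _n(n) macros index the per-channel
* and per-event-ring doorbell and interrupt registers.
*/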
#define MHI_CTRL_INT_STATUS_A7 (0x0004)
#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK 0xffffffff
#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT 0x0
#define MHI_CHDB_INT_STATUS_A7_n(n) (0x0028 + 0x4 * (n))
#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
#define MHI_ERDB_INT_STATUS_A7_n(n) (0x0038 + 0x4 * (n))
#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
#define MHI_CTRL_INT_CLEAR_A7 (0x004C)
#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK 0xffffffff
#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT 0x0
#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x0070 + 0x4 * (n))
#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x0080 + 0x4 * (n))
#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
#define MHI_CTRL_INT_MASK_A7 (0x0094)
#define MHI_CTRL_INT_MASK_A7_MASK_MASK 0x3
#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT 0x0
#define MHI_CTRL_MHICTRL_MASK BIT(0)
#define MHI_CTRL_MHICTRL_SHFT 0
#define MHI_CTRL_CRDB_MASK BIT(1)
#define MHI_CTRL_CRDB_SHFT 1
#define MHI_CHDB_INT_MASK_A7_n(n) (0x00B8 + 0x4 * (n))
#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT 0x0
#define MHI_ERDB_INT_MASK_A7_n(n) (0x00C8 + 0x4 * (n))
#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT 0x0
#define MHIREGLEN (0x0100)
#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff
#define MHIREGLEN_MHIREGLEN_SHIFT 0x0
#define MHIVER (0x0108)
#define MHIVER_MHIVER_MASK 0xffffffff
#define MHIVER_MHIVER_SHIFT 0x0
#define MHICFG (0x0110)
#define MHICFG_NHWER_MASK 0xff000000
#define MHICFG_NHWER_SHIFT 0x18
#define MHICFG_NER_MASK 0xff0000
#define MHICFG_NER_SHIFT 0x10
#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
#define MHICFG_NCH_MASK 0xff
#define MHICFG_NCH_SHIFT 0x0
#define CHDBOFF (0x0118)
#define CHDBOFF_CHDBOFF_MASK 0xffffffff
#define CHDBOFF_CHDBOFF_SHIFT 0x0
#define ERDBOFF (0x0120)
#define ERDBOFF_ERDBOFF_MASK 0xffffffff
#define ERDBOFF_ERDBOFF_SHIFT 0x0
#define BHIOFF (0x0128)
#define BHIOFF_BHIOFF_MASK 0xffffffff
#define BHIOFF_BHIOFF_SHIFT 0x0
#define DEBUGOFF (0x0130)
#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff
#define DEBUGOFF_DEBUGOFF_SHIFT 0x0
#define MHICTRL (0x0138)
#define MHICTRL_MHISTATE_MASK 0x0000FF00
#define MHICTRL_MHISTATE_SHIFT 0x8
#define MHICTRL_RESET_MASK 0x2
#define MHICTRL_RESET_SHIFT 0x1
#define MHISTATUS (0x0148)
#define MHISTATUS_MHISTATE_MASK 0x0000ff00
#define MHISTATUS_MHISTATE_SHIFT 0x8
#define MHISTATUS_SYSERR_MASK 0x4
#define MHISTATUS_SYSERR_SHIFT 0x2
#define MHISTATUS_READY_MASK 0x1
#define MHISTATUS_READY_SHIFT 0x0
#define CCABAP_LOWER (0x0158)
#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff
#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0
#define CCABAP_HIGHER (0x015C)
#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff
#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0
#define ECABAP_LOWER (0x0160)
#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff
#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0
#define ECABAP_HIGHER (0x0164)
#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff
#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0
#define CRCBAP_LOWER (0x0168)
#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff
#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0
#define CRCBAP_HIGHER (0x016C)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff
#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0
#define CRDB_LOWER (0x0170)
#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff
#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0
#define CRDB_HIGHER (0x0174)
#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff
#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0
#define MHICTRLBASE_LOWER (0x0180)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0
#define MHICTRLBASE_HIGHER (0x0184)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0
#define MHICTRLLIMIT_LOWER (0x0188)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0
#define MHICTRLLIMIT_HIGHER (0x018C)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0
#define MHIDATABASE_LOWER (0x0198)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0
#define MHIDATABASE_HIGHER (0x019C)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0
#define MHIDATALIMIT_LOWER (0x01A0)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0
#define MHIDATALIMIT_HIGHER (0x01A4)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0
#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n))
#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff
#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0
#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n))
#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff
#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0
#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n))
#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff
#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0
#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n))
#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff
#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0
#define BHI_INTVEC (0x220)
#define BHI_INTVEC_MASK 0xFFFFFFFF
#define BHI_INTVEC_SHIFT 0
#define BHI_EXECENV (0x228)
#define BHI_EXECENV_MASK 0xFFFFFFFF
#define BHI_EXECENV_SHIFT 0
#define BHI_IMGTXDB (0x218)
#endif

@@ -0,0 +1,766 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015,2017-2021, The Linux Foundation. All rights reserved.*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include "mhi.h"
#include "mhi_hwio.h"
int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
uint32_t *reg_value)
{
void __iomem *addr;
if (WARN_ON(!dev))
return -EINVAL;
addr = dev->mmio_base_addr + offset;
*reg_value = readl_relaxed(addr);
pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_read);
int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
uint32_t val)
{
void __iomem *addr;
if (WARN_ON(!dev))
return -EINVAL;
addr = dev->mmio_base_addr + offset;
writel_relaxed(val, addr);
pr_debug("reg write:0x%x with value 0x%x\n", offset, val);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_write);
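/*
* Read-modify-write of a single register field: clear the bits covered
* by mask, then OR in val shifted into position.
*/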
int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
uint32_t mask, uint32_t shift,
uint32_t val)
{
uint32_t reg_val;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, offset, &reg_val);
reg_val &= ~mask;
reg_val |= ((val << shift) & mask);
mhi_dev_mmio_write(dev, offset, reg_val);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_masked_write);
int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
uint32_t mask, uint32_t shift,
uint32_t *reg_val)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, offset, reg_val);
*reg_val &= mask;
*reg_val >>= shift;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_masked_read);
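/*
* Each MHI_CHDB_INT_MASK_A7_n register holds mask bits for 32 channel
* doorbells: chdb_id / 32 selects the register, chdb_id % 32 the bit.
*/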
static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
uint32_t chdb_id, bool enable)
{
uint32_t chid_mask, chid_idx, chid_shft, val = 0;
chid_shft = chdb_id%32;
chid_mask = (1 << chid_shft);
chid_idx = chdb_id/32;
if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB) {
pr_err("Invalid channel id:%d\n", chid_idx);
return -EINVAL;
}
if (enable)
val = 1;
mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx),
chid_mask, chid_shft, val);
mhi_dev_mmio_read(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx),
&dev->chdb[chid_idx].mask);
return 0;
}
int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7);
int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7);
static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev,
uint32_t erdb_ch_id, bool enable)
{
uint32_t erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0;
erdb_id_shft = erdb_ch_id%32;
erdb_id_mask = (1 << erdb_id_shft);
erdb_id_idx = erdb_ch_id/32;
if (enable)
val = 1;
mhi_dev_mmio_masked_write(dev,
MHI_ERDB_INT_MASK_A7_n(erdb_id_idx),
erdb_id_mask, erdb_id_shft, val);
return 0;
}
int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7);
int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7);
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state,
u32 *mhi_reset)
{
uint32_t reg_value = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_read(dev, MHICTRL,
MHISTATUS_MHISTATE_MASK, MHISTATUS_MHISTATE_SHIFT, state);
mhi_dev_mmio_read(dev, MHICTRL, &reg_value);
if (reg_value & MHICTRL_RESET_MASK)
*mhi_reset = 1;
else
*mhi_reset = 0;
mhi_log(MHI_MSG_VERBOSE, "MHICTRL is 0x%x, reset:%d\n",
reg_value, *mhi_reset);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state);
static int mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable)
{
uint32_t mask = 0, i = 0;
if (enable)
mask = MHI_CHDB_INT_MASK_A7_n_MASK_MASK;
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
mhi_dev_mmio_write(dev,
MHI_CHDB_INT_MASK_A7_n(i), mask);
dev->chdb[i].mask = mask;
}
return 0;
}
int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_chdb_interrupts(dev, true);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts);
int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_chdb_interrupts(dev, false);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts);
int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev)
{
uint32_t i;
if (WARN_ON(!dev))
return -EINVAL;
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
mhi_dev_mmio_read(dev,
MHI_CHDB_INT_STATUS_A7_n(i), &dev->chdb[i].status);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts);
static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable)
{
uint32_t mask = 0, i;
if (enable)
mask = MHI_ERDB_INT_MASK_A7_n_MASK_MASK;
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
mhi_dev_mmio_write(dev,
MHI_ERDB_INT_MASK_A7_n(i), mask);
return 0;
}
int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_erdb_interrupts(dev, true);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts);
int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_set_erdb_interrupts(dev, false);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts);
int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev)
{
uint32_t i;
if (WARN_ON(!dev))
return -EINVAL;
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i),
&dev->evdb[i].status);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts);
int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt);
int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt);
int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int);
dev->ctrl_int &= 0x1;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt);
int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int);
dev->cmd_int &= 0x10;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt);
int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt);
int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt);
void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
{
mhi_dev_mmio_disable_ctrl_interrupt(dev);
mhi_dev_mmio_disable_cmdb_interrupt(dev);
mhi_dev_mmio_mask_chdb_interrupts(dev);
mhi_dev_mmio_mask_erdb_interrupts(dev);
}
EXPORT_SYMBOL(mhi_dev_mmio_mask_interrupts);
int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev)
{
uint32_t i = 0;
if (WARN_ON(!dev))
return -EINVAL;
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i),
MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK);
for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i),
MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK);
mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7,
(MHI_CTRL_INT_MMIO_WR_CLEAR | MHI_CTRL_INT_CRDB_CLEAR |
MHI_CTRL_INT_CRDB_MHICTRL_CLEAR));
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts);
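/*
* CCABAP HIGHER/LOWER hold the 64-bit host physical address of the
* channel context array; the device-side addresses are derived from its
* offset within the already-mapped control region.
*/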
int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev)
{
uint32_t ccabap_value = 0, offset = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, CCABAP_HIGHER, &ccabap_value);
dev->ch_ctx_shadow.host_pa = ccabap_value;
dev->ch_ctx_shadow.host_pa <<= 32;
mhi_dev_mmio_read(dev, CCABAP_LOWER, &ccabap_value);
dev->ch_ctx_shadow.host_pa |= ccabap_value;
offset = (uint32_t)(dev->ch_ctx_shadow.host_pa -
dev->ctrl_base.host_pa);
dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base);
int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev)
{
uint32_t ecabap_value = 0, offset = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, ECABAP_HIGHER, &ecabap_value);
dev->ev_ctx_shadow.host_pa = ecabap_value;
dev->ev_ctx_shadow.host_pa <<= 32;
mhi_dev_mmio_read(dev, ECABAP_LOWER, &ecabap_value);
dev->ev_ctx_shadow.host_pa |= ecabap_value;
offset = (uint32_t)(dev->ev_ctx_shadow.host_pa -
dev->ctrl_base.host_pa);
dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base);
int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev)
{
uint32_t crcbap_value = 0, offset = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, CRCBAP_HIGHER, &crcbap_value);
dev->cmd_ctx_shadow.host_pa = crcbap_value;
dev->cmd_ctx_shadow.host_pa <<= 32;
mhi_dev_mmio_read(dev, CRCBAP_LOWER, &crcbap_value);
dev->cmd_ctx_shadow.host_pa |= crcbap_value;
offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa -
dev->ctrl_base.host_pa);
dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base);
int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
{
uint32_t value = 0, ch_start_idx = 0;
if (WARN_ON(!ring))
return -EINVAL;
ch_start_idx = ring->mhi_dev->ch_ring_start;
mhi_dev_mmio_read(ring->mhi_dev,
CHDB_HIGHER_n(ring->id-ch_start_idx), &value);
*wr_offset = value;
*wr_offset <<= 32;
mhi_dev_mmio_read(ring->mhi_dev,
CHDB_LOWER_n(ring->id-ch_start_idx), &value);
*wr_offset |= value;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db);
int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
{
uint32_t value = 0, ev_idx_start = 0;
if (WARN_ON(!ring))
return -EINVAL;
ev_idx_start = ring->mhi_dev->ev_ring_start;
mhi_dev_mmio_read(ring->mhi_dev,
ERDB_HIGHER_n(ring->id - ev_idx_start), &value);
*wr_offset = value;
*wr_offset <<= 32;
mhi_dev_mmio_read(ring->mhi_dev,
ERDB_LOWER_n(ring->id - ev_idx_start), &value);
*wr_offset |= value;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db);
int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
{
uint32_t value = 0;
if (WARN_ON(!ring))
return -EINVAL;
mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &value);
*wr_offset = value;
*wr_offset <<= 32;
mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &value);
*wr_offset |= value;
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db);
int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_write(dev, BHI_EXECENV, value);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_set_env);
int mhi_dev_mmio_clear_reset(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_masked_write(dev, MHICTRL,
MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_clear_reset);
int mhi_dev_mmio_reset(struct mhi_dev *dev)
{
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_write(dev, MHICTRL, 0);
mhi_dev_mmio_write(dev, MHISTATUS, 0);
mhi_dev_mmio_clear_interrupts(dev);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_reset);
int mhi_dev_restore_mmio(struct mhi_dev *dev)
{
uint32_t i, reg_cntl_value;
void *reg_cntl_addr;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_mask_interrupts(dev);
for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) {
reg_cntl_addr = dev->mmio_base_addr +
MHI_DEV_MMIO_OFFSET + (i * 4);
reg_cntl_value = dev->mmio_backup[i];
writel_relaxed(reg_cntl_value, reg_cntl_addr);
}
mhi_dev_mmio_clear_interrupts(dev);
/* Mask and enable control interrupt */
mhi_dev_mmio_enable_ctrl_interrupt(dev);
/*Enable chdb interrupt*/
mhi_dev_mmio_enable_chdb_interrupts(dev);
/*Enable cmdb interrupt*/
mhi_dev_mmio_enable_cmdb_interrupt(dev);
mb();
return 0;
}
EXPORT_SYMBOL(mhi_dev_restore_mmio);
int mhi_dev_backup_mmio(struct mhi_dev *dev)
{
uint32_t i = 0;
void __iomem *reg_cntl_addr;
if (WARN_ON(!dev))
return -EINVAL;
for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++) {
reg_cntl_addr = (void __iomem *) (dev->mmio_base_addr +
MHI_DEV_MMIO_OFFSET + (i * 4));
dev->mmio_backup[i] = readl_relaxed(reg_cntl_addr);
}
return 0;
}
EXPORT_SYMBOL(mhi_dev_backup_mmio);
int mhi_dev_get_mhi_addr(struct mhi_dev *dev)
{
uint32_t data_value = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value);
dev->host_addr.ctrl_base_lsb = data_value;
mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value);
dev->host_addr.ctrl_base_msb = data_value;
mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value);
dev->host_addr.ctrl_limit_lsb = data_value;
mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value);
dev->host_addr.ctrl_limit_msb = data_value;
mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value);
dev->host_addr.data_base_lsb = data_value;
mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value);
dev->host_addr.data_base_msb = data_value;
mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value);
dev->host_addr.data_limit_lsb = data_value;
mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value);
dev->host_addr.data_limit_msb = data_value;
return 0;
}
EXPORT_SYMBOL(mhi_dev_get_mhi_addr);
int mhi_dev_mmio_init(struct mhi_dev *dev)
{
int rc = 0;
if (WARN_ON(!dev))
return -EINVAL;
mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len);
mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
MHICFG_NER_SHIFT, &dev->cfg.event_rings);
rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NHWER_MASK,
MHICFG_NHWER_SHIFT, &dev->cfg.hw_event_rings);
if (rc)
return rc;
rc = mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset);
if (rc)
return rc;
mhi_dev_mmio_read(dev, ERDBOFF, &dev->cfg.erdb_offset);
dev->cfg.channels = NUM_CHANNELS;
if (!dev->mmio_initialized)
mhi_dev_mmio_reset(dev);
return 0;
}
EXPORT_SYMBOL(mhi_dev_mmio_init);
int mhi_dev_update_ner(struct mhi_dev *dev)
{
int rc = 0, mhi_cfg = 0;
if (WARN_ON(!dev))
return -EINVAL;
rc = mhi_dev_mmio_read(dev, MHICFG, &mhi_cfg);
if (rc)
return rc;
pr_debug("MHICFG: 0x%x\n", mhi_cfg);
dev->cfg.event_rings =
(mhi_cfg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT;
dev->cfg.hw_event_rings =
(mhi_cfg & MHICFG_NHWER_MASK) >> MHICFG_NHWER_SHIFT;
return 0;
}
EXPORT_SYMBOL(mhi_dev_update_ner);
int mhi_dev_dump_mmio(struct mhi_dev *dev)
{
uint32_t r1, r2, r3, r4, i, offset = 0;
if (WARN_ON(!dev))
return -EINVAL;
for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) {
mhi_dev_mmio_read(dev, offset, &r1);
mhi_dev_mmio_read(dev, offset+4, &r2);
mhi_dev_mmio_read(dev, offset+8, &r3);
mhi_dev_mmio_read(dev, offset+0xC, &r4);
offset += 0x10;
pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
offset, r1, r2, r3, r4);
}
return 0;
}
EXPORT_SYMBOL(mhi_dev_dump_mmio);

@@ -0,0 +1,592 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include "mhi.h"
static struct event_req dummy_ereq;
static void mhi_dev_event_buf_completion_dummy_cb(void *req)
{
mhi_log(MHI_MSG_VERBOSE, "%s invoked\n", __func__);
}
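/* Convert a host ring pointer (rp/wp) into an element index relative to rbase */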
static size_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
{
uint64_t rbase;
rbase = ring->ring_ctx->generic.rbase;
return (p - rbase)/sizeof(union mhi_dev_ring_element_type);
}
static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
{
return ring->ring_ctx->generic.rlen/
sizeof(union mhi_dev_ring_element_type);
}
int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
size_t start, size_t end)
{
struct mhi_addr host_addr;
struct mhi_dev *mhi_ctx;
mhi_ctx = ring->mhi_dev;
/* fetch ring elements from start->end, take care of wrap-around case */
if (MHI_USE_DMA(mhi_ctx)) {
host_addr.host_pa = ring->ring_shadow.host_pa
+ sizeof(union mhi_dev_ring_element_type) * start;
host_addr.phy_addr = ring->ring_cache_dma_handle +
(sizeof(union mhi_dev_ring_element_type) * start);
} else {
host_addr.device_va = ring->ring_shadow.device_va
+ sizeof(union mhi_dev_ring_element_type) * start;
host_addr.virt_addr = &ring->ring_cache[start];
}
host_addr.size = (end-start) * sizeof(union mhi_dev_ring_element_type);
if (start < end) {
mhi_ctx->read_from_host(ring->mhi_dev, &host_addr);
} else if (start > end) {
/* copy from 'start' to ring end, then ring start to 'end'*/
host_addr.size = (ring->ring_size-start) *
sizeof(union mhi_dev_ring_element_type);
mhi_ctx->read_from_host(ring->mhi_dev, &host_addr);
if (end) {
/* wrapped around */
host_addr.device_pa = ring->ring_shadow.device_pa;
host_addr.device_va = ring->ring_shadow.device_va;
host_addr.host_pa = ring->ring_shadow.host_pa;
host_addr.virt_addr = &ring->ring_cache[0];
host_addr.phy_addr = ring->ring_cache_dma_handle;
host_addr.size = (end *
sizeof(union mhi_dev_ring_element_type));
mhi_ctx->read_from_host(ring->mhi_dev,
&host_addr);
}
}
return 0;
}
int mhi_dev_cache_ring(struct mhi_dev_ring *ring, size_t wr_offset)
{
size_t old_offset = 0;
struct mhi_dev *mhi_ctx;
if (WARN_ON(!ring))
return -EINVAL;
mhi_ctx = ring->mhi_dev;
if (ring->wr_offset == wr_offset) {
mhi_log(MHI_MSG_VERBOSE,
"nothing to cache for ring %d, local wr_ofst %lu\n",
ring->id, ring->wr_offset);
mhi_log(MHI_MSG_VERBOSE,
"new wr_offset %lu\n", wr_offset);
return 0;
}
old_offset = ring->wr_offset;
/*
* Copy the elements from old_offset to wr_offset, taking the
* wrap-around case into account. Event rings are not cached, so
* there is nothing to do for them here.
*/
if (ring->id >= mhi_ctx->ev_ring_start &&
ring->id < (mhi_ctx->ev_ring_start +
mhi_ctx->cfg.event_rings)) {
mhi_log(MHI_MSG_VERBOSE,
"not caching event ring %d\n", ring->id);
return 0;
}
mhi_log(MHI_MSG_VERBOSE, "caching ring %d, start %lu, end %lu\n",
ring->id, old_offset, wr_offset);
if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
mhi_log(MHI_MSG_ERROR,
"failed to fetch elements for ring %d, start %lu, end %lu\n",
ring->id, old_offset, wr_offset);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(mhi_dev_cache_ring);
int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
{
uint64_t wr_offset = 0;
size_t new_wr_offset = 0;
int32_t rc = 0;
if (WARN_ON(!ring))
return -EINVAL;
switch (ring->type) {
case RING_TYPE_CMD:
rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
if (rc) {
pr_err("%s: CMD DB read failed\n", __func__);
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"ring %d wr_offset from db 0x%lx\n",
ring->id, (size_t) wr_offset);
break;
case RING_TYPE_ER:
rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
if (rc) {
pr_err("%s: EVT DB read failed\n", __func__);
return rc;
}
break;
case RING_TYPE_CH:
rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
if (rc) {
pr_err("%s: CH DB read failed\n", __func__);
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"ring %d wr_offset from db 0x%lx\n",
ring->id, (size_t) wr_offset);
break;
default:
mhi_log(MHI_MSG_ERROR, "invalid ring type\n");
return -EINVAL;
}
new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset);
mhi_dev_cache_ring(ring, new_wr_offset);
ring->wr_offset = new_wr_offset;
return 0;
}
EXPORT_SYMBOL(mhi_dev_update_wr_offset);
int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, size_t offset)
{
union mhi_dev_ring_element_type *el;
if (WARN_ON(!ring))
return -EINVAL;
/* get the element and invoke the respective callback */
el = &ring->ring_cache[offset];
mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr);
mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, offset:%lu\n",
el->tre.len, offset);
if (ring->ring_cb)
ring->ring_cb(ring->mhi_dev, el, (void *)ring);
else
mhi_log(MHI_MSG_ERROR, "No callback registered for ring %d\n",
ring->id);
return 0;
}
EXPORT_SYMBOL(mhi_dev_process_ring_element);
int mhi_dev_process_ring(struct mhi_dev_ring *ring)
{
int rc = 0;
union mhi_dev_ring_element_type *el;
if (WARN_ON(!ring))
return -EINVAL;
mhi_log(MHI_MSG_VERBOSE,
"Before wr update ring_id (%d) element (%lu) with wr:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
rc = mhi_dev_update_wr_offset(ring);
if (rc) {
mhi_log(MHI_MSG_ERROR,
"Error updating write-offset for ring %d\n",
ring->id);
return rc;
}
/* get the element and invoke the respective callback */
el = &ring->ring_cache[ring->wr_offset];
mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr);
mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, wr_offset:%lu\n",
el->tre.len, ring->wr_offset);
if (ring->type == RING_TYPE_CH) {
/* notify the clients that there are elements in the ring */
rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
if (rc)
pr_err("Error fetching elements\n");
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"After ring update ring_id (%d) element (%lu) with wr:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
while (ring->rd_offset != ring->wr_offset) {
rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
if (rc) {
mhi_log(MHI_MSG_ERROR,
"Error processing ring (%d) element (%lu)\n",
ring->id, ring->rd_offset);
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"Processing ring (%d) rd_offset:%lu, wr_offset:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
mhi_dev_ring_inc_index(ring, ring->rd_offset);
}
if (ring->rd_offset != ring->wr_offset) {
mhi_log(MHI_MSG_ERROR,
"Error with the rd offset/wr offset\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(mhi_dev_process_ring);
int mhi_dev_add_element(struct mhi_dev_ring *ring,
union mhi_dev_ring_element_type *element,
struct event_req *ereq, int size)
{
size_t old_offset = 0;
struct mhi_addr host_addr;
uint32_t num_elem = 1;
uint32_t num_free_elem;
struct mhi_dev *mhi_ctx;
uint32_t i;
if (WARN_ON(!ring || !element))
return -EINVAL;
mhi_ctx = ring->mhi_dev;
mhi_dev_update_wr_offset(ring);
if (ereq)
num_elem = size / (sizeof(union mhi_dev_ring_element_type));
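/*
* Compute the free space in the ring; one element is always kept unused
* so that rd_offset == wr_offset unambiguously means an empty ring.
*/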
if (ring->rd_offset < ring->wr_offset)
num_free_elem = ring->wr_offset - ring->rd_offset - 1;
else
num_free_elem = ring->ring_size - ring->rd_offset +
ring->wr_offset - 1;
if (num_free_elem < num_elem) {
mhi_log(MHI_MSG_ERROR, "No space to add %d elem in ring (%d)\n",
num_elem, ring->id);
return -EINVAL;
}
old_offset = ring->rd_offset;
if (ereq) {
ring->rd_offset += num_elem;
if (ring->rd_offset >= ring->ring_size)
ring->rd_offset -= ring->ring_size;
} else
mhi_dev_ring_inc_index(ring, ring->rd_offset);
mhi_log(MHI_MSG_VERBOSE,
"Writing %d elements, ring old 0x%x, new 0x%x\n",
num_elem, old_offset, ring->rd_offset);
ring->ring_ctx->generic.rp = (ring->rd_offset *
sizeof(union mhi_dev_ring_element_type)) +
ring->ring_ctx->generic.rbase;
/*
* Write the element, ring_base has to be the
* iomap of the ring_base for memcpy
*/
if (MHI_USE_DMA(mhi_ctx))
host_addr.host_pa = ring->ring_shadow.host_pa +
sizeof(union mhi_dev_ring_element_type) * old_offset;
else
host_addr.device_va = ring->ring_shadow.device_va +
sizeof(union mhi_dev_ring_element_type) * old_offset;
if (!ereq) {
/* We're adding only a single ring element */
host_addr.virt_addr = element;
host_addr.size = sizeof(union mhi_dev_ring_element_type);
mhi_log(MHI_MSG_VERBOSE, "adding element to ring (%d)\n",
ring->id);
mhi_log(MHI_MSG_VERBOSE, "rd_ofset %lu\n", ring->rd_offset);
mhi_log(MHI_MSG_VERBOSE, "type %d\n", element->generic.type);
mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
NULL, MHI_DEV_DMA_SYNC);
return 0;
}
// Log elements added to ring
for (i = 0; i < num_elem; ++i) {
mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n",
(element + i)->evt_tr_comp.ptr);
mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x\n",
(element + i)->evt_tr_comp.len);
mhi_log(MHI_MSG_VERBOSE, "evnt code :0x%x\n",
(element + i)->evt_tr_comp.code);
mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n",
(element + i)->evt_tr_comp.type);
mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n",
(element + i)->evt_tr_comp.chid);
}
/* Adding multiple ring elements */
if (ring->rd_offset == 0 || (ring->rd_offset > old_offset)) {
/* No wrap-around case */
host_addr.virt_addr = element;
host_addr.size = size;
host_addr.phy_addr = 0;
mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
ereq, MHI_DEV_DMA_ASYNC);
} else {
mhi_log(MHI_MSG_VERBOSE, "Wrap around case\n");
/* Wrap-around case - first chunk uses dma sync */
host_addr.virt_addr = element;
host_addr.size = (ring->ring_size - old_offset) *
sizeof(union mhi_dev_ring_element_type);
if (mhi_ctx->use_ipa) {
mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
NULL, MHI_DEV_DMA_SYNC);
} else {
dummy_ereq.event_type = SEND_EVENT_BUFFER;
host_addr.phy_addr = 0;
/* Nothing to do in the callback */
dummy_ereq.client_cb =
mhi_dev_event_buf_completion_dummy_cb;
mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
&dummy_ereq, MHI_DEV_DMA_ASYNC);
}
/* Copy remaining elements */
if (MHI_USE_DMA(mhi_ctx))
host_addr.host_pa = ring->ring_shadow.host_pa;
else
host_addr.device_va = ring->ring_shadow.device_va;
host_addr.virt_addr = element + (ring->ring_size - old_offset);
host_addr.size = ring->rd_offset *
sizeof(union mhi_dev_ring_element_type);
host_addr.phy_addr = 0;
mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
ereq, MHI_DEV_DMA_ASYNC);
}
return 0;
}
EXPORT_SYMBOL(mhi_dev_add_element);
static int mhi_dev_ring_alloc_msi_buf(struct mhi_dev_ring *ring)
{
if (ring->msi_buffer.buf) {
mhi_log(MHI_MSG_INFO, "MSI buf already allocated\n");
return 0;
}
ring->msi_buffer.buf = dma_alloc_coherent(&ring->mhi_dev->pdev->dev,
sizeof(u32),
&ring->msi_buffer.dma_addr,
GFP_KERNEL);
if (!ring->msi_buffer.buf)
return -ENOMEM;
return 0;
}
int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
struct mhi_dev *mhi)
{
int rc = 0;
size_t wr_offset = 0;
size_t offset = 0;
if (WARN_ON(!ring || !ctx || !mhi))
return -EINVAL;
ring->ring_ctx = ctx;
ring->ring_size = mhi_dev_ring_num_elems(ring);
ring->rd_offset = mhi_dev_ring_addr2ofst(ring,
ring->ring_ctx->generic.rp);
ring->wr_offset = mhi_dev_ring_addr2ofst(ring,
ring->ring_ctx->generic.rp);
ring->mhi_dev = mhi;
mhi_ring_set_state(ring, RING_STATE_IDLE);
wr_offset = mhi_dev_ring_addr2ofst(ring,
ring->ring_ctx->generic.wp);
if (!ring->ring_cache) {
ring->ring_cache = dma_alloc_coherent(mhi->dev,
ring->ring_size *
sizeof(union mhi_dev_ring_element_type),
&ring->ring_cache_dma_handle,
GFP_KERNEL);
if (!ring->ring_cache) {
mhi_log(MHI_MSG_ERROR,
"Failed to allocate ring cache\n");
return -ENOMEM;
}
}
if (ring->type == RING_TYPE_ER) {
if (!ring->evt_rp_cache) {
ring->evt_rp_cache = dma_alloc_coherent(mhi->dev,
sizeof(uint64_t) * ring->ring_size,
&ring->evt_rp_cache_dma_handle,
GFP_KERNEL);
if (!ring->evt_rp_cache) {
mhi_log(MHI_MSG_ERROR,
"Failed to allocate evt rp cache\n");
rc = -ENOMEM;
goto cleanup;
}
}
if (!ring->msi_buf) {
ring->msi_buf = dma_alloc_coherent(mhi->dev,
sizeof(uint32_t),
&ring->msi_buf_dma_handle,
GFP_KERNEL);
if (!ring->msi_buf) {
mhi_log(MHI_MSG_ERROR,
"Failed to allocate msi buf\n");
rc = -ENOMEM;
goto cleanup;
}
}
}
offset = (size_t)(ring->ring_ctx->generic.rbase -
mhi->ctrl_base.host_pa);
ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;
ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;
if (ring->type == RING_TYPE_ER)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
(ring->id - mhi->ev_ring_start) *
sizeof(union mhi_dev_ring_ctx));
else if (ring->type == RING_TYPE_CMD)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
else if (ring->type == RING_TYPE_CH)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));
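	/*
	 * Note: the per-type shadow pointer computed above is immediately
	 * overridden below, so the ring context passed in by the caller is
	 * used directly as the shadow context.
	 */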
ring->ring_ctx_shadow = ring->ring_ctx;
if (ring->type != RING_TYPE_ER && ring->type != RING_TYPE_CH) {
rc = mhi_dev_cache_ring(ring, wr_offset);
if (rc)
return rc;
}
mhi_log(MHI_MSG_VERBOSE, "ctx ring_base:0x%lx, rp:0x%lx, wp:0x%lx\n",
(size_t)ring->ring_ctx->generic.rbase,
(size_t)ring->ring_ctx->generic.rp,
(size_t)ring->ring_ctx->generic.wp);
ring->wr_offset = wr_offset;
if (mhi->use_edma) {
rc = mhi_dev_ring_alloc_msi_buf(ring);
if (rc)
return rc;
}
return rc;
cleanup:
dma_free_coherent(mhi->dev,
ring->ring_size *
sizeof(union mhi_dev_ring_element_type),
ring->ring_cache,
ring->ring_cache_dma_handle);
ring->ring_cache = NULL;
if (ring->evt_rp_cache) {
dma_free_coherent(mhi->dev,
sizeof(uint64_t) * ring->ring_size,
ring->evt_rp_cache,
ring->evt_rp_cache_dma_handle);
ring->evt_rp_cache = NULL;
}
return rc;
}
EXPORT_SYMBOL(mhi_ring_start);
void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type,
int id)
{
if (WARN_ON(!ring))
return;
ring->id = id;
ring->state = RING_STATE_UINT;
ring->ring_cb = NULL;
ring->type = type;
mutex_init(&ring->event_lock);
}
EXPORT_SYMBOL(mhi_ring_init);
void mhi_ring_set_cb(struct mhi_dev_ring *ring,
void (*ring_cb)(struct mhi_dev *dev,
union mhi_dev_ring_element_type *el, void *ctx))
{
if (WARN_ON(!ring || !ring_cb))
return;
ring->ring_cb = ring_cb;
}
EXPORT_SYMBOL(mhi_ring_set_cb);
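/*
 * Typical bring-up order for a ring, as a minimal sketch (illustrative only;
 * the real callers live elsewhere in this driver). `my_ring_cb`, `ctx`, `mhi`
 * and `id` are assumed to be provided by the caller:
 *
 *	mhi_ring_init(ring, RING_TYPE_CH, id);
 *	mhi_ring_set_cb(ring, my_ring_cb);
 *	rc = mhi_ring_start(ring, ctx, mhi);
 *	if (rc)
 *		mhi_log(MHI_MSG_ERROR, "ring %d start failed %d\n", id, rc);
 */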
void mhi_ring_set_state(struct mhi_dev_ring *ring,
enum mhi_dev_ring_state state)
{
if (WARN_ON(!ring))
return;
if (state > RING_STATE_PENDING) {
pr_err("%s: Invalid ring state\n", __func__);
return;
}
ring->state = state;
}
EXPORT_SYMBOL(mhi_ring_set_state);
enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring)
{
if (WARN_ON(!ring))
return -EINVAL;
return ring->state;
}
EXPORT_SYMBOL(mhi_ring_get_state);

File diff suppressed because it is too large

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015,2017-2018,2020 The Linux Foundation. All rights reserved.*/
#ifndef MHI_SM_H
#define MHI_SM_H
#include "mhi.h"
#include <linux/slab.h>
#include <linux/msm_ep_pcie.h>
/**
* enum mhi_dev_event - MHI state change events
* @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event.
 *		Not supported, for future use.
* @MHI_DEV_EVENT_M0_STATE: M0 state change event
* @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use
* @MHI_DEV_EVENT_M2_STATE: M2 state change event. Not supported, for future use
 * @MHI_DEV_EVENT_M3_STATE: M3 state change event
 * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pending data on IPA, initiates Host wakeup
 * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiates Host wakeup
*/
enum mhi_dev_event {
MHI_DEV_EVENT_CTRL_TRIG,
MHI_DEV_EVENT_M0_STATE,
MHI_DEV_EVENT_M1_STATE,
MHI_DEV_EVENT_M2_STATE,
MHI_DEV_EVENT_M3_STATE,
MHI_DEV_EVENT_HW_ACC_WAKEUP,
MHI_DEV_EVENT_CORE_WAKEUP,
MHI_DEV_EVENT_MAX
};
int mhi_dev_sm_init(struct mhi_dev *dev);
int mhi_dev_sm_exit(struct mhi_dev *dev);
int mhi_dev_sm_set_ready(void);
int mhi_dev_notify_sm_event(enum mhi_dev_event event);
int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state);
int mhi_dev_sm_syserr(void);
void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify);
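/*
 * Minimal usage sketch (illustrative only): once the state machine has been
 * initialized with mhi_dev_sm_init(), a host-driven M0 transition could be
 * reported with:
 *
 *	rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_M0_STATE);
 *	if (rc)
 *		pr_err("failed to notify SM event: %d\n", rc);
 */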
#endif /* MHI_SM_H */

File diff suppressed because it is too large

include/linux/msm_mhi_dev.h Normal file

@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.*/
#ifndef __MSM_MHI_DEV_H
#define __MSM_MHI_DEV_H
#include <linux/types.h>
#include <linux/dma-mapping.h>
#define DMA_SYNC 1
#define DMA_ASYNC 0
enum cb_reason {
MHI_DEV_TRE_AVAILABLE = 0,
MHI_DEV_CTRL_UPDATE,
};
struct mhi_dev_client_cb_reason {
uint32_t ch_id;
enum cb_reason reason;
};
struct mhi_dev_client {
struct list_head list;
struct mhi_dev_channel *channel;
void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);
/* mhi_dev calls are fully synchronous -- only one call may be
* active per client at a time for now.
*/
struct mutex write_lock;
wait_queue_head_t wait;
/* trace logs */
spinlock_t tr_lock;
unsigned int tr_head;
unsigned int tr_tail;
struct mhi_dev_trace *tr_log;
/* client buffers */
struct mhi_dev_iov *iov;
uint32_t nr_iov;
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
struct mhi_req {
u32 chan;
u32 mode;
u32 chain;
void *buf;
dma_addr_t dma;
u32 snd_cmpl;
void *context;
size_t len;
size_t transfer_len;
uint32_t rd_offset;
struct mhi_dev_client *client;
struct list_head list;
union mhi_dev_ring_element_type *el;
void (*client_cb)(void *req);
bool is_stale;
};
/* SW channel client list */
enum mhi_client_channel {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_IP_CTRL_0_OUT = 16,
MHI_CLIENT_IP_CTRL_0_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_IP_CTRL_3_OUT = 22,
MHI_CLIENT_IP_CTRL_3_IN = 23,
MHI_CLIENT_IP_CTRL_4_OUT = 24,
MHI_CLIENT_IP_CTRL_4_IN = 25,
MHI_CLIENT_IP_CTRL_5_OUT = 26,
MHI_CLIENT_IP_CTRL_5_IN = 27,
MHI_CLIENT_IP_CTRL_6_OUT = 28,
MHI_CLIENT_IP_CTRL_6_IN = 29,
MHI_CLIENT_IP_CTRL_7_OUT = 30,
MHI_CLIENT_IP_CTRL_7_IN = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_IP_SW_0_OUT = 34,
MHI_CLIENT_IP_SW_0_IN = 35,
MHI_CLIENT_ADB_OUT = 36,
MHI_CLIENT_ADB_IN = 37,
MHI_CLIENT_IP_SW_2_OUT = 38,
MHI_CLIENT_IP_SW_2_IN = 39,
MHI_CLIENT_IP_SW_3_OUT = 40,
MHI_CLIENT_IP_SW_3_IN = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_4_OUT = 46,
MHI_CLIENT_IP_SW_4_IN = 47,
MHI_MAX_SOFTWARE_CHANNELS,
MHI_CLIENT_TEST_OUT = 60,
MHI_CLIENT_TEST_IN = 61,
MHI_CLIENT_RESERVED_1_LOWER = 62,
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL_IN = 102,
MHI_CLIENT_IP_HW_QDSS = 103,
MHI_CLIENT_IP_HW_1_OUT = 105,
MHI_CLIENT_IP_HW_1_IN = 106,
MHI_CLIENT_QMAP_FLOW_CTRL_OUT = 109,
MHI_CLIENT_QMAP_FLOW_CTRL_IN = 110,
MHI_MAX_CHANNELS = 255,
MHI_CLIENT_INVALID = 0xFFFFFFFF
};
struct mhi_dev_client_cb_data {
void *user_data;
enum mhi_client_channel channel;
enum mhi_ctrl_info ctrl_info;
};
typedef void (*mhi_state_cb)(struct mhi_dev_client_cb_data *cb_dat);
struct mhi_dev_ready_cb_info {
struct list_head list;
mhi_state_cb cb;
struct mhi_dev_client_cb_data cb_data;
};
#if IS_ENABLED(CONFIG_MSM_MHI_DEV)
/**
 * mhi_dev_open_channel() - Open a channel for a given client; must be done
 *		prior to read/write.
 * @chan_id:	Software channel ID for the assigned client.
 * @handle_client:	Populated with the client handle on success.
 * @event_trigger:	Client-supplied callback used for event notification.
*/
int mhi_dev_open_channel(uint32_t chan_id,
struct mhi_dev_client **handle_client,
void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
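/*
 * Minimal usage sketch (illustrative only): opening the loopback IN channel.
 * `my_event_notifier` is an assumed caller-defined
 * void (*)(struct mhi_dev_client_cb_reason *) callback.
 *
 *	struct mhi_dev_client *client;
 *	int rc;
 *
 *	rc = mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_IN, &client,
 *				my_event_notifier);
 *	if (rc)
 *		return rc;
 */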
/**
* mhi_dev_close_channel() - Channel close for a given client.
*/
void mhi_dev_close_channel(struct mhi_dev_client *handle_client);
/**
* mhi_dev_read_channel() - Channel read for a given client
 * @mreq:	Client request which includes meta info such as the location
 *		to write the read data to, buffer length, read offset, mode,
 *		chain flag, and the client callback which is invoked when the
 *		data read is completed.
*/
int mhi_dev_read_channel(struct mhi_req *mreq);
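/*
 * Minimal read sketch (illustrative only), assuming `client` came from
 * mhi_dev_open_channel(), `buf`/`buf_len` are caller-allocated, and
 * `my_read_done` is a caller-defined void (*)(void *) completion callback:
 *
 *	struct mhi_req req = { 0 };
 *	int rc;
 *
 *	req.chan = MHI_CLIENT_LOOPBACK_IN;
 *	req.client = client;
 *	req.buf = buf;
 *	req.len = buf_len;
 *	req.mode = DMA_ASYNC;
 *	req.client_cb = my_read_done;
 *	rc = mhi_dev_read_channel(&req);
 */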
/**
* mhi_dev_write_channel() - Channel write for a given software client.
 * @wreq:	Client request which includes meta info such as the client
 *		handle, the location to read the write data from, buffer
 *		length, mode, and the client callback which frees the packet
 *		when the data write is completed.
*/
int mhi_dev_write_channel(struct mhi_req *wreq);
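/*
 * Minimal write sketch (illustrative only), mirroring the read example above;
 * field names come from struct mhi_req, while `pkt`, `pkt_len` and
 * `my_write_done` are assumed caller-provided:
 *
 *	struct mhi_req wreq = { 0 };
 *
 *	wreq.chan = MHI_CLIENT_LOOPBACK_IN;
 *	wreq.client = client;
 *	wreq.buf = pkt;
 *	wreq.len = pkt_len;
 *	wreq.mode = DMA_SYNC;
 *	wreq.client_cb = my_write_done;
 *	rc = mhi_dev_write_channel(&wreq);
 */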
/**
 * mhi_dev_channel_isempty() - Checks whether there are any pending TREs to
 *		process.
 * @handle:	Client handle issued during mhi_dev_open_channel().
*/
int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
/**
* mhi_dev_channel_has_pending_write() - Checks if there are any pending writes
 *		to be completed on the inbound channel.
 * @handle:	Client handle issued during mhi_dev_open_channel().
*/
bool mhi_dev_channel_has_pending_write(struct mhi_dev_client *handle);
/**
* mhi_ctrl_state_info() - Provide MHI state info
* @idx: Channel number idx. Look at channel_state_info and
* pass the index for the corresponding channel.
* @info: Return the control info.
* MHI_STATE=CONFIGURED - MHI device is present but not ready
* for data traffic.
* MHI_STATE=CONNECTED - MHI device is ready for data transfer.
* MHI_STATE=DISCONNECTED - MHI device has its pipes suspended.
 *	The driver exposes device nodes for the supported MHI software
 *	channels.
*/
int mhi_ctrl_state_info(uint32_t idx, uint32_t *info);
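/*
 * Minimal polling sketch (illustrative only); `chan_idx` is an assumed
 * channel index chosen by the caller:
 *
 *	uint32_t info = MHI_STATE_INVAL;
 *
 *	if (!mhi_ctrl_state_info(chan_idx, &info) &&
 *	    info == MHI_STATE_CONNECTED)
 *		pr_info("MHI channel %u ready for data\n", chan_idx);
 */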
/**
* mhi_register_state_cb() - Clients can register and receive callback after
* MHI channel is connected or disconnected.
*/
int mhi_register_state_cb(void (*mhi_state_cb)
(struct mhi_dev_client_cb_data *cb_data), void *data,
enum mhi_client_channel channel);
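/*
 * Minimal registration sketch (illustrative only); `my_state_cb` and
 * `my_priv` are assumed caller-defined:
 *
 *	static void my_state_cb(struct mhi_dev_client_cb_data *cb_data)
 *	{
 *		if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
 *			pr_info("channel %d connected\n", cb_data->channel);
 *	}
 *
 *	rc = mhi_register_state_cb(my_state_cb, my_priv,
 *				MHI_CLIENT_LOOPBACK_IN);
 */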
#else
static inline int mhi_dev_open_channel(uint32_t chan_id,
struct mhi_dev_client **handle_client,
void (*event_trigger)(struct mhi_dev_client_cb_reason *cb))
{
return -EINVAL;
};
static inline void mhi_dev_close_channel(struct mhi_dev_client *handle_client)
{
};
static inline int mhi_dev_read_channel(struct mhi_req *mreq)
{
return -EINVAL;
};
static inline int mhi_dev_write_channel(struct mhi_req *wreq)
{
return -EINVAL;
};
static inline int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
{
return -EINVAL;
};
static inline bool mhi_dev_channel_has_pending_write
(struct mhi_dev_client *handle)
{
return false;
}
static inline int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
{
return -EINVAL;
};
static inline int mhi_register_state_cb(void (*mhi_state_cb)
(struct mhi_dev_client_cb_data *cb_data), void *data,
enum mhi_client_channel channel)
{
return -EINVAL;
};
#endif
#endif /* __MSM_MHI_DEV_H */

include/uapi/linux/mhi.h Normal file

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.*/
#ifndef _UAPI_MHI_H
#define _UAPI_MHI_H
#include <linux/types.h>
#include <linux/ioctl.h>
enum peripheral_ep_type {
DATA_EP_TYPE_RESERVED,
DATA_EP_TYPE_HSIC,
DATA_EP_TYPE_HSUSB,
DATA_EP_TYPE_PCIE,
DATA_EP_TYPE_EMBEDDED,
DATA_EP_TYPE_BAM_DMUX,
};
struct peripheral_ep_info {
enum peripheral_ep_type ep_type;
__u32 peripheral_iface_id;
};
struct ipa_ep_pair {
__u32 cons_pipe_num;
__u32 prod_pipe_num;
};
struct ep_info {
struct peripheral_ep_info ph_ep_info;
struct ipa_ep_pair ipa_ep_pair;
};
#define MHI_UCI_IOCTL_MAGIC 'm'
#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info)
#define MHI_UCI_DPL_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 3, struct ep_info)
#define MHI_UCI_CV2X_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 4, struct ep_info)
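/*
 * Userspace usage sketch (illustrative only, with the usual fcntl/ioctl/stdio
 * includes): querying the endpoint info for an MHI UCI node. The device path
 * "/dev/mhi_uci_sample" is a placeholder, not a real node name.
 *
 *	struct ep_info info;
 *	int fd = open("/dev/mhi_uci_sample", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, MHI_UCI_EP_LOOKUP, &info))
 *		printf("cons pipe %u, prod pipe %u\n",
 *			info.ipa_ep_pair.cons_pipe_num,
 *			info.ipa_ep_pair.prod_pipe_num);
 */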
#endif /* _UAPI_MHI_H */