xen-netback: Use skb->cb for pending_idx
Storing the pending_idx at the first byte of the linear buffer never looked good; skb->cb is a more proper place for this. It also prevents the header from being directly grant copied there, and since we no longer have the pending_idx after copying the header, it is time to change it. It also introduces helpers for the RX side. Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
31c70d5956
commit
8f13dd9612
@ -455,10 +455,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct skb_cb_overlay {
|
struct xenvif_rx_cb {
|
||||||
int meta_slots_used;
|
int meta_slots_used;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
|
||||||
|
|
||||||
void xenvif_kick_thread(struct xenvif *vif)
|
void xenvif_kick_thread(struct xenvif *vif)
|
||||||
{
|
{
|
||||||
wake_up(&vif->wq);
|
wake_up(&vif->wq);
|
||||||
@ -474,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
|
|||||||
LIST_HEAD(notify);
|
LIST_HEAD(notify);
|
||||||
int ret;
|
int ret;
|
||||||
unsigned long offset;
|
unsigned long offset;
|
||||||
struct skb_cb_overlay *sco;
|
|
||||||
bool need_to_notify = false;
|
bool need_to_notify = false;
|
||||||
|
|
||||||
struct netrx_pending_operations npo = {
|
struct netrx_pending_operations npo = {
|
||||||
@ -513,9 +514,8 @@ static void xenvif_rx_action(struct xenvif *vif)
|
|||||||
} else
|
} else
|
||||||
vif->rx_last_skb_slots = 0;
|
vif->rx_last_skb_slots = 0;
|
||||||
|
|
||||||
sco = (struct skb_cb_overlay *)skb->cb;
|
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
|
||||||
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
|
BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
|
||||||
BUG_ON(sco->meta_slots_used > max_slots_needed);
|
|
||||||
|
|
||||||
__skb_queue_tail(&rxq, skb);
|
__skb_queue_tail(&rxq, skb);
|
||||||
}
|
}
|
||||||
@ -529,7 +529,6 @@ static void xenvif_rx_action(struct xenvif *vif)
|
|||||||
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
|
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
|
||||||
|
|
||||||
while ((skb = __skb_dequeue(&rxq)) != NULL) {
|
while ((skb = __skb_dequeue(&rxq)) != NULL) {
|
||||||
sco = (struct skb_cb_overlay *)skb->cb;
|
|
||||||
|
|
||||||
if ((1 << vif->meta[npo.meta_cons].gso_type) &
|
if ((1 << vif->meta[npo.meta_cons].gso_type) &
|
||||||
vif->gso_prefix_mask) {
|
vif->gso_prefix_mask) {
|
||||||
@ -540,19 +539,21 @@ static void xenvif_rx_action(struct xenvif *vif)
|
|||||||
|
|
||||||
resp->offset = vif->meta[npo.meta_cons].gso_size;
|
resp->offset = vif->meta[npo.meta_cons].gso_size;
|
||||||
resp->id = vif->meta[npo.meta_cons].id;
|
resp->id = vif->meta[npo.meta_cons].id;
|
||||||
resp->status = sco->meta_slots_used;
|
resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
|
||||||
|
|
||||||
npo.meta_cons++;
|
npo.meta_cons++;
|
||||||
sco->meta_slots_used--;
|
XENVIF_RX_CB(skb)->meta_slots_used--;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
vif->dev->stats.tx_bytes += skb->len;
|
vif->dev->stats.tx_bytes += skb->len;
|
||||||
vif->dev->stats.tx_packets++;
|
vif->dev->stats.tx_packets++;
|
||||||
|
|
||||||
status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
|
status = xenvif_check_gop(vif,
|
||||||
|
XENVIF_RX_CB(skb)->meta_slots_used,
|
||||||
|
&npo);
|
||||||
|
|
||||||
if (sco->meta_slots_used == 1)
|
if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
|
||||||
flags = 0;
|
flags = 0;
|
||||||
else
|
else
|
||||||
flags = XEN_NETRXF_more_data;
|
flags = XEN_NETRXF_more_data;
|
||||||
@ -589,13 +590,13 @@ static void xenvif_rx_action(struct xenvif *vif)
|
|||||||
|
|
||||||
xenvif_add_frag_responses(vif, status,
|
xenvif_add_frag_responses(vif, status,
|
||||||
vif->meta + npo.meta_cons + 1,
|
vif->meta + npo.meta_cons + 1,
|
||||||
sco->meta_slots_used);
|
XENVIF_RX_CB(skb)->meta_slots_used);
|
||||||
|
|
||||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
|
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
|
||||||
|
|
||||||
need_to_notify |= !!ret;
|
need_to_notify |= !!ret;
|
||||||
|
|
||||||
npo.meta_cons += sco->meta_slots_used;
|
npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -772,6 +773,13 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
|
|||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct xenvif_tx_cb {
|
||||||
|
u16 pending_idx;
|
||||||
|
};
|
||||||
|
|
||||||
|
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
|
||||||
|
|
||||||
static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
|
static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
|
||||||
struct sk_buff *skb,
|
struct sk_buff *skb,
|
||||||
struct xen_netif_tx_request *txp,
|
struct xen_netif_tx_request *txp,
|
||||||
@ -779,7 +787,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
|
|||||||
{
|
{
|
||||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||||
skb_frag_t *frags = shinfo->frags;
|
skb_frag_t *frags = shinfo->frags;
|
||||||
u16 pending_idx = *((u16 *)skb->data);
|
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||||
u16 head_idx = 0;
|
u16 head_idx = 0;
|
||||||
int slot, start;
|
int slot, start;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
@ -897,7 +905,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
|
|||||||
struct gnttab_copy **gopp)
|
struct gnttab_copy **gopp)
|
||||||
{
|
{
|
||||||
struct gnttab_copy *gop = *gopp;
|
struct gnttab_copy *gop = *gopp;
|
||||||
u16 pending_idx = *((u16 *)skb->data);
|
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||||
struct pending_tx_info *tx_info;
|
struct pending_tx_info *tx_info;
|
||||||
int nr_frags = shinfo->nr_frags;
|
int nr_frags = shinfo->nr_frags;
|
||||||
@ -944,7 +952,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* First error: invalidate header and preceding fragments. */
|
/* First error: invalidate header and preceding fragments. */
|
||||||
pending_idx = *((u16 *)skb->data);
|
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||||
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
|
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
|
||||||
for (j = start; j < i; j++) {
|
for (j = start; j < i; j++) {
|
||||||
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
|
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
|
||||||
@ -1236,7 +1244,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
|
|||||||
memcpy(&vif->pending_tx_info[pending_idx].req,
|
memcpy(&vif->pending_tx_info[pending_idx].req,
|
||||||
&txreq, sizeof(txreq));
|
&txreq, sizeof(txreq));
|
||||||
vif->pending_tx_info[pending_idx].head = index;
|
vif->pending_tx_info[pending_idx].head = index;
|
||||||
*((u16 *)skb->data) = pending_idx;
|
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
|
||||||
|
|
||||||
__skb_put(skb, data_len);
|
__skb_put(skb, data_len);
|
||||||
|
|
||||||
@ -1283,7 +1291,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
|
|||||||
u16 pending_idx;
|
u16 pending_idx;
|
||||||
unsigned data_len;
|
unsigned data_len;
|
||||||
|
|
||||||
pending_idx = *((u16 *)skb->data);
|
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||||
txp = &vif->pending_tx_info[pending_idx].req;
|
txp = &vif->pending_tx_info[pending_idx].req;
|
||||||
|
|
||||||
/* Check the remap error code. */
|
/* Check the remap error code. */
|
||||||
|
Reference in New Issue
Block a user