vhost_net: tx batching
This patch tries to utilize tuntap rx batching by peeking at the tx virtqueue during transmission: if there are more buffers available in the virtqueue, set the MSG_MORE flag as a hint for the backend (e.g. tuntap) to batch the packets.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0ed005ce02
parent 275bf960ac
committed by David S. Miller
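For context, MSG_MORE is the standard sendmsg flag this patch leans on: the sender sets it on every message of a run except the last, telling the receiving side that more data is on the way so per-packet work can be deferred. Below is a minimal userspace sketch of that convention, assuming plain sendmsg(2) on a hypothetical fd; it is illustrative only and not the in-kernel vhost path, where the flag travels in msg.msg_flags instead.

/* Minimal sketch of the MSG_MORE convention over sendmsg(2).
 * send_batch(), the fd and the packet buffers are hypothetical. */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_batch(int fd, struct iovec *pkts, int n)
{
        ssize_t total = 0;
        int i;

        for (i = 0; i < n; i++) {
                struct msghdr msg;
                ssize_t r;
                /* Hint batching on all but the final packet, so the
                 * backend may hold packets until the run ends. */
                int flags = (i < n - 1) ? MSG_MORE : 0;

                memset(&msg, 0, sizeof(msg));
                msg.msg_iov = &pkts[i];
                msg.msg_iovlen = 1;

                r = sendmsg(fd, &msg, flags);
                if (r < 0)
                        return r;
                total += r;
        }
        return total;
}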
@@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 	return r;
 }
 
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+
+	return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
+		== nvq->done_idx;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
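The new helper factors out the wrap-around arithmetic previously open-coded in handle_tx(). upend_idx is the next free slot in the UIO_MAXIOV-sized ring of in-flight zerocopy buffers and done_idx is the next slot awaiting completion, so the test fires when advancing upend_idx by another vq->num - VHOST_MAX_PEND slots, modulo the ring size, lands exactly on done_idx, i.e. too many zerocopy DMAs are still pending. A standalone sketch of the same test, with hypothetical sample values (exceeds_maxpend() here is a stand-in, not the kernel function):

/* Standalone sketch of the wrap-around test; sample values are
 * hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define UIO_MAXIOV	1024	/* size of the pending-buffer ring */
#define VHOST_MAX_PEND	128	/* cap on outstanding zerocopy DMAs */

static bool exceeds_maxpend(unsigned int upend_idx, unsigned int done_idx,
                            unsigned int vq_num)
{
        /* True when advancing upend_idx by (vq_num - VHOST_MAX_PEND)
         * slots, with wrap-around, collides with done_idx. */
        return (upend_idx + vq_num - VHOST_MAX_PEND) % UIO_MAXIOV
                == done_idx;
}

int main(void)
{
        /* With vq_num = 256 the test fires once done_idx trails
         * upend_idx by UIO_MAXIOV - (256 - 128) = 896 slots. */
        printf("%d\n", exceeds_maxpend(900, 4, 256));  /* 1: too full */
        printf("%d\n", exceeds_maxpend(10, 4, 256));   /* 0: room left */
        return 0;
}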
@@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net)
 		/* If more outstanding DMAs, queue the work.
 		 * Handle upend_idx wrap around
 		 */
-		if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
-			      % UIO_MAXIOV == nvq->done_idx))
+		if (unlikely(vhost_exceeds_maxpend(net)))
 			break;
 
 		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
@@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net)
 			msg.msg_control = NULL;
 			ubufs = NULL;
 		}
+
+		total_len += len;
+		if (total_len < VHOST_NET_WEIGHT &&
+		    !vhost_vq_avail_empty(&net->dev, vq) &&
+		    likely(!vhost_exceeds_maxpend(net))) {
+			msg.msg_flags |= MSG_MORE;
+		} else {
+			msg.msg_flags &= ~MSG_MORE;
+		}
+
 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
 		err = sock->ops->sendmsg(sock, &msg, len);
 		if (unlikely(err < 0)) {
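This block is the heart of the patch: MSG_MORE stays set while the tx byte budget (VHOST_NET_WEIGHT) has headroom, peeking shows the avail ring still holds buffers, and the zerocopy pending limit is not hit; otherwise the flag is cleared so the backend flushes whatever it has batched. A hedged sketch of how a backend might consume the hint follows; it is illustrative only, not the actual tuntap rx-batching code, and backend_recv()/flush_batch() are hypothetical helpers.

/* Minimal sketch of a backend honoring MSG_MORE: queue packets while
 * the sender promises more, flush the batch when the hint clears.
 * Not the tuntap implementation; types and sizes are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX 64

static int batch[BATCH_MAX];	/* stand-in for queued packets */
static int batch_len;

static void flush_batch(void)
{
        /* Process the whole run at once, amortizing per-packet cost. */
        printf("processing %d packets\n", batch_len);
        batch_len = 0;
}

static void backend_recv(int pkt, bool msg_more)
{
        batch[batch_len++] = pkt;
        /* Flush when the sender stops hinting or the queue is full. */
        if (!msg_more || batch_len == BATCH_MAX)
                flush_batch();
}

int main(void)
{
        /* Two packets hinted as a batch, then the final packet. */
        backend_recv(1, true);
        backend_recv(2, true);
        backend_recv(3, false);	/* hint cleared: flush fires here */
        return 0;
}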
@@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net)
 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
 		else
 			vhost_zerocopy_signal_used(net, vq);
-		total_len += len;
 		vhost_net_tx_packet(net);
 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
 			vhost_poll_queue(&vq->poll);