aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/virtio_net.c
diff options
context:
space:
mode:
authorWillem de Bruijn <willemb@google.com>2017-04-24 13:49:29 -0400
committerDavid S. Miller <davem@davemloft.net>2017-04-24 23:55:19 -0400
commit7b0411ef4aa69c9256d6a2c289d0a2b320414633 (patch)
tree53dfadb8aa5536fb7029b405a1b854551a6d9502 /drivers/net/virtio_net.c
parentvirtio-net: move free_old_xmit_skbs (diff)
downloadlinux-dev-7b0411ef4aa69c9256d6a2c289d0a2b320414633.tar.xz
linux-dev-7b0411ef4aa69c9256d6a2c289d0a2b320414633.zip
virtio-net: clean tx descriptors from rx napi
Amortize the cost of virtual interrupts by doing both rx and tx work on reception of a receive interrupt if tx napi is enabled. With VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion interrupts for bidirectional workloads. Signed-off-by: Willem de Bruijn <willemb@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r--drivers/net/virtio_net.c21
1 file changed, 21 insertions, 0 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4ec79e5d7a86..9dd978f34c1f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1075,12 +1075,33 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&stats->tx_syncp);
}
+/* Opportunistically reclaim completed tx descriptors from the rx napi
+ * path.  With tx napi enabled this lets a single receive interrupt also
+ * drive tx completion work for the paired send queue, suppressing most
+ * explicit tx completion interrupts (see commit message above). */
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+ /* rx queue i is paired with tx queue i; operate on that send queue. */
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ /* Only when tx napi is enabled (nonzero napi weight); otherwise tx
+ * completions are handled elsewhere. */
+ if (!sq->napi.weight)
+ return;
+
+ /* trylock, not lock: if the tx path currently holds the queue lock
+ * it will reclaim skbs itself, so skip rather than spin in napi. */
+ if (__netif_tx_trylock(txq)) {
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+ }
+
+ /* Re-enable the stack's tx queue once there is room for a worst-case
+ * skb (header descriptors plus MAX_SKB_FRAGS fragments).
+ * NOTE(review): wake-up happens outside the tx lock here — confirm
+ * against later upstream fixes that moved it under the lock. */
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
+ virtnet_poll_cleantx(rq);
+
received = virtnet_receive(rq, budget);
/* Out of packets? */