Diffstat (limited to 'drivers/vhost/net.c')
drivers/vhost/net.c | 114
1 file changed, 108 insertions(+), 6 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 072cbbadbc36..67898fa9c447 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -42,6 +42,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
+/*
+ * For transmit, used buffer len is unused; we override it to track buffer
+ * status internally; used for zerocopy tx only.
+ */
+/* Lower device DMA failed */
+#define VHOST_DMA_FAILED_LEN 3
+/* Lower device DMA done */
+#define VHOST_DMA_DONE_LEN 2
+/* Lower device DMA in progress */
+#define VHOST_DMA_IN_PROGRESS 1
+/* Buffer unused */
+#define VHOST_DMA_CLEAR_LEN 0
+
+#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+
enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
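
The four length values above form a small per-descriptor state machine: for zerocopy TX the used-ring len field carries no real length, so it is repurposed to record whether the lower device's DMA is still pending, finished, or failed. A standalone sketch of how the encoding is meant to be read (constants copied from the hunk above; this is illustration, not driver code):

#include <assert.h>

#define VHOST_DMA_FAILED_LEN	3	/* lower device DMA failed */
#define VHOST_DMA_DONE_LEN	2	/* lower device DMA done */
#define VHOST_DMA_IN_PROGRESS	1	/* DMA submitted, not yet completed */
#define VHOST_DMA_CLEAR_LEN	0	/* slot unused */

#define VHOST_DMA_IS_DONE(len)	((len) >= VHOST_DMA_DONE_LEN)

int main(void)
{
	/* Failure still counts as "done": the buffer can be recycled either
	 * way, the failure is only recorded for the zerocopy heuristic. */
	assert(VHOST_DMA_IS_DONE(VHOST_DMA_DONE_LEN));
	assert(VHOST_DMA_IS_DONE(VHOST_DMA_FAILED_LEN));
	assert(!VHOST_DMA_IS_DONE(VHOST_DMA_IN_PROGRESS));
	assert(!VHOST_DMA_IS_DONE(VHOST_DMA_CLEAR_LEN));
	return 0;
}
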
@@ -62,8 +77,33 @@ struct vhost_net {
* We only do this when socket buffer fills up.
* Protected by tx vq lock. */
enum vhost_net_poll_state tx_poll_state;
+ /* Number of TX recently submitted.
+ * Protected by tx vq lock. */
+ unsigned tx_packets;
+ /* Number of times zerocopy TX recently failed.
+ * Protected by tx vq lock. */
+ unsigned tx_zcopy_err;
};
+static void vhost_net_tx_packet(struct vhost_net *net)
+{
+ ++net->tx_packets;
+ if (net->tx_packets < 1024)
+ return;
+ net->tx_packets = 0;
+ net->tx_zcopy_err = 0;
+}
+
+static void vhost_net_tx_err(struct vhost_net *net)
+{
+ ++net->tx_zcopy_err;
+}
+
+static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
+{
+ return net->tx_packets / 64 >= net->tx_zcopy_err;
+}
+
static bool vhost_sock_zcopy(struct socket *sock)
{
return unlikely(experimental_zcopytx) &&
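
vhost_net_tx_select_zcopy() turns zerocopy off whenever recent failures exceed roughly one per 64 packets, and vhost_net_tx_packet() resets both counters every 1024 packets so an old burst of errors does not disable zerocopy forever. A rough userspace model of the same arithmetic (not kernel code, just the heuristic in isolation):

#include <stdbool.h>
#include <stdio.h>

struct model { unsigned tx_packets, tx_zcopy_err; };

/* mirrors vhost_net_tx_packet(): roll the window over every 1024 packets */
static void tx_packet(struct model *m)
{
	if (++m->tx_packets >= 1024)
		m->tx_packets = m->tx_zcopy_err = 0;
}

/* mirrors vhost_net_tx_select_zcopy(): allow at most ~1 failure per 64 pkts */
static bool select_zcopy(const struct model *m)
{
	return m->tx_packets / 64 >= m->tx_zcopy_err;
}

int main(void)
{
	struct model m = { .tx_packets = 640, .tx_zcopy_err = 10 };

	printf("zcopy=%d\n", select_zcopy(&m));	/* 1: 10 errors in 640 pkts */
	m.tx_zcopy_err = 11;
	printf("zcopy=%d\n", select_zcopy(&m));	/* 0: above 1-in-64, copy */
	while (m.tx_packets)			/* window rolls over at 1024 */
		tx_packet(&m);
	printf("zcopy=%d\n", select_zcopy(&m));	/* 1: counters were reset */
	return 0;
}
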
@@ -126,6 +166,55 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
net->tx_poll_state = VHOST_NET_POLL_STARTED;
}
+/* The lower device may complete DMAs out of order for some reason.
+ * upend_idx tracks the end of the used idx, done_idx tracks the head
+ * of the used idx. Once the lower device has completed DMAs contiguously,
+ * we signal the used idx to the KVM guest.
+ */
+static int vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ int i;
+ int j = 0;
+
+ for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+ if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
+ vhost_net_tx_err(net);
+ if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
+ vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+ vhost_add_used_and_signal(vq->dev, vq,
+ vq->heads[i].id, 0);
+ ++j;
+ } else
+ break;
+ }
+ if (j)
+ vq->done_idx = i;
+ return j;
+}
+
+static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+{
+ struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+ struct vhost_virtqueue *vq = ubufs->vq;
+ int cnt = atomic_read(&ubufs->kref.refcount);
+
+ /*
+ * Trigger polling thread if guest stopped submitting new buffers:
+ * in this case, the refcount after decrement will eventually reach 1
+ * so here it is 2.
+ * We also trigger polling periodically after each 16 packets
+ * (the value 16 here is more or less arbitrary, it's tuned to trigger
+ * less than 10% of times).
+ */
+ if (cnt <= 2 || !(cnt % 16))
+ vhost_poll_queue(&vq->poll);
+ /* set len to mark this descriptor's buffers as DMA done */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ vhost_ubuf_put(ubufs);
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
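
Two pieces above deserve a note. vhost_zerocopy_signal_used() treats heads[] between done_idx and upend_idx as a window of outstanding zerocopy buffers (the array wraps at UIO_MAXIOV entries); the lower device may finish them out of order, so the function only hands back the leading contiguous run of completed entries and leaves done_idx at the first one still in flight. vhost_zerocopy_callback() is what the lower device invokes on completion: it records success or failure in the len field and wakes the TX worker either when the guest has stopped submitting new buffers (see the refcount comment above) or once every 16 completions. A small userspace model of the window walk (not kernel code):

#include <stdio.h>

#define RING	8	/* stands in for UIO_MAXIOV */

static unsigned done_idx, upend_idx;
static int state[RING];	/* 0 = clear, 1 = in flight, >= 2 = done/failed */

/* hand back only the leading contiguous run of completed entries */
static int signal_used(void)
{
	int n = 0;

	while (done_idx != upend_idx && state[done_idx] >= 2) {
		state[done_idx] = 0;		/* VHOST_DMA_CLEAR_LEN */
		done_idx = (done_idx + 1) % RING;
		n++;				/* would add_used + signal here */
	}
	return n;
}

int main(void)
{
	upend_idx = 3;				/* three buffers submitted */
	state[0] = state[1] = state[2] = 1;
	state[1] = 2;				/* middle one finishes first */
	printf("freed %d\n", signal_used());	/* 0: head still in flight */
	state[0] = 2;
	printf("freed %d\n", signal_used());	/* 2: entries 0 and 1 */
	return 0;
}
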
@@ -172,7 +261,7 @@ static void handle_tx(struct vhost_net *net)
for (;;) {
/* Release DMAs done buffers first */
if (zcopy)
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(net, vq);
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
@@ -227,7 +316,8 @@ static void handle_tx(struct vhost_net *net)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy) {
vq->heads[vq->upend_idx].id = head;
- if (len < VHOST_GOODCOPY_LEN) {
+ if (!vhost_net_tx_select_zcopy(net) ||
+ len < VHOST_GOODCOPY_LEN) {
+ /* the copy path doesn't need to wait for DMA completion */
vq->heads[vq->upend_idx].len =
VHOST_DMA_DONE_LEN;
@@ -237,7 +327,8 @@ static void handle_tx(struct vhost_net *net)
} else {
struct ubuf_info *ubuf = &vq->ubuf_info[head];
- vq->heads[vq->upend_idx].len = len;
+ vq->heads[vq->upend_idx].len =
+ VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = vq->ubufs;
ubuf->desc = vq->upend_idx;
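
Note the asymmetry between the two branches above: the copy fallback writes VHOST_DMA_DONE_LEN into heads[upend_idx] immediately, so the done_idx/upend_idx window recycles that slot on the next pass of vhost_zerocopy_signal_used() even though no zerocopy DMA ever happened; only the true zerocopy path leaves the slot as VHOST_DMA_IN_PROGRESS and hands a ubuf_info callback to the lower device to flip it later.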
@@ -268,8 +359,9 @@ static void handle_tx(struct vhost_net *net)
if (!zcopy)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(net, vq);
total_len += len;
+ vhost_net_tx_packet(net);
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
@@ -379,7 +471,8 @@ static void handle_rx(struct vhost_net *net)
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
- int err, headcount, mergeable;
+ int err, mergeable;
+ s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
/* TODO: check that we are running from vhost_worker? */
@@ -593,9 +686,18 @@ static int vhost_net_release(struct inode *inode, struct file *f)
struct vhost_net *n = f->private_data;
struct socket *tx_sock;
struct socket *rx_sock;
+ int i;
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
+ for (i = 0; i < n->dev.nvqs; ++i) {
+ /* Wait for all outstanding lower device DMAs to finish. */
+ if (n->dev.vqs[i].ubufs)
+ vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
+
+ vhost_zerocopy_signal_used(n, &n->dev.vqs[i]);
+ }
vhost_dev_cleanup(&n->dev, false);
if (tx_sock)
fput(tx_sock->file);
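
The teardown order here matters: the device is stopped first so no new zerocopy transmissions can be queued, vhost_ubuf_put_and_wait() then blocks until every DMA already handed to the lower device has invoked vhost_zerocopy_callback(), and vhost_zerocopy_signal_used() flushes those completions before vhost_dev_cleanup() frees the vq state. vhost_ubuf_put_and_wait() itself is not part of this hunk; the sketch below is only a rough userspace model of the reference-count-then-wait pattern it is assumed to implement, not the vhost code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ubuf_refs = 1;	/* base reference held by the vq */

static void submit(void)   { atomic_fetch_add(&ubuf_refs, 1); }	/* per in-flight buffer */
static void callback(void) { atomic_fetch_sub(&ubuf_refs, 1); }	/* lower device completion */

static void put_and_wait(void)
{
	atomic_fetch_sub(&ubuf_refs, 1);	/* drop the base reference */
	while (atomic_load(&ubuf_refs) > 0)
		;	/* the kernel sleeps on a waitqueue instead of spinning */
	printf("all DMAs completed, safe to clean up\n");
}

int main(void)
{
	submit();
	submit();
	callback();
	callback();	/* in the driver these run from the lower device */
	put_and_wait();
	return 0;
}
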
@@ -728,7 +830,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
if (oldubufs) {
vhost_ubuf_put_and_wait(oldubufs);
mutex_lock(&vq->mutex);
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
}