author      Michael S. Tsirkin <mst@redhat.com>      2013-11-28 13:30:59 +0200
committer   David S. Miller <davem@davemloft.net>    2013-12-01 20:27:16 -0500
commit      f121159d72091f25afb22007c833e60a6845e912 (patch)
tree        d9570571764a2a1f74e0af9619464228e2af77c6
parent      virtio_net: fix error handling for mergeable buffers (diff)
download    linux-dev-f121159d72091f25afb22007c833e60a6845e912.tar.xz
            linux-dev-f121159d72091f25afb22007c833e60a6845e912.zip
virtio_net: make all RX paths handle errors consistently
receive_mergeable() now handles errors internally. Do the same for the big
and small packet paths, otherwise the logic is too hard to follow.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/virtio_net.c   54
1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 71a2eac7b039..916241d16c67 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,6 +299,35 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
         return skb;
 }
 
+static struct sk_buff *receive_small(void *buf, unsigned int len)
+{
+        struct sk_buff * skb = buf;
+
+        len -= sizeof(struct virtio_net_hdr);
+        skb_trim(skb, len);
+
+        return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                   struct receive_queue *rq,
+                                   void *buf,
+                                   unsigned int len)
+{
+        struct page *page = buf;
+        struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+        if (unlikely(!skb))
+                goto err;
+
+        return skb;
+
+err:
+        dev->stats.rx_dropped++;
+        give_pages(rq, page);
+        return NULL;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
                                          struct receive_queue *rq,
                                          void *buf,
@@ -392,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
         struct net_device *dev = vi->dev;
         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
         struct sk_buff *skb;
-        struct page *page;
         struct skb_vnet_hdr *hdr;
 
         if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -407,23 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                 return;
         }
 
-        if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-                skb = buf;
-                len -= sizeof(struct virtio_net_hdr);
-                skb_trim(skb, len);
-        } else if (vi->mergeable_rx_bufs) {
+        if (vi->mergeable_rx_bufs)
                 skb = receive_mergeable(dev, rq, buf, len);
-                if (unlikely(!skb))
-                        return;
-        } else {
-                page = buf;
-                skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-                if (unlikely(!skb)) {
-                        dev->stats.rx_dropped++;
-                        give_pages(rq, page);
-                        return;
-                }
-        }
+        else if (vi->big_packets)
+                skb = receive_big(dev, rq, buf, len);
+        else
+                skb = receive_small(buf, len);
+
+        if (unlikely(!skb))
+                return;
 
         hdr = skb_vnet_hdr(skb);
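
The sketch below is a minimal, self-contained illustration of the error-handling shape receive_buf() converges on after this patch: each receive path cleans up its own buffer on failure and returns NULL, so the dispatcher needs a single NULL check. Everything in it (struct pkt, rx_small()/rx_big()/rx_dispatch(), FAKE_HDR_LEN) is an invented stand-in for this example, not code from drivers/net/virtio_net.c.

/* Illustrative sketch only: simplified stand-ins, not the driver code.
 * Each receive path frees its own buffer on error and returns NULL, so
 * the dispatcher below has exactly one failure check, mirroring the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_HDR_LEN 4          /* stand-in for sizeof(struct virtio_net_hdr) */

struct pkt {
        char *data;
        size_t len;
};

/* Small-buffer path: strip the header; cannot fail, like receive_small(). */
static struct pkt *rx_small(struct pkt *p)
{
        p->len = p->len > FAKE_HDR_LEN ? p->len - FAKE_HDR_LEN : 0;
        return p;
}

/* Big-buffer path: on failure it releases the buffer itself and returns NULL,
 * mirroring receive_big() bumping rx_dropped and calling give_pages(). */
static struct pkt *rx_big(struct pkt *p)
{
        if (p->len <= FAKE_HDR_LEN) {   /* simulated page_to_skb() failure */
                free(p->data);
                free(p);
                return NULL;
        }
        p->len -= FAKE_HDR_LEN;
        return p;
}

/* Dispatcher in the shape of the post-patch receive_buf(). */
static struct pkt *rx_dispatch(struct pkt *p, int big_packets)
{
        struct pkt *out;

        if (big_packets)
                out = rx_big(p);
        else
                out = rx_small(p);

        if (!out)                       /* single, shared error check */
                return NULL;
        return out;
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));

        p->data = malloc(16);
        strcpy(p->data, "hdr!payload");
        p->len = strlen(p->data);

        if (rx_dispatch(p, 1)) {
                printf("delivered %zu payload bytes\n", p->len);
                free(p->data);
                free(p);
        } else {
                printf("dropped\n");    /* buffer already freed by the path */
        }
        return 0;
}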