author		Vlad Yasevich <vyasevic@redhat.com>	2013-06-25 16:04:22 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-25 16:45:23 -0700
commit		3e4f8b787370978733ca6cae452720a4f0c296b8
tree		dea29bd9127db364d49599759f3a63df4a4babf7	/drivers/net/macvtap.c
parent		macvtap: Let TUNSETOFFLOAD actually controll offload features.
macvtap: Perform GSO on forwarding path.
When macvtap forwards an skb to its tap, it needs to check whether GSO needs to be performed. This is sometimes necessary when the HW device performed GRO, but the guest reading from the tap does not support it (ex: Windows 7).

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
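For context, the feature mask consulted on the forwarding path (vlan->tap_features) is the set of offloads the tap reader requested via the TUNSETOFFLOAD ioctl, which the parent commit wired up. Below is a minimal userspace sketch of a reader that cannot handle GSO packets and therefore clears all offload flags; it is illustrative only, and "/dev/tap11" is a hypothetical device node (macvtap exposes /dev/tapN, keyed by the macvtap interface's ifindex):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

int main(void)
{
	/* "/dev/tap11" is a placeholder; substitute the real
	 * /dev/tapN node for the macvtap interface in use.
	 */
	int fd = open("/dev/tap11", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A reader that cannot consume GSO packets clears all offload
	 * flags.  A reader that handles TSO would instead pass, e.g.,
	 * TUN_F_CSUM | TUN_F_TSO4.
	 */
	if (ioctl(fd, TUNSETOFFLOAD, 0) < 0) {
		perror("TUNSETOFFLOAD");
		return 1;
	}

	/* ... read() frames from fd as usual ... */
	return 0;
}

With the flags cleared, a GRO'd skb arriving from the lower device no longer matches the tap's features, so netif_needs_gso() fires on the forwarding path in the patch below and the kernel segments the skb before queueing it to the tap.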
Diffstat (limited to 'drivers/net/macvtap.c')
-rw-r--r--	drivers/net/macvtap.c	32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7eab01975ed1..5bfaecdd2354 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -276,14 +276,44 @@ static void macvtap_del_queues(struct net_device *dev)
  */
 static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
+	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+	netdev_features_t features;
 	if (!q)
 		goto drop;
 
 	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
 		goto drop;
 
-	skb_queue_tail(&q->sk.sk_receive_queue, skb);
+	skb->dev = dev;
+	/* Apply the forward feature mask so that we perform segmentation
+	 * according to users wishes.
+	 */
+	features = netif_skb_features(skb) & vlan->tap_features;
+	if (netif_needs_gso(skb, features)) {
+		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+		if (IS_ERR(segs))
+			goto drop;
+
+		if (!segs) {
+			skb_queue_tail(&q->sk.sk_receive_queue, skb);
+			goto wake_up;
+		}
+
+		kfree_skb(skb);
+		while (segs) {
+			struct sk_buff *nskb = segs->next;
+
+			segs->next = NULL;
+			skb_queue_tail(&q->sk.sk_receive_queue, segs);
+			segs = nskb;
+		}
+	} else {
+		skb_queue_tail(&q->sk.sk_receive_queue, skb);
+	}
+
+wake_up:
 	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
 	return NET_RX_SUCCESS;
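Note the three-way return of __skb_gso_segment() above: an ERR_PTR means segmentation failed and the packet is dropped; NULL means the skb needs no software segmentation and the original is queued unchanged; otherwise it returns a linked list of segments, so the original skb is freed with kfree_skb() and each segment is unlinked and queued to the tap individually before the single wake-up.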