author    David S. Miller <davem@davemloft.net>  2015-08-23 15:59:57 -0700
committer David S. Miller <davem@davemloft.net>  2015-08-23 15:59:57 -0700
commit    1728369e8c57f27e0374f4702cbfdb9196bc586b (patch)
tree      3793bb86b081a9c2a6811e1c4c2118484111c25b /drivers
parent    net/xen-netfront: only clean up queues if present (diff)
parent    fou: Do WARN_ON_ONCE in gue_gro_receive for bad proto callbacks (diff)
Merge branch 'gro_tunnels'
Tom Herbert says:

====================
gro: Fixes for tunnels and GRO

This patch set addresses some issues related to tunneling and GRO:

- Fix remote checksum offload to properly deal with frag0 in GRO.
- Add support for GRO at VXLAN tunnel (call gro_cells)

Testing: Ran one netperf TCP_STREAM to highlight the impact of different
configurations:

GUE
  Zero UDP checksum                                         4628.42 MBps
  UDP checksums enabled                                     6800.51 MBps
  UDP checksums and remote checksum offload                 7663.82 MBps
  UDP checksums and remote checksum offload using
    no-partial                                              7287.25 MBps

VXLAN
  Zero UDP checksum                                         4112.02 MBps
  UDP checksums enabled                                     6785.80 MBps
  UDP checksums and remote checksum offload                 7075.56 MBps

v2:
- Drop "gro: Pull headers into skb head for 1st skb in gro list" from
  patch set
- In vxlan_remcsum and gue_remcsum return immediately if remcsum
  processing was already done
- Add gro callbacks for sit offload
- Use WARN_ON_ONCE if we get a GUE protocol that does not have GRO
  offload support

v3:
- Don't restore gro callbacks for sit offload
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
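For context on the second bullet: gro_cells gives a tunnel driver a per-CPU NAPI context so that decapsulated packets can be run through GRO instead of being handed straight to netif_rx(), which is exactly the switch the vxlan diff below makes. The following is a minimal sketch of that wiring for a hypothetical tunnel driver; struct my_tunnel and the my_tunnel_* functions are illustrative only, while gro_cells_init(), gro_cells_receive() and gro_cells_destroy() are the real helpers from include/net/gro_cells.h.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>

/* Hypothetical tunnel private area; only the gro_cells member matters here. */
struct my_tunnel {
	struct net_device *dev;
	struct gro_cells gro_cells;
};

/* Device setup: allocate the per-CPU GRO cells for this netdev. */
static void my_tunnel_setup(struct my_tunnel *tun, struct net_device *dev)
{
	tun->dev = dev;
	gro_cells_init(&tun->gro_cells, dev);
}

/* Decapsulated receive path: feed the inner skb to GRO so inner TCP
 * segments can be coalesced, instead of calling netif_rx() directly.
 */
static void my_tunnel_rx(struct my_tunnel *tun, struct sk_buff *skb)
{
	gro_cells_receive(&tun->gro_cells, skb);
}

/* Teardown: flush and free the cells before the device is unregistered. */
static void my_tunnel_teardown(struct my_tunnel *tun)
{
	gro_cells_destroy(&tun->gro_cells);
}

The vxlan hunks below follow the same shape: init in vxlan_setup(), receive in vxlan_rcv(), destroy in vxlan_dellink() and vxlan_exit_net().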
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/vxlan.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 54615bb9d916..61b457b9ec00 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -519,10 +519,10 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
u32 data, struct gro_remcsum *grc,
bool nopartial)
{
- size_t start, offset, plen;
+ size_t start, offset;
if (skb->remcsum_offload)
- return NULL;
+ return vh;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
@@ -532,17 +532,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
offsetof(struct udphdr, check) :
offsetof(struct tcphdr, check));
- plen = hdrlen + offset + sizeof(u16);
-
- /* Pull checksum that will be written */
- if (skb_gro_header_hard(skb, off + plen)) {
- vh = skb_gro_header_slow(skb, off + plen, off);
- if (!vh)
- return NULL;
- }
-
- skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
- start, offset, grc, nopartial);
+ vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
@@ -573,7 +564,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
goto out;
}
- skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
flags = ntohl(vh->vx_flags);
@@ -588,6 +578,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
goto out;
}
+ skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
flush = 0;
for (p = *head; p; p = p->next) {
@@ -1110,6 +1102,9 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
{
size_t start, offset, plen;
+ if (skb->remcsum_offload)
+ return vh;
+
start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
offset = start + ((data & VXLAN_RCO_UDP) ?
offsetof(struct udphdr, check) :
@@ -1213,7 +1208,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- netif_rx(skb);
+ gro_cells_receive(&vxlan->gro_cells, skb);
return;
drop:
@@ -2451,6 +2446,8 @@ static void vxlan_setup(struct net_device *dev)
vxlan->dev = dev;
+ gro_cells_init(&vxlan->gro_cells, dev);
+
for (h = 0; h < FDB_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
@@ -2890,6 +2887,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
+ gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
@@ -3098,8 +3096,10 @@ static void __net_exit vxlan_exit_net(struct net *net)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net))
+ if (!net_eq(dev_net(vxlan->dev), net)) {
+ gro_cells_destroy(&vxlan->gro_cells);
unregister_netdevice_queue(vxlan->dev, &list);
+ }
}
unregister_netdevice_many(&list);
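On the first bullet (frag0 handling): the first two hunks above drop the manual skb_gro_header_hard()/skb_gro_header_slow() pull from vxlan_gro_remcsum() and instead let the reworked skb_gro_remcsum_process() return the (possibly re-pulled) header pointer. For reference, the sketch below shows the usual GRO header-access pattern those helpers implement, the same pattern vxlan_gro_receive() uses earlier in this file; the vxlan_gro_header() wrapper name is illustrative, not part of the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/vxlan.h>

/* Usual GRO header access: try the frag0 fast path first, fall back to
 * pulling the header into the linear area when it is not fully there.
 */
static struct vxlanhdr *vxlan_gro_header(struct sk_buff *skb, unsigned int off)
{
	unsigned int hlen = off + sizeof(struct vxlanhdr);
	struct vxlanhdr *vh;

	vh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off);
		if (!vh)
			return NULL;	/* header not available, give up on GRO */
	}
	return vh;
}

Centralizing that pull inside skb_gro_remcsum_process() is what lets the callers in this diff shrink to a single call and simply use its return value.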