author      Alexander Duyck <alexander.h.duyck@intel.com>    2018-05-07 11:08:46 -0700
committer   David S. Miller <davem@davemloft.net>            2018-05-08 22:30:06 -0400
commit      6053d0f189064302420930f9ef9022e24a04946a
tree        a32784880b5707ba05ab9a1c5d6114ff4012fd85 /net/ipv4
parent      udp: Partially unroll handling of first segment and last segment
udp: Add support for software checksum and GSO_PARTIAL with GSO offload
This patch adds support for a software-provided checksum and for GSO_PARTIAL
segmentation. With this we can offload UDP segmentation on devices that only
have partial support for tunnels. Since we no longer need the hardware
checksum, we can drop the checks in the segmentation code that were verifying
that it was present.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
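A note on the CSUM_MANGLED_0 fallback that appears in the hunks below: UDP uses a
one's-complement checksum in which a computed value of zero must be transmitted as
all ones, because an all-zero checksum field means "no checksum" for UDP over IPv4.
When a segment is not left as CHECKSUM_PARTIAL for the hardware, the patch computes
the checksum in software via gso_make_checksum() and applies that substitution. The
standalone sketch below illustrates the rule in plain userspace C; the helper names
and sample bytes are local to this illustration and are not kernel APIs.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum of 16-bit big-endian words (RFC 1071 style). */
static uint16_t ones_complement_sum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += ((uint32_t)data[i] << 8) | data[i + 1];
        if (len & 1)                    /* odd trailing byte is zero-padded */
                sum += (uint32_t)data[len - 1] << 8;
        while (sum >> 16)               /* fold carries back into 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* UDP stores the complement of the sum, but a result of 0 must be sent as
 * 0xffff; this is the same substitution the CSUM_MANGLED_0 fallback performs
 * when gso_make_checksum() folds to zero.
 */
static uint16_t udp_checksum_field(uint16_t sum)
{
        uint16_t check = (uint16_t)~sum;

        return check ? check : 0xffff;
}

int main(void)
{
        /* Example pseudo-header + UDP header (check field zeroed) + payload. */
        const uint8_t bytes[] = {
                0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x00, 0x02, /* src/dst IP */
                0x00, 0x11, 0x00, 0x0c,                         /* proto 17, len 12 */
                0x04, 0xd2, 0x16, 0x2e, 0x00, 0x0c, 0x00, 0x00, /* UDP header */
                0x68, 0x69, 0x21, 0x0a                          /* payload */
        };

        printf("udp check = 0x%04x\n",
               udp_checksum_field(ones_complement_sum(bytes, sizeof(bytes))));
        return 0;
}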
Diffstat (limited to 'net/ipv4')
-rw-r--r--    net/ipv4/udp_offload.c    29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index b15c78ac3f23..d4f2daca0c33 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -214,6 +214,13 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
                 return segs;
         }
 
+        /* GSO partial and frag_list segmentation only requires splitting
+         * the frame into an MSS multiple and possibly a remainder, both
+         * cases return a GSO skb. So update the mss now.
+         */
+        if (skb_is_gso(segs))
+                mss *= skb_shinfo(segs)->gso_segs;
+
         seg = segs;
         uh = udp_hdr(seg);
@@ -232,6 +239,12 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
                 uh->len = newlen;
                 uh->check = check;
 
+                if (seg->ip_summed == CHECKSUM_PARTIAL)
+                        gso_reset_checksum(seg, ~check);
+                else
+                        uh->check = gso_make_checksum(seg, ~check) ? :
+                                    CSUM_MANGLED_0;
+
                 seg = seg->next;
                 uh = udp_hdr(seg);
         }
@@ -244,6 +257,11 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
         uh->len = newlen;
         uh->check = check;
 
+        if (seg->ip_summed == CHECKSUM_PARTIAL)
+                gso_reset_checksum(seg, ~check);
+        else
+                uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+
         /* update refcount for the packet */
         refcount_add(sum_truesize - gso_skb->truesize, &sk->sk_wmem_alloc);
@@ -251,15 +269,6 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 }
 EXPORT_SYMBOL_GPL(__udp_gso_segment);
 
-static struct sk_buff *__udp4_gso_segment(struct sk_buff *gso_skb,
-                                          netdev_features_t features)
-{
-        if (!can_checksum_protocol(features, htons(ETH_P_IP)))
-                return ERR_PTR(-EIO);
-
-        return __udp_gso_segment(gso_skb, features);
-}
-
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                          netdev_features_t features)
 {
@@ -283,7 +292,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                 goto out;
 
         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-                return __udp4_gso_segment(skb, features);
+                return __udp_gso_segment(skb, features);
 
         mss = skb_shinfo(skb)->gso_size;
         if (unlikely(skb->len <= mss))