author	Ronak Doshi <ronak.doshi@broadcom.com>	2025-05-30 15:27:00 +0000
committer	Paolo Abeni <pabeni@redhat.com>	2025-06-03 11:54:26 +0200
commit	982d30c30eaa2ec723df42e3bf526c014c1dbb88 (patch)
tree	a00f15e5a1800c7d5ef17b58ee227ea5f10974bf /drivers
parent	Revert "kunit: configs: Enable CONFIG_INIT_STACK_ALL_PATTERN in all_tests" (diff)
vmxnet3: correctly report gso type for UDP tunnels
Commit 3d010c8031e3 ("udp: do not accept non-tunnel GSO skbs landing in a tunnel") added checks to the Linux stack so that non-tunnel GRO packets landing in a tunnel are no longer accepted. This exposed an issue in vmxnet3, which was not reporting the correct GSO type for tunnel packets. This patch fixes the issue by setting the correct GSO type on such packets.

Currently, vmxnet3 does not support reporting inner fields for LRO tunnel packets. The issue is not seen for egress drivers that do not use skb inner fields. The workaround is to enable tnl-segmentation offload on the egress interfaces, if the driver supports it. That problem pre-exists this fix and can be addressed by a separate future patch.

Fixes: dacce2be3312 ("vmxnet3: add geneve and vxlan tunnel offload support")
Signed-off-by: Ronak Doshi <ronak.doshi@broadcom.com>
Acked-by: Guolin Yang <guolin.yang@broadcom.com>
Link: https://patch.msgid.link/20250530152701.70354-1-ronak.doshi@broadcom.com
[pabeni@redhat.com: dropped the changelog]
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
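[Editorial note: as a concrete reference for the workaround mentioned above, tnl-segmentation offload is exposed through the standard netdev feature flags and can be queried and toggled with ethtool. The device name eth0 is a placeholder, and availability depends on the egress driver:

	ethtool -k eth0 | grep tx-udp_tnl-segmentation
	ethtool -K eth0 tx-udp_tnl-segmentation on]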
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2440e30c5bd1..0572f6a9bdb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1572,6 +1572,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
 	return (hlen + (hdr.tcp->doff << 2));
 }
 
+static void
+vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+	struct udphdr *uh = NULL;
+
+	if (ip_proto == htons(ETH_P_IP)) {
+		struct iphdr *iph = (struct iphdr *)skb->data;
+
+		if (iph->protocol == IPPROTO_UDP)
+			uh = (struct udphdr *)(iph + 1);
+	} else {
+		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+		if (iph->nexthdr == IPPROTO_UDP)
+			uh = (struct udphdr *)(iph + 1);
+	}
+	if (uh) {
+		if (uh->check)
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+		else
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+	}
+}
+
 static int
 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		       struct vmxnet3_adapter *adapter, int quota)
@@ -1885,6 +1909,8 @@ sop_done:
 			if (segCnt != 0 && mss != 0) {
 				skb_shinfo(skb)->gso_type = rcd->v4 ?
 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+				if (encap_lro)
+					vmxnet3_lro_tunnel(skb, skb->protocol);
 				skb_shinfo(skb)->gso_size = mss;
 				skb_shinfo(skb)->gso_segs = segCnt;
 			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
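
[Editorial note: for readers skimming the first hunk, here is the new vmxnet3_lro_tunnel() helper reproduced with explanatory comments. The code is copied from the diff above; the comments are editorial additions, not part of the committed patch:

	/*
	 * Mark an LRO-coalesced UDP tunnel skb with the GSO tunnel type the
	 * stack expects, picking the _CSUM variant when the outer UDP
	 * checksum is present.
	 */
	static void
	vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
	{
		struct udphdr *uh = NULL;

		if (ip_proto == htons(ETH_P_IP)) {
			/* skb->data is assumed to sit at the outer IPv4 header. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			/* Outer IPv6: the UDP header is taken to follow the
			 * fixed 40-byte header, i.e. no extension headers
			 * are expected here. */
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			/* Reflect the outer checksum in the GSO type so that
			 * later resegmentation handles it correctly. */
			if (uh->check)
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}

The second hunk calls this helper only when encap_lro is set, i.e. only for coalesced packets the device reported as encapsulated, leaving plain TCP LRO packets with just SKB_GSO_TCPV4/SKB_GSO_TCPV6 as before.]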