 include/linux/udp.h |  6 ++++++
 include/net/udp.h   | 45 +++++++++++++++++++++++++++++++++++++--------
 net/ipv4/udp.c      | 23 ++++++++++++++++++++++-
 net/ipv6/udp.c      | 24 +++++++++++++++++++++++-
 4 files changed, 88 insertions(+), 10 deletions(-)
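In short: this change makes the UDP receive path cope with GSO packets (SKB_GSO_UDP_L4) that reach a socket which has not enabled GRO. udp_unexpected_gso() detects that case, and udp_rcv_segment() splits the aggregate back into individual datagrams before queueing; a failed segmentation is charged to the socket once per would-be segment. The __UDPX_INC_STATS() rework introduces __UDPX_MIB() so this accounting can pick the correct MIB table (UDP vs. UDP-Lite, IPv4 vs. IPv6) at run time.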
diff --git a/include/linux/udp.h b/include/linux/udp.h
index e23d5024f42f..0a9c54e76305 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -132,6 +132,12 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
}
}
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
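The gro_enabled flag tested by udp_unexpected_gso() is the per-socket bit behind the UDP_GRO socket option (SOL_UDP, value 104) from the companion GRO patches; with it set, aggregated packets are delivered to the socket unsplit. A minimal userspace sketch of opting in, defining the option value locally in case libc headers predate it:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* SOL_UDP option backing udp_sk(sk)->gro_enabled */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;

	/* Opt in to GRO-aggregated delivery; without this, the kernel
	 * re-segments SKB_GSO_UDP_L4 skbs before queueing them (see the
	 * udp_queue_rcv_skb() changes below). */
	if (setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one)) < 0)
		perror("setsockopt(UDP_GRO)");

	close(fd);
	return 0;
}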
diff --git a/include/net/udp.h b/include/net/udp.h
index a496e441645e..eccca2325ee6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -417,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field)					\
-do {									\
-	if ((sk)->sk_family == AF_INET)					\
-		__UDP_INC_STATS(sock_net(sk), field, 0);		\
-	else								\
-		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
-} while (0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+				 sock_net(sk)->mib.udp_statistics) :	\
+	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
+				 sock_net(sk)->mib.udp_stats_in6);	\
+})
#else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
+			 sock_net(sk)->mib.udp_statistics;		\
+})
#endif
+#define __UDPX_INC_STATS(sk, field) \
+	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
sa_family_t family;
@@ -461,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+					      struct sk_buff *skb, bool ipv4)
+{
+	struct sk_buff *segs;
+
+	/* the GSO CB lies after the UDP one, no need to save and restore any
+	 * CB fragment
+	 */
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segs))) {
+		int segs_nr = skb_shinfo(skb)->gso_segs;
+
+		atomic_add(segs_nr, &sk->sk_drops);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	consume_skb(skb);
+	return segs;
+}
+
#endif /* _UDP_H */
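udp_rcv_segment() consumes the original aggregate and returns a list of per-datagram skbs; on failure it charges all gso_segs would-be segments to sk_drops and UDP_MIB_INERRORS, so the error counters still match what the peer transmitted. Callers walk the returned list and must save each skb's next pointer before delivering it, because delivery consumes the segment. A minimal standalone sketch of that walk idiom, with illustrative names (struct pkt, deliver):

#include <stdlib.h>

struct pkt {
	struct pkt *next;
	/* payload omitted */
};

/* Stands in for udp_queue_rcv_one_skb(): consumes p, so the caller
 * must not touch p afterwards. */
static void deliver(struct pkt *p)
{
	free(p);
}

static void deliver_all(struct pkt *segs)
{
	struct pkt *p, *next;

	for (p = segs; p; p = next) {
		next = p->next;		/* save before deliver() frees p */
		deliver(p);
	}
}

int main(void)
{
	struct pkt *b = calloc(1, sizeof(*b));
	struct pkt *a = calloc(1, sizeof(*a));

	a->next = b;			/* two-segment list, a -> b */
	deliver_all(a);
	return 0;
}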
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dddc6fe90f51..3488650b90ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1906,7 +1906,7 @@ EXPORT_SYMBOL(udp_encap_enable);
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
-static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
@@ -2009,6 +2009,27 @@ drop:
return -1;
}
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udp_queue_rcv_one_skb(sk, skb);
+
+	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, true);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+		ret = udp_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+	}
+	return 0;
+}
+
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
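One way to exercise the new branch is the companion UDP_SEGMENT socket option (SOL_UDP, value 103, available since Linux 4.18): it makes the stack build exactly the SKB_GSO_UDP_L4 packets handled above, and over loopback they can reach the receiver still unsegmented. A hedged sketch; loopback and port 9000 are arbitrary choices:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103
#endif

int main(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int gso_size = 1000;
	char payload[3000];

	memset(payload, 'x', sizeof(payload));
	/* Ask the stack to split this 3000-byte send into three
	 * 1000-byte UDP datagrams (SKB_GSO_UDP_L4). */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		perror("setsockopt(UDP_SEGMENT)");
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	if (send(fd, payload, sizeof(payload), 0) < 0)
		perror("send");

	close(fd);
	return 0;
}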
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4c79dc5329bc..c55698d19d68 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -554,7 +554,7 @@ void udpv6_encap_enable(void)
}
EXPORT_SYMBOL(udpv6_encap_enable);
-static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
@@ -637,6 +637,28 @@ drop:
return -1;
}
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udpv6_queue_rcv_one_skb(sk, skb);
+
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, false);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+
+		ret = udpv6_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
+						 true);
+	}
+	return 0;
+}
+
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
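The IPv6 version mirrors the IPv4 one; only the hand-back differs, since ip6_protocol_deliver_rcu() takes the next-header value directly (plus a have_final flag) where the IPv4 path passes -ret. A socket that did set UDP_GRO instead receives the aggregate in one recvmsg() call together with a segment-size control message, filled in by udp_cmsg_recv() (see the include/linux/udp.h context at the top). A sketch of reading it, assuming an already-bound socket fd:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#ifndef UDP_GRO
#define UDP_GRO 104
#endif

/* Receive one (possibly aggregated) datagram and report the segment
 * size announced by the kernel; 0 means no aggregation happened. */
static ssize_t recv_gro(int fd, char *buf, size_t len, int *gso_size)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	ssize_t n = recvmsg(fd, &msg, 0);

	*gso_size = 0;
	if (n < 0)
		return n;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_UDP && cm->cmsg_type == UDP_GRO)
			memcpy(gso_size, CMSG_DATA(cm), sizeof(*gso_size));
	return n;
}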