author	Jason A. Donenfeld <Jason@zx2c4.com>	2018-02-20 02:23:08 +0100
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2018-02-20 02:24:29 +0100
commit	740319127f14793a13ad385e8150cd98c715c20c (patch)
tree	20a5ac1a2a45fd7daf5c842d0b495c16a3bff9f0
parent	version: bump snapshot (diff)
download	wireguard-monolithic-historical-740319127f14793a13ad385e8150cd98c715c20c.tar.xz
	wireguard-monolithic-historical-740319127f14793a13ad385e8150cd98c715c20c.zip
queueing: skb_reset: mark as xnet
This was avoided for a long time, because I wanted the packet to be charged to the original socket for as long as possible. However, this broke net_cls, which looks at skb->sk for additional late-stage routing decisions. So we had no choice but to ensure that skb->sk is NULL by the time of xmit, and this means calling the skb destructor.
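For context, the boolean argument to skb_scrub_packet() is xnet, which tells the scrubber the packet is (or should be treated as) crossing a network-namespace boundary and may therefore be orphaned. A simplified sketch of the relevant mainline helpers from this era (not verbatim kernel source; error paths and unrelated scrubbing elided):

/* Simplified sketch of the mainline helpers this commit leans on
 * (circa Linux 4.x); assumption, not the exact in-tree code. */

static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);	/* uncharge the original socket now */
		skb->destructor = NULL;
		skb->sk = NULL;		/* detach the packet from its socket */
	}
}

void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	/* ... always resets pkt_type, dst, secpath, conntrack state ... */

	if (!xnet)
		return;

	skb_orphan(skb);	/* the effect this commit wants */
	skb->mark = 0;		/* the namespace-local mark is dropped too */
}

Passing true therefore runs the skb destructor immediately, trading away the memory-accounting benefit described above for a guaranteed-NULL skb->sk at transmit time.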
-rw-r--r--	src/queueing.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/queueing.h b/src/queueing.h
index de8b7b2..d5948f3 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -63,7 +63,7 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
 static inline void skb_reset(struct sk_buff *skb)
 {
-	skb_scrub_packet(skb, false);
+	skb_scrub_packet(skb, true);
 	memset(&skb->headers_start, 0, offsetof(struct sk_buff, headers_end) - offsetof(struct sk_buff, headers_start));
 	skb->queue_mapping = 0;
 	skb->nohdr = 0;
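Why a stale skb->sk misleads net_cls: when classification happens in softirq context, the classifier cannot consult `current` and falls back to the cgroup classid of the socket still attached to the skb. A rough sketch paraphrasing the in-tree helper of the time (structure assumed from mainline cls_cgroup; the function name here is mine, not the kernel's):

/* Rough paraphrase of the kernel's task_get_classid() fallback
 * (include/net/cls_cgroup.h, circa 2018); assumption, not verbatim. */
static inline u32 classid_for_skb(const struct sk_buff *skb)
{
	u32 classid = task_cls_state(current)->classid;

	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* In softirq, `current` is arbitrary, so fall back to
		 * the socket the skb points at. If that is still the
		 * tunneled inner flow's socket, the encrypted outer
		 * packet gets the inner socket's class, which is the
		 * breakage this commit fixes by orphaning the skb. */
		if (!sk || !sk_fullsock(sk))
			return 0;
		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}
	return classid;
}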