about summary refs log tree commit diff stats homepage
path: root/include/net/dst.h
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2022-11-15 08:53:55 +0000
committerDavid S. Miller <davem@davemloft.net>2022-11-16 12:48:44 +0000
commit6c1c5097781f563b70a81683ea6fdac21637573b (patch)
tree55d3046d731a5bc00f1c0a6f4e535385d3de7e43 /include/net/dst.h
parentMerge branch 'net-try_cmpxchg-conversions' (diff)
downloadwireguard-linux-6c1c5097781f563b70a81683ea6fdac21637573b.tar.xz
wireguard-linux-6c1c5097781f563b70a81683ea6fdac21637573b.zip
net: add atomic_long_t to net_device_stats fields
Long standing KCSAN issues are caused by data-race around some dev->stats changes. Most performance critical paths already use per-cpu variables, or per-queue ones. It is reasonable (and more correct) to use atomic operations for the slow paths. This patch adds a union for each field of net_device_stats, so that we can convert paths that are not yet protected by a spinlock or a mutex. netdev_stats_to_stats64() no longer has an #if BITS_PER_LONG==64 Note that the memcpy() we were using on 64bit arches had no provision to avoid load-tearing, while atomic_long_read() is providing the needed protection at no cost. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/dst.h')
-rw-r--r--include/net/dst.h5
1 file changed, 2 insertions, 3 deletions
diff --git a/include/net/dst.h b/include/net/dst.h
index 00b479ce6b99..d67fda89cd0f 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
- /* TODO : stats should be SMP safe */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ DEV_STATS_INC(dev, rx_packets);
+ DEV_STATS_ADD(dev, rx_bytes, skb->len);
__skb_tunnel_rx(skb, dev, net);
}