author		Eric Dumazet <edumazet@google.com>	2022-03-10 21:14:20 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2022-03-11 23:17:24 -0800
commit		625788b5844511cf4c30cffa7fa0bc3a69cebc82 (patch)
tree		1b1b091212352474f56d6c4497017bc206c7c785 /include/linux/netdevice.h
parent		Merge branch 'nfp-preliminary-support-for-nfp-3800' (diff)
net: add per-cpu storage and net->core_stats
Before adding yet another possibly contended atomic_long_t, it is time
to add per-cpu storage for existing ones: dev->tx_dropped,
dev->rx_dropped, and dev->rx_nohandler.

Because many devices do not have to increment such counters, allocate
the per-cpu storage on demand, so that dev_get_stats() does not have
to spend considerable time folding zero counters.

Note that some drivers have abused these counters which were supposed
to be only used by core networking stack.

v4: should use per_cpu_ptr() in dev_get_stats() (Jakub)
v3: added a READ_ONCE() in netdev_core_stats_alloc() (Paolo)
v2: add a missing include (reported by kernel test robot <lkp@intel.com>)
    Change in netdev_core_stats_alloc() (Jakub)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: jeffreyji <jeffreyji@google.com>
Reviewed-by: Brian Vazquez <brianvv@google.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20220311051420.2608812-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
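The dev_get_stats() side referenced above lives in net/core/dev.c and is not
part of this page's diffstat, which is limited to netdevice.h. As a hedged
sketch of how the on-demand per-cpu counters might be folded into the
rtnl_link_stats64 output (the names "p" and "storage" are assumptions for
illustration, not taken from the patch):

	/* Sketch only: fold per-cpu core stats in dev_get_stats(), assuming
	 * "storage" points at the struct rtnl_link_stats64 being filled in.
	 */
	const struct net_device_core_stats __percpu *p;

	p = READ_ONCE(dev->core_stats);
	if (p) {
		const struct net_device_core_stats *core_stats;
		int i;

		for_each_possible_cpu(i) {
			core_stats = per_cpu_ptr(p, i);
			storage->rx_dropped += local_read(&core_stats->rx_dropped);
			storage->tx_dropped += local_read(&core_stats->tx_dropped);
			storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
		}
	}

Because the pointer stays NULL until a counter is actually incremented,
devices that never hit these paths skip the per-CPU walk entirely.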
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	46
1 file changed, 37 insertions(+), 9 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index acd3cf69b61f..0d994710b335 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
@@ -194,6 +195,14 @@ struct net_device_stats {
unsigned long tx_compressed;
};
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats() sake.
+ */
+struct net_device_core_stats {
+ local_t rx_dropped;
+ local_t tx_dropped;
+ local_t rx_nohandler;
+} __aligned(4 * sizeof(local_t));
#include <linux/cache.h>
#include <linux/skbuff.h>
@@ -1735,12 +1744,8 @@ enum netdev_ml_priv_type {
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
- * @rx_dropped: Dropped packets by core network,
- * do not use this in drivers
- * @tx_dropped: Dropped packets by core network,
+ * @core_stats: core networking counters,
* do not use this in drivers
- * @rx_nohandler: nohandler dropped packets by core network on
- * inactive devices, do not use this in drivers
* @carrier_up_count: Number of times the carrier has been up
* @carrier_down_count: Number of times the carrier has been down
*
@@ -2023,9 +2028,7 @@ struct net_device {
struct net_device_stats stats; /* not used by modern drivers */
- atomic_long_t rx_dropped;
- atomic_long_t tx_dropped;
- atomic_long_t rx_nohandler;
+ struct net_device_core_stats __percpu *core_stats;
/* Stats to monitor link on/off, flapping */
atomic_t carrier_up_count;
@@ -3839,13 +3842,38 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
return false;
}
+struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
+
+static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
+{
+ /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+ struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
+
+ if (likely(p))
+ return this_cpu_ptr(p);
+
+ return netdev_core_stats_alloc(dev);
+}
+
+#define DEV_CORE_STATS_INC(FIELD) \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
+{ \
+ struct net_device_core_stats *p = dev_core_stats(dev); \
+ \
+ if (p) \
+ local_inc(&p->FIELD); \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
- atomic_long_inc(&dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
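The allocation helper netdev_core_stats_alloc() is declared in this header but
implemented in net/core/dev.c, so it does not appear in the diff above. A
hedged sketch consistent with the declaration and with the READ_ONCE() comment
in dev_core_stats() might look like this (the exact implementation is not
shown on this page):

	struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
	{
		/* Sketch only: allocate the per-cpu storage on first use.
		 * GFP_ATOMIC because the *_inc() helpers can run in any context.
		 */
		struct net_device_core_stats __percpu *p;

		p = alloc_percpu_gfp(struct net_device_core_stats,
				     GFP_ATOMIC | __GFP_NOWARN);
		/* If another CPU installed its allocation first, drop ours. */
		if (p && cmpxchg(&dev->core_stats, NULL, p))
			free_percpu(p);

		/* This write/readback pairs with the READ_ONCE() in dev_core_stats(). */
		p = READ_ONCE(dev->core_stats);
		if (!p)
			return NULL;

		return this_cpu_ptr(p);
	}
	EXPORT_SYMBOL(netdev_core_stats_alloc);

Returning NULL on allocation failure is why the dev_core_stats_##FIELD##_inc()
helpers check the pointer before calling local_inc().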