path: root/include/linux/netdevice.h
author     Edward Cree <ecree@solarflare.com>  2019-08-06 14:53:55 +0100
committer  David S. Miller <davem@davemloft.net>  2019-08-08 18:22:29 -0700
commit     323ebb61e32b478e2432c5a3cbf9e2ca678a9609 (patch)
tree       8a644610f9b44ed5ebf658bf23c7745b66c015a7 /include/linux/netdevice.h
parent     sfc: falcon: don't score irq moderation points for GRO (diff)
net: use listified RX for handling GRO_NORMAL skbs
When GRO decides not to coalesce a packet, in napi_frags_finish(), instead of passing it to the stack immediately, place it on a list in the napi struct. Then, at flush time (napi_complete_done(), napi_poll(), or napi_busy_loop()), call netif_receive_skb_list_internal() on the list.

We'd like to do that in napi_gro_flush(), but it's not called if !napi->gro_bitmask, so we have to do it in the callers instead. (There are a handful of drivers that call napi_gro_flush() themselves, but it's not clear why, or whether this will affect them.)

Because a full 64 packets is an inefficiently large batch, also consume the list whenever it exceeds gro_normal_batch, a new net/core sysctl that defaults to 8.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
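For illustration, a minimal sketch of the batching helpers described above, roughly as they might appear in net/core/dev.c (the rest of the patch falls outside the include/linux/netdevice.h diffstat shown below). The helper names gro_normal_one() and gro_normal_list() are illustrative; the sketch assumes only the rx_list/rx_count fields and the gro_normal_batch sysctl added by this commit:

/* Pass any batched GRO_NORMAL skbs up to the stack and reset the batch. */
static void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL skb for list processing; once the batch reaches
 * gro_normal_batch packets, flush the whole list to the stack.
 */
static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
{
	list_add_tail(&skb->list, &napi->rx_list);
	if (++napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}

The flush points named above (napi_complete_done(), napi_poll(), napi_busy_loop()) would then call gro_normal_list() alongside napi_gro_flush() to drain anything still pending on rx_list.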
Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 3 +++
 1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 88292953aa6f..55ac223553f8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -332,6 +332,8 @@ struct napi_struct {
struct net_device *dev;
struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
+ struct list_head rx_list; /* Pending GRO_NORMAL skbs */
+ int rx_count; /* length of rx_list */
struct hrtimer timer;
struct list_head dev_list;
struct hlist_node napi_hash_node;
@@ -4239,6 +4241,7 @@ extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
extern int dev_rx_weight;
extern int dev_tx_weight;
+extern int gro_normal_batch;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,