From fe5f0f661797b41648eac64b40e5038b25175047 Mon Sep 17 00:00:00 2001
From: Thomas Gschwantner
Date: Wed, 18 Jul 2018 17:47:50 +0200
Subject: receive: disable NAPI busy polling

This avoids adding one reference per peer to the napi_hash hashtable, as
normally done by netif_napi_add(). Since we could potentially have up to
2^20 peers, this would make busy polling very slow globally.

This approach is preferable to having only a single napi struct, because
we get one gro_list per peer, which means packets can be combined nicely
even if we have a large number of peers.

This is also done by gro_cells_init() in net/core/gro_cells.c.

Signed-off-by: Thomas Gschwantner
---
 src/compat/compat.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'src/compat/compat.h')

diff --git a/src/compat/compat.h b/src/compat/compat.h
index 997ae39..7943ab8 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -601,6 +601,15 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
 #define napi_complete_done(n, work_done) napi_complete(n)
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#include <linux/netdevice.h>
+/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe.
+ * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to
+ * napi_hash_add inside of netif_napi_add.
+ */
+#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED
+#endif
+
 /* https://lkml.kernel.org/r/20170624021727.17835-1-Jason@zx2c4.com */
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
-- 
cgit v1.2.3-59-g8ed1b
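
For readers wondering how this shim gets used: the point of defining
NAPI_STATE_NO_BUSY_POLL on old kernels is that the receive path can
unconditionally set that bit before calling netif_napi_add(), the same
trick gro_cells_init() uses in net/core/gro_cells.c. On >= 4.5 kernels,
a set bit makes napi_hash_add() return early, so the per-peer
napi_struct is never inserted into the global napi_hash table; on older
kernels, netif_napi_add() never calls napi_hash_add() and sets
NAPI_STATE_SCHED itself, so aliasing the flag to NAPI_STATE_SCHED is a
harmless no-op. A minimal sketch of that usage follows; struct wg_peer,
peer_napi_register() and wg_packet_rx_poll() are illustrative names
invented for this sketch, not identifiers taken from this patch:

#include <linux/netdevice.h>

/* Hypothetical per-peer state; only the embedded napi_struct matters
 * here. (wg_peer is an illustrative name, not from this patch.)
 */
struct wg_peer {
	struct napi_struct napi;
	/* ... other per-peer state ... */
};

/* Standard NAPI poll callback shape: process up to @budget packets and
 * call napi_complete_done() once the queue is drained.
 */
static int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... dequeue packets and hand them up, counting work_done ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void peer_napi_register(struct net_device *dev, struct wg_peer *peer)
{
	/* Opt this napi_struct out of busy polling *before* registering
	 * it: netif_napi_add() (via napi_hash_add()) checks this bit on
	 * >= 4.5 kernels and skips the napi_hash insertion. On < 4.5
	 * kernels the compat #define turns this into setting
	 * NAPI_STATE_SCHED, which netif_napi_add() sets anyway.
	 */
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(dev, &peer->napi, wg_packet_rx_poll, NAPI_POLL_WEIGHT);
	napi_enable(&peer->napi);
}

Keeping one napi_struct per peer rather than one global instance means
each peer gets its own gro_list, so GRO can still merge packets per
flow even with a very large number of peers.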