author     Thomas Gschwantner <tharre3@gmail.com>  2018-07-18 17:47:50 +0200
committer  Jason A. Donenfeld <Jason@zx2c4.com>    2018-07-18 19:48:17 +0200
commit     fe5f0f661797b41648eac64b40e5038b25175047 (patch)
tree       be603b24838b5c7ffd6d0e96344f39e559969dad /src/compat/compat.h
parent     device: destroy workqueue before freeing queue (diff)
receive: disable NAPI busy polling
This avoids adding one reference per peer to the napi_hash hashtable, as is normally done by netif_napi_add(). Since we could potentially have up to 2^20 peers, this would make busy polling very slow globally.

This approach is preferable to having only a single napi struct because we get one gro_list per peer, which means packets can be combined nicely even when we have a large number of peers. This is also done by gro_cells_init() in net/core/gro_cells.c.

Signed-off-by: Thomas Gschwantner <tharre3@gmail.com>
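For reference, the caller-side pattern this relies on is the one used by gro_cells_init() in net/core/gro_cells.c: set NAPI_STATE_NO_BUSY_POLL on the napi struct before calling netif_napi_add(), so that the napi_hash_add() call inside netif_napi_add() bails out without hashing the instance. A minimal sketch under that assumption follows; my_peer_napi_init and the poll callback are illustrative names, not the exact WireGuard code:

	#include <linux/bitops.h>
	#include <linux/netdevice.h>

	/* Opt this NAPI instance out of busy polling *before* registering
	 * it, so netif_napi_add() skips the napi_hash_add() registration
	 * that would otherwise add one hashtable entry per peer.
	 */
	static void my_peer_napi_init(struct net_device *dev,
				      struct napi_struct *napi,
				      int (*poll)(struct napi_struct *, int))
	{
		set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
		netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
	}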
Diffstat (limited to 'src/compat/compat.h')
-rw-r--r--  src/compat/compat.h | 9 +++++++++
1 file changed, 9 insertions, 0 deletions
diff --git a/src/compat/compat.h b/src/compat/compat.h
index 997ae39..7943ab8 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -601,6 +601,15 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
#define napi_complete_done(n, work_done) napi_complete(n)
#endif

+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#include <linux/netdevice.h>
+/* NAPI_STATE_SCHED gets set by netif_napi_add anyway, so this is safe.
+ * Also, kernels without NAPI_STATE_NO_BUSY_POLL don't have a call to
+ * napi_hash_add inside of netif_napi_add.
+ */
+#define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED
+#endif
+
/* https://lkml.kernel.org/r/20170624021727.17835-1-Jason@zx2c4.com */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/ip.h>
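On kernels older than 4.5, the shim above keeps such a caller compiling with no behavioral change: as the comment in the hunk notes, those kernels have no napi_hash_add() call inside netif_napi_add(), so there is nothing to opt out of, and the caller's set_bit() merely pre-sets NAPI_STATE_SCHED, which netif_napi_add() sets anyway. Roughly, assuming the hypothetical caller sketched earlier, the preprocessed result on a pre-4.5 kernel is:

	/* NAPI_STATE_NO_BUSY_POLL has expanded to NAPI_STATE_SCHED: */
	set_bit(NAPI_STATE_SCHED, &napi->state);  /* redundant but harmless */
	netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);  /* sets NAPI_STATE_SCHED itself */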