diff options
author | Jason A. Donenfeld <Jason@zx2c4.com> | 2018-07-18 17:26:03 +0200 |
---|---|---|
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2018-07-18 18:34:47 +0200 |
commit | 646df74bfaf31d836817c41f047d71a9903fd316 (patch) | |
tree | 7500aa0b45e6e4f247a0e60bce72763eb51915ae /src/device.c | |
parent | device: destroy workqueue before freeing queue (diff) | |
download | wireguard-monolithic-historical-jd/remove-per-peer-queues.tar.xz wireguard-monolithic-historical-jd/remove-per-peer-queues.zip |
queueing: remove per-peer queues (branch: jd/remove-per-peer-queues)
Previously, having many peers would result in many napi structs, which
could make lookups in the napi_hash in net/core/dev.c slow. So, we move
to using a single napi struct per device.
The best solution would be to replace napi_hash with an idr or just get
rid of it altogether and use straight pointers. However, that isn't the
case currently, so we work with what is and begrudgingly remove per-peer
queues. On the upside, it means we reduce the per-peer memory usage by
about 8k/16k, but on the downside it means that napi_gro_receive is
called on a unified list, which might result in less GRO speedups on
systems with many peers active at once.
However, if napi_hash does ever go away, we should consider reverting
this commit.
Since this means moving to unified packet queues, flushing at peer
removal is something of a problem. So we make the slightly dubious
modification of just not flushing, and letting our reference counters do
the work. This in turn required some small changes to ensure that the
reference counter will, at some point in the future, still reach zero,
and not be kept alive by non-stop packet ingress.
Co-developed-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
Diffstat (limited to 'src/device.c')
-rw-r--r-- | src/device.c | 23 |
1 file changed, 20 insertions, 3 deletions
diff --git a/src/device.c b/src/device.c
index 54a94fb..9af2533 100644
--- a/src/device.c
+++ b/src/device.c
@@ -227,9 +227,13 @@ static void destruct(struct net_device *dev)
 	destroy_workqueue(wg->handshake_receive_wq);
 	destroy_workqueue(wg->handshake_send_wq);
 	destroy_workqueue(wg->packet_crypt_wq);
+	napi_disable(&wg->napi);
+	packet_queue_free(&wg->rx_queue, false);
+	packet_queue_free(&wg->tx_queue, false);
 	packet_queue_free(&wg->decrypt_queue, true);
 	packet_queue_free(&wg->encrypt_queue, true);
 	rcu_barrier_bh(); /* Wait for all the peers to be actually freed. */
+	netif_napi_del(&wg->napi);
 	ratelimiter_uninit();
 	memzero_explicit(&wg->static_identity, sizeof(struct noise_static_identity));
 	skb_queue_purge(&wg->incoming_handshakes);
@@ -322,13 +326,21 @@ static int newlink(struct net *src_net, struct net_device *dev, struct nlattr *t
 	if (packet_queue_init(&wg->decrypt_queue, packet_decrypt_worker, true, MAX_QUEUED_PACKETS) < 0)
 		goto error_7;

+	if (packet_queue_init(&wg->tx_queue, packet_tx_worker, false, MAX_QUEUED_PACKETS) < 0)
+		goto error_8;
+
+	if (packet_queue_init(&wg->rx_queue, NULL, false, MAX_QUEUED_PACKETS) < 0)
+		goto error_9;
+
 	ret = ratelimiter_init();
 	if (ret < 0)
-		goto error_8;
+		goto error_10;

+	netif_napi_add(dev, &wg->napi, packet_rx_poll, NAPI_POLL_WEIGHT);
+	napi_enable(&wg->napi);
 	ret = register_netdevice(dev);
 	if (ret < 0)
-		goto error_9;
+		goto error_11;

 	list_add(&wg->device_list, &device_list);
@@ -340,8 +352,13 @@ static int newlink(struct net *src_net, struct net_device *dev, struct nlattr *t
 	pr_debug("%s: Interface created\n", dev->name);
 	return ret;

-error_9:
+error_11:
+	netif_napi_del(&wg->napi);
 	ratelimiter_uninit();
+error_10:
+	packet_queue_free(&wg->rx_queue, false);
+error_9:
+	packet_queue_free(&wg->tx_queue, false);
 error_8:
 	packet_queue_free(&wg->decrypt_queue, true);
 error_7: