aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/src/send.c
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2017-10-04 05:05:51 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2017-10-05 15:27:29 +0200
commitaa4d07187b66bf83bfddfa6d964f7c141518ba7b (patch)
treedd5d948dce7251d25079bea81ae103d7a0ae9bc8 /src/send.c
parentreceive: we're not planning on turning that into a while loop now (diff)
downloadwireguard-monolithic-historical-aa4d07187b66bf83bfddfa6d964f7c141518ba7b.tar.xz
wireguard-monolithic-historical-aa4d07187b66bf83bfddfa6d964f7c141518ba7b.zip
queueing: use ptr_ring instead of linked lists
Diffstat (limited to '')
-rw-r--r--src/send.c21
1 file changed, 15 insertions, 6 deletions
diff --git a/src/send.c b/src/send.c
index d1fe150..016e438 100644
--- a/src/send.c
+++ b/src/send.c
@@ -190,12 +190,15 @@ void packet_tx_worker(struct work_struct *work)
struct crypt_queue *queue = container_of(work, struct crypt_queue, work);
struct crypt_ctx *ctx;
- while ((ctx = queue_first_per_peer(queue)) != NULL && atomic_read(&ctx->is_finished)) {
- queue_dequeue(queue);
+ spin_lock_bh(&queue->ring.consumer_lock);
+ while ((ctx = __ptr_ring_peek(&queue->ring)) != NULL && atomic_read(&ctx->is_finished)) {
+ __ptr_ring_discard_one(&queue->ring);
packet_create_data_done(&ctx->packets, ctx->peer);
+ noise_keypair_put(ctx->keypair);
peer_put(ctx->peer);
kmem_cache_free(crypt_ctx_cache, ctx);
}
+ spin_unlock_bh(&queue->ring.consumer_lock);
}
void packet_encrypt_worker(struct work_struct *work)
@@ -206,7 +209,7 @@ void packet_encrypt_worker(struct work_struct *work)
struct wireguard_peer *peer;
bool have_simd = chacha20poly1305_init_simd();
- while ((ctx = queue_dequeue_per_device(queue)) != NULL) {
+ while ((ctx = ptr_ring_consume_bh(&queue->ring)) != NULL) {
skb_queue_walk_safe(&ctx->packets, skb, tmp) {
if (likely(skb_encrypt(skb, ctx->keypair, have_simd))) {
skb_reset(skb);
@@ -215,7 +218,6 @@ void packet_encrypt_worker(struct work_struct *work)
dev_kfree_skb(skb);
}
}
- noise_keypair_put(ctx->keypair);
/* Dereferencing ctx is unsafe once ctx->is_finished == true, so
* we grab an additional reference to peer. */
peer = peer_rcu_get(ctx->peer);
@@ -230,6 +232,7 @@ static void packet_create_data(struct wireguard_peer *peer, struct sk_buff_head
{
struct crypt_ctx *ctx;
struct wireguard_device *wg = peer->device;
+ int ret;
ctx = kmem_cache_alloc(crypt_ctx_cache, GFP_ATOMIC);
if (unlikely(!ctx)) {
@@ -242,11 +245,17 @@ static void packet_create_data(struct wireguard_peer *peer, struct sk_buff_head
ctx->peer = peer;
__skb_queue_head_init(&ctx->packets);
skb_queue_splice_tail(packets, &ctx->packets);
- if (likely(queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, ctx, wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu)))
+ ret = queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, ctx, wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+ if (likely(!ret))
return; /* Successful. No need to fall through to drop references below. */
__skb_queue_purge(&ctx->packets);
- kmem_cache_free(crypt_ctx_cache, ctx);
+ if (ret == -EPIPE) {
+ atomic_set(&ctx->is_finished, true);
+ queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &peer->tx_queue.work);
+ return;
+ } else
+ kmem_cache_free(crypt_ctx_cache, ctx);
err_drop_refs:
noise_keypair_put(keypair);