aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/src
diff options
context:
space:
mode:
authorSamuel Holland <samuel@sholland.org>2017-09-15 14:37:26 -0500
committerJason A. Donenfeld <Jason@zx2c4.com>2017-09-15 22:39:38 +0200
commitc441326a37f5f8e1b237b475844206a8ca693f4d (patch)
treebccd25fd9b42212892b229e439bce77667c6c16e /src
parentcompat: add READ_ONCE/WRITE_ONCE for old kernels (diff)
downloadwireguard-monolithic-historical-c441326a37f5f8e1b237b475844206a8ca693f4d.tar.xz
wireguard-monolithic-historical-c441326a37f5f8e1b237b475844206a8ca693f4d.zip
data: avoid running parallel/serial work on the same CPU
Diffstat (limited to 'src')
-rw-r--r--src/data.c9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/src/data.c b/src/data.c
index 000f035..f30b777 100644
--- a/src/data.c
+++ b/src/data.c
@@ -98,8 +98,15 @@ static inline bool queue_enqueue_per_peer(struct crypt_queue *queue, struct cryp
static inline void queue_enqueue_per_device(struct crypt_queue __percpu *queue, struct crypt_ctx *ctx, struct workqueue_struct *wq, int *next_cpu)
{
+ struct crypt_queue *cpu_queue;
int cpu = cpumask_next_online(next_cpu);
- struct crypt_queue *cpu_queue = per_cpu_ptr(queue, cpu);
+ /* Avoid running parallel work on the same CPU as the one handling all
+ * of the serial work. This improves overall throughput and especially
+ * throughput stability where we have at least two cores left for
+ * parallel work. */
+ if (cpu == ctx->peer->serial_work_cpu && num_online_cpus() >= 3)
+ cpu = cpumask_next_online(next_cpu);
+ cpu_queue = per_cpu_ptr(queue, cpu);
queue_enqueue(cpu_queue, &ctx->per_device_head, 0);
queue_work_on(cpu, wq, &cpu_queue->work);
}